Refactor connection task management to avoid cancels (#117)
[osm/N2VC.git] / juju / model.py
1 import asyncio
2 import base64
3 import collections
4 import hashlib
5 import json
6 import logging
7 import os
8 import re
9 import stat
10 import tempfile
11 import weakref
12 import zipfile
13 from concurrent.futures import CancelledError
14 from functools import partial
15 from pathlib import Path
16
17 import yaml
18 import theblues.charmstore
19 import theblues.errors
20
21 from . import tag, utils
22 from .client import client
23 from .client import connection
24 from .constraints import parse as parse_constraints, normalize_key
25 from .delta import get_entity_delta
26 from .delta import get_entity_class
27 from .exceptions import DeadEntityException
28 from .errors import JujuError, JujuAPIError
29 from .placement import parse as parse_placement
30
31 log = logging.getLogger(__name__)
32
33
34 class _Observer(object):
35 """Wrapper around an observer callable.
36
37 This wrapper allows filter criteria to be associated with the
38 callable so that it's only called for changes that meet the criteria.
39
40 """
41 def __init__(self, callable_, entity_type, action, entity_id, predicate):
42 self.callable_ = callable_
43 self.entity_type = entity_type
44 self.action = action
45 self.entity_id = entity_id
46 self.predicate = predicate
47 if self.entity_id:
48 self.entity_id = str(self.entity_id)
49 if not self.entity_id.startswith('^'):
50 self.entity_id = '^' + self.entity_id
51 if not self.entity_id.endswith('$'):
52 self.entity_id += '$'
53
54 async def __call__(self, delta, old, new, model):
55 await self.callable_(delta, old, new, model)
56
57 def cares_about(self, delta):
58 """Return True if this observer "cares about" (i.e. wants to be
59 called) for a this delta.
60
61 """
62 if (self.entity_id and delta.get_id() and
63 not re.match(self.entity_id, str(delta.get_id()))):
64 return False
65
66 if self.entity_type and self.entity_type != delta.entity:
67 return False
68
69 if self.action and self.action != delta.type:
70 return False
71
72 if self.predicate and not self.predicate(delta):
73 return False
74
75 return True
76
77
78 class ModelObserver(object):
79 async def __call__(self, delta, old, new, model):
80 handler_name = 'on_{}_{}'.format(delta.entity, delta.type)
81 method = getattr(self, handler_name, self.on_change)
82 await method(delta, old, new, model)
83
84 async def on_change(self, delta, old, new, model):
85 """Generic model-change handler.
86
87 :param delta: :class:`juju.client.overrides.Delta`
88 :param old: :class:`juju.model.ModelEntity`
89 :param new: :class:`juju.model.ModelEntity`
90 :param model: :class:`juju.model.Model`
91
92 """
93 pass
94
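# Example (illustrative sketch, not part of the original module): reacting to
# unit changes by subclassing ModelObserver. Handler names follow the
# on_<entity>_<action> pattern resolved in __call__ above; MyObserver and the
# ``model`` variable are assumptions made for this example.
#
#     class MyObserver(ModelObserver):
#         async def on_unit_change(self, delta, old, new, model):
#             print('unit changed:', new.entity_id)
#
#     observer = MyObserver()   # keep a reference; observers are held weakly
#     model.add_observer(observer)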
95
96 class ModelState(object):
97 """Holds the state of the model, including the delta history of all
98 entities in the model.
99
100 """
101 def __init__(self, model):
102 self.model = model
103 self.state = dict()
104
105 def _live_entity_map(self, entity_type):
106 """Return an id:Entity map of all the living entities of
107 type ``entity_type``.
108
109 """
110 return {
111 entity_id: self.get_entity(entity_type, entity_id)
112 for entity_id, history in self.state.get(entity_type, {}).items()
113 if history[-1] is not None
114 }
115
116 @property
117 def applications(self):
118 """Return a map of application-name:Application for all applications
119 currently in the model.
120
121 """
122 return self._live_entity_map('application')
123
124 @property
125 def machines(self):
126 """Return a map of machine-id:Machine for all machines currently in
127 the model.
128
129 """
130 return self._live_entity_map('machine')
131
132 @property
133 def units(self):
134 """Return a map of unit-id:Unit for all units currently in
135 the model.
136
137 """
138 return self._live_entity_map('unit')
139
140 def entity_history(self, entity_type, entity_id):
141 """Return the history deque for an entity.
142
143 """
144 return self.state[entity_type][entity_id]
145
146 def entity_data(self, entity_type, entity_id, history_index):
147 """Return the data dict for an entity at a specific index of its
148 history.
149
150 """
151 return self.entity_history(entity_type, entity_id)[history_index]
152
153 def apply_delta(self, delta):
154 """Apply delta to our state and return a copy of the
155 affected object as it was before and after the update, e.g.:
156
157 old_obj, new_obj = self.apply_delta(delta)
158
159 old_obj may be None if the delta is for the creation of a new object,
160 e.g. a new application or unit is deployed.
161
162 new_obj will never be None, but may be dead (new_obj.dead == True)
163 if the object was deleted as a result of the delta being applied.
164
165 """
166 history = (
167 self.state
168 .setdefault(delta.entity, {})
169 .setdefault(delta.get_id(), collections.deque())
170 )
171
172 history.append(delta.data)
173 if delta.type == 'remove':
174 history.append(None)
175
176 entity = self.get_entity(delta.entity, delta.get_id())
177 return entity.previous(), entity
178
179 def get_entity(
180 self, entity_type, entity_id, history_index=-1, connected=True):
181 """Return an object instance for the given entity_type and id.
182
183 By default the object state matches the most recent state from
184 Juju. To get an instance of the object in an older state, pass
185 history_index, an index into the history deque for the entity.
186
187 """
188
189 if history_index < 0 and history_index != -1:
190 history_index += len(self.entity_history(entity_type, entity_id))
191 if history_index < 0:
192 return None
193
194 try:
195 self.entity_data(entity_type, entity_id, history_index)
196 except IndexError:
197 return None
198
199 entity_class = get_entity_class(entity_type)
200 return entity_class(
201 entity_id, self.model, history_index=history_index,
202 connected=connected)
203
204
205 class ModelEntity(object):
206 """An object in the Model tree"""
207
208 def __init__(self, entity_id, model, history_index=-1, connected=True):
209 """Initialize a new entity
210
211 :param entity_id str: The unique id of the object in the model
212 :param model: The model instance in whose object tree this
213 entity resides
214 :param history_index int: The index of this object's state in the model's
215 history deque for this entity
216 :param connected bool: Flag indicating whether this object gets live updates
217 from the model.
218
219 """
220 self.entity_id = entity_id
221 self.model = model
222 self._history_index = history_index
223 self.connected = connected
224 self.connection = model.connection
225
226 def __repr__(self):
227 return '<{} entity_id="{}">'.format(type(self).__name__,
228 self.entity_id)
229
230 def __getattr__(self, name):
231 """Fetch object attributes from the underlying data dict held in the
232 model.
233
234 """
235 try:
236 return self.safe_data[name]
237 except KeyError:
238 name = name.replace('_', '-')
239 if name in self.safe_data:
240 return self.safe_data[name]
241 else:
242 raise
243
244 def __bool__(self):
245 return bool(self.data)
246
247 def on_change(self, callable_):
248 """Add a change observer to this entity.
249
250 """
251 self.model.add_observer(
252 callable_, self.entity_type, 'change', self.entity_id)
253
254 def on_remove(self, callable_):
255 """Add a remove observer to this entity.
256
257 """
258 self.model.add_observer(
259 callable_, self.entity_type, 'remove', self.entity_id)
260
261 @property
262 def entity_type(self):
263 """A string identifying the entity type of this object, e.g.
264 'application' or 'unit', etc.
265
266 """
267 return self.__class__.__name__.lower()
268
269 @property
270 def current(self):
271 """Return True if this object represents the current state of the
272 entity in the underlying model.
273
274 This will be True except when the object represents an entity at a
275 non-latest state in history, e.g. if the object was obtained by calling
276 .previous() on another object.
277
278 """
279 return self._history_index == -1
280
281 @property
282 def dead(self):
283 """Returns True if this entity no longer exists in the underlying
284 model.
285
286 """
287 return (
288 self.data is None or
289 self.model.state.entity_data(
290 self.entity_type, self.entity_id, -1) is None
291 )
292
293 @property
294 def alive(self):
295 """Returns True if this entity still exists in the underlying
296 model.
297
298 """
299 return not self.dead
300
301 @property
302 def data(self):
303 """The data dictionary for this entity.
304
305 """
306 return self.model.state.entity_data(
307 self.entity_type, self.entity_id, self._history_index)
308
309 @property
310 def safe_data(self):
311 """The data dictionary for this entity.
312
313 If this `ModelEntity` points to the dead state, it will
314 raise `DeadEntityException`.
315
316 """
317 if self.data is None:
318 raise DeadEntityException(
319 "Entity {}:{} is dead - its attributes can no longer be "
320 "accessed. Use the .previous() method on this object to get "
321 "a copy of the object at its previous state.".format(
322 self.entity_type, self.entity_id))
323 return self.data
324
325 def previous(self):
326 """Return a copy of this object as was at its previous state in
327 history.
328
329 Returns None if this object is new (and therefore has no history).
330
331 The returned object is always "disconnected", i.e. does not receive
332 live updates.
333
334 """
335 return self.model.state.get_entity(
336 self.entity_type, self.entity_id, self._history_index - 1,
337 connected=False)
338
339 def next(self):
340 """Return a copy of this object at its next state in
341 history.
342
343 Returns None if this object is already the latest.
344
345 The returned object is "disconnected", i.e. does not receive
346 live updates, unless it is current (latest).
347
348 """
349 if self._history_index == -1:
350 return None
351
352 new_index = self._history_index + 1
353 connected = (
354 new_index == len(self.model.state.entity_history(
355 self.entity_type, self.entity_id)) - 1
356 )
357 return self.model.state.get_entity(
358 self.entity_type, self.entity_id, new_index,
359 connected=connected)
360
361 def latest(self):
362 """Return a copy of this object at its current state in the model.
363
364 Returns self if this object is already the latest.
365
366 The returned object is always "connected", i.e. receives
367 live updates from the model.
368
369 """
370 if self._history_index == -1:
371 return self
372
373 return self.model.state.get_entity(self.entity_type, self.entity_id)
374
375
376 class Model(object):
377 def __init__(self, loop=None):
378 """Instantiate a new connected Model.
379
380 :param loop: an asyncio event loop
381
382 """
383 self.loop = loop or asyncio.get_event_loop()
384 self.connection = None
385 self.observers = weakref.WeakValueDictionary()
386 self.state = ModelState(self)
387 self.info = None
388 self._watch_stopping = asyncio.Event(loop=self.loop)
389 self._watch_stopped = asyncio.Event(loop=self.loop)
390 self._watch_received = asyncio.Event(loop=self.loop)
391 self._charmstore = CharmStore(self.loop)
392
393 async def connect(self, *args, **kw):
394 """Connect to an arbitrary Juju model.
395
396 args and kw are passed through to Connection.connect()
397
398 """
399 if 'loop' not in kw:
400 kw['loop'] = self.loop
401 self.connection = await connection.Connection.connect(*args, **kw)
402 await self._after_connect()
403
404 async def connect_current(self):
405 """Connect to the current Juju model.
406
407 """
408 self.connection = await connection.Connection.connect_current(
409 self.loop)
410 await self._after_connect()
411
412 async def connect_model(self, model_name):
413 """Connect to a specific Juju model by name.
414
415 :param model_name: Format [controller:][user/]model
416
417 """
418 self.connection = await connection.Connection.connect_model(model_name,
419 self.loop)
420 await self._after_connect()
421
422 async def _after_connect(self):
423 """Run initialization steps after connecting to websocket.
424
425 """
426 self._watch()
427 await self._watch_received.wait()
428 await self.get_info()
429
430 async def disconnect(self):
431 """Shut down the watcher task and close websockets.
432
433 """
434 if self.connection and self.connection.is_open:
435 log.debug('Stopping watcher task')
436 self._watch_stopping.set()
437 await self._watch_stopped.wait()
438 log.debug('Closing model connection')
439 await self.connection.close()
440 self.connection = None
441
442 async def add_local_charm_dir(self, charm_dir, series):
443 """Upload a local charm to the model.
444
445 This will automatically generate an archive from
446 the charm dir.
447
448 :param charm_dir: Path to the charm directory
449 :param series: Charm series
450
451 """
452 fh = tempfile.NamedTemporaryFile()
453 CharmArchiveGenerator(charm_dir).make_archive(fh.name)
454 with fh:
455 func = partial(
456 self.add_local_charm, fh, series, os.stat(fh.name).st_size)
457 charm_url = await self.loop.run_in_executor(None, func)
458
459 log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
460 return charm_url
461
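# Example (illustrative sketch): Model.deploy() calls this helper for local
# charm directories, but it can also be used directly to pre-upload a charm.
# The './my-charm' path is a placeholder, not a real charm.
#
#     charm_url = await model.add_local_charm_dir('./my-charm', 'xenial')
#     print(charm_url)   # e.g. local:xenial/my-charm-0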
462 def add_local_charm(self, charm_file, series, size=None):
463 """Upload a local charm archive to the model.
464
465 Returns the 'local:...' url that should be used to deploy the charm.
466
467 :param charm_file: Path to charm zip archive
468 :param series: Charm series
469 :param size: Size of the archive, in bytes
470 :return str: 'local:...' url for deploying the charm
471 :raises: :class:`JujuError` if the upload fails
472
473 Uses an https endpoint at the same host:port as the wss.
474 Supports large file uploads.
475
476 .. warning::
477
478 This method will block. Consider using :meth:`add_local_charm_dir`
479 instead.
480
481 """
482 conn, headers, path_prefix = self.connection.https_connection()
483 path = "%s/charms?series=%s" % (path_prefix, series)
484 headers['Content-Type'] = 'application/zip'
485 if size:
486 headers['Content-Length'] = size
487 conn.request("POST", path, charm_file, headers)
488 response = conn.getresponse()
489 result = response.read().decode()
490 if response.status != 200:
491 raise JujuError(result)
492 result = json.loads(result)
493 return result['charm-url']
494
495 def all_units_idle(self):
496 """Return True if all units are idle.
497
498 """
499 for unit in self.units.values():
500 unit_status = unit.data['agent-status']['current']
501 if unit_status != 'idle':
502 return False
503 return True
504
505 async def reset(self, force=False):
506 """Reset the model to a clean state.
507
508 :param bool force: Force-terminate machines.
509
510 This returns only after the model has reached a clean state. "Clean"
511 means no applications or machines exist in the model.
512
513 """
514 log.debug('Resetting model')
515 for app in self.applications.values():
516 await app.destroy()
517 for machine in self.machines.values():
518 await machine.destroy(force=force)
519 await self.block_until(
520 lambda: len(self.machines) == 0
521 )
522
523 async def block_until(self, *conditions, timeout=None, wait_period=0.5):
524 """Return only after all conditions are true.
525
526 """
527 async def _block():
528 while not all(c() for c in conditions):
529 await asyncio.sleep(wait_period, loop=self.loop)
530 await asyncio.wait_for(_block(), timeout, loop=self.loop)
531
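# Example (illustrative sketch): wait up to ten minutes for every unit in the
# model to report an idle agent, polling every two seconds.
#
#     await model.block_until(model.all_units_idle,
#                             timeout=600, wait_period=2)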
532 @property
533 def applications(self):
534 """Return a map of application-name:Application for all applications
535 currently in the model.
536
537 """
538 return self.state.applications
539
540 @property
541 def machines(self):
542 """Return a map of machine-id:Machine for all machines currently in
543 the model.
544
545 """
546 return self.state.machines
547
548 @property
549 def units(self):
550 """Return a map of unit-id:Unit for all units currently in
551 the model.
552
553 """
554 return self.state.units
555
556 async def get_info(self):
557 """Return a client.ModelInfo object for this Model.
558
559 Retrieves latest info for this Model from the api server. The
560 return value is cached on the Model.info attribute so that the
561 value may be accessed again without another api call, if
562 desired.
563
564 This method is called automatically when the Model is connected,
565 resulting in Model.info being initialized without requiring an
566 explicit call to this method.
567
568 """
569 facade = client.ClientFacade.from_connection(self.connection)
570
571 self.info = await facade.ModelInfo()
572 log.debug('Got ModelInfo: %s', vars(self.info))
573
574 return self.info
575
576 def add_observer(
577 self, callable_, entity_type=None, action=None, entity_id=None,
578 predicate=None):
579 """Register an "on-model-change" callback
580
581 Once the model is connected, ``callable_``
582 will be called each time the model changes. ``callable_`` should
583 be Awaitable and accept the following positional arguments:
584
585 delta - An instance of :class:`juju.delta.EntityDelta`
586 containing the raw delta data recv'd from the Juju
587 websocket.
588
589 old_obj - If the delta modifies an existing object in the model,
590 old_obj will be a copy of that object, as it was before the
591 delta was applied. Will be None if the delta creates a new
592 entity in the model.
593
594 new_obj - A copy of the new or updated object, after the delta
595 is applied. Will be None if the delta removes an entity
596 from the model.
597
598 model - The :class:`Model` itself.
599
600 Events for which ``callable_`` is called can be specified by passing
601 entity_type, action, and/or entity_id filter criteria, e.g.::
602
603 add_observer(
604 myfunc,
605 entity_type='application', action='add', entity_id='ubuntu')
606
607 For more complex filtering conditions, pass a predicate function. It
608 will be called with a delta as its only argument. If the predicate
609 function returns True, the ``callable_`` will be called.
610
611 """
612 observer = _Observer(
613 callable_, entity_type, action, entity_id, predicate)
614 self.observers[observer] = callable_
615
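# Example (illustrative sketch): observe only units whose workload status
# changes to 'error'. The 'workload-status' key is assumed to be present in
# unit deltas as delivered by the Juju watcher; ``model`` is assumed to be a
# connected Model.
#
#     async def on_error(delta, old, new, model):
#         print('unit in error:', new.entity_id)
#
#     model.add_observer(
#         on_error, entity_type='unit', action='change',
#         predicate=lambda d: d.data['workload-status']['current'] == 'error')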
616 def _watch(self):
617 """Start an asynchronous watch against this model.
618
619 See :meth:`add_observer` to register an onchange callback.
620
621 """
622 async def _start_watch():
623 try:
624 allwatcher = client.AllWatcherFacade.from_connection(
625 self.connection)
626 while not self._watch_stopping.is_set():
627 results = await utils.run_with_interrupt(
628 allwatcher.Next(),
629 self._watch_stopping,
630 self.loop)
631 if self._watch_stopping.is_set():
632 break
633 for delta in results.deltas:
634 delta = get_entity_delta(delta)
635 old_obj, new_obj = self.state.apply_delta(delta)
636 await self._notify_observers(delta, old_obj, new_obj)
637 self._watch_received.set()
638 except CancelledError:
639 pass
640 except Exception:
641 log.exception('Error in watcher')
642 raise
643 finally:
644 self._watch_stopped.set()
645
646 log.debug('Starting watcher task')
647 self._watch_received.clear()
648 self._watch_stopping.clear()
649 self._watch_stopped.clear()
650 self.loop.create_task(_start_watch())
651
652 async def _notify_observers(self, delta, old_obj, new_obj):
653 """Call observing callbacks, notifying them of a change in model state
654
655 :param delta: The raw change from the watcher
656 (:class:`juju.client.overrides.Delta`)
657 :param old_obj: The object in the model that this delta updates.
658 May be None.
659 :param new_obj: The object in the model that is created or updated
660 by applying this delta.
661
662 """
663 if new_obj and not old_obj:
664 delta.type = 'add'
665
666 log.debug(
667 'Model changed: %s %s %s',
668 delta.entity, delta.type, delta.get_id())
669
670 for o in self.observers:
671 if o.cares_about(delta):
672 asyncio.ensure_future(o(delta, old_obj, new_obj, self),
673 loop=self.loop)
674
675 async def _wait(self, entity_type, entity_id, action, predicate=None):
676 """
677 Block the calling routine until a given action has happened to the
678 given entity
679
680 :param entity_type: The entity's type.
681 :param entity_id: The entity's id.
682 :param action: the type of action (e.g., 'add', 'change', or 'remove')
683 :param predicate: optional callable that must take as an
684 argument a delta, and must return a boolean, indicating
685 whether the delta contains the specific action we're looking
686 for. For example, you might check to see whether a 'change'
687 has a 'completed' status. See the _Observer class for details.
688
689 """
690 q = asyncio.Queue(loop=self.loop)
691
692 async def callback(delta, old, new, model):
693 await q.put(delta.get_id())
694
695 self.add_observer(callback, entity_type, action, entity_id, predicate)
696 entity_id = await q.get()
697 # object might not be in the entity_map if we were waiting for a
698 # 'remove' action
699 return self.state._live_entity_map(entity_type).get(entity_id)
700
701 async def _wait_for_new(self, entity_type, entity_id=None, predicate=None):
702 """Wait for a new object to appear in the Model and return it.
703
704 Waits for an object of type ``entity_type`` with id ``entity_id``.
705 If ``entity_id`` is ``None``, it will wait for the first new entity
706 of the correct type.
707
708 This coroutine blocks until the new object appears in the model.
709
710 """
711 # if the entity is already in the model, just return it
712 if entity_id in self.state._live_entity_map(entity_type):
713 return self.state._live_entity_map(entity_type)[entity_id]
714 # if we know the entity_id, we can trigger on any action that puts
715 # the entity into the model; otherwise, we have to watch for the
716 # next "add" action on that entity_type
717 action = 'add' if entity_id is None else None
718 return await self._wait(entity_type, entity_id, action, predicate)
719
720 async def wait_for_action(self, action_id):
721 """Given an action, wait for it to complete."""
722
723 if action_id.startswith("action-"):
724 # if we've been passed action.tag, transform it into the
725 # id that the api deltas will use.
726 action_id = action_id[7:]
727
728 def predicate(delta):
729 return delta.data['status'] in ('completed', 'failed')
730
731 return await self._wait('action', action_id, 'change', predicate)
732
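# Example (illustrative sketch): kick off an action on a unit and block until
# it finishes; unit.run_action() and the 'backup' action name are assumptions
# made for this example.
#
#     action = await unit.run_action('backup')
#     action = await model.wait_for_action(action.entity_id)
#     print(action.status)   # 'completed' or 'failed'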
733 async def add_machine(
734 self, spec=None, constraints=None, disks=None, series=None):
735 """Start a new, empty machine and optionally a container, or add a
736 container to a machine.
737
738 :param str spec: Machine specification
739 Examples::
740
741 (None) - starts a new machine
742 'lxd' - starts a new machine with one lxd container
743 'lxd:4' - starts a new lxd container on machine 4
744 'ssh:user@10.10.0.3' - manually provisions a machine with ssh
745 'zone=us-east-1a' - starts a machine in zone us-east-1a on AWS
746 'maas2.name' - acquire machine maas2.name on MAAS
747
748 :param dict constraints: Machine constraints
749 Example::
750
751 constraints={
752 'mem': 256 * MB,
753 }
754
755 :param list disks: List of disk constraint dictionaries
756 Example::
757
758 disks=[{
759 'pool': 'rootfs',
760 'size': 10 * GB,
761 'count': 1,
762 }]
763
764 :param str series: Series, e.g. 'xenial'
765
766 Supported container types are: lxd, kvm
767
768 When deploying a container to an existing machine, constraints cannot
769 be used.
770
771 """
772 params = client.AddMachineParams()
773 params.jobs = ['JobHostUnits']
774
775 if spec:
776 placement = parse_placement(spec)
777 if placement:
778 params.placement = placement[0]
779
780 if constraints:
781 params.constraints = client.Value.from_json(constraints)
782
783 if disks:
784 params.disks = [
785 client.Constraints.from_json(o) for o in disks]
786
787 if series:
788 params.series = series
789
790 # Submit the request.
791 client_facade = client.ClientFacade.from_connection(self.connection)
792 results = await client_facade.AddMachines([params])
793 error = results.machines[0].error
794 if error:
795 raise ValueError("Error adding machine: %s" % error.message)
796 machine_id = results.machines[0].machine
797 log.debug('Added new machine %s', machine_id)
798 return await self._wait_for_new('machine', machine_id)
799
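# Example (illustrative sketch): add a bare xenial machine, then an lxd
# container on that machine.
#
#     machine = await model.add_machine(series='xenial')
#     container = await model.add_machine('lxd:{}'.format(machine.id))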
800 async def add_relation(self, relation1, relation2):
801 """Add a relation between two applications.
802
803 :param str relation1: '<application>[:<relation_name>]'
804 :param str relation2: '<application>[:<relation_name>]'
805
806 """
807 app_facade = client.ApplicationFacade.from_connection(self.connection)
808
809 log.debug(
810 'Adding relation %s <-> %s', relation1, relation2)
811
812 try:
813 result = await app_facade.AddRelation([relation1, relation2])
814 except JujuAPIError as e:
815 if 'relation already exists' not in e.message:
816 raise
817 log.debug(
818 'Relation %s <-> %s already exists', relation1, relation2)
819 # TODO: if relation already exists we should return the
820 # Relation ModelEntity here
821 return None
822
823 def predicate(delta):
824 endpoints = {}
825 for endpoint in delta.data['endpoints']:
826 endpoints[endpoint['application-name']] = endpoint['relation']
827 return endpoints == result.endpoints
828
829 return await self._wait_for_new('relation', None, predicate)
830
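# Example (illustrative sketch): relate two applications that are assumed to
# already be deployed in the model.
#
#     await model.add_relation('wordpress:db', 'mysql')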
831 def add_space(self, name, *cidrs):
832 """Add a new network space.
833
834 Adds a new space with the given name and associates the given
835 (optional) list of existing subnet CIDRs with it.
836
837 :param str name: Name of the space
838 :param \*cidrs: Optional list of existing subnet CIDRs
839
840 """
841 raise NotImplementedError()
842
843 async def add_ssh_key(self, user, key):
844 """Add a public SSH key to this model.
845
846 :param str user: The username of the user
847 :param str key: The public ssh key
848
849 """
850 key_facade = client.KeyManagerFacade.from_connection(self.connection)
851 return await key_facade.AddKeys([key], user)
852 add_ssh_keys = add_ssh_key
853
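# Example (illustrative sketch): register a public key for the 'admin' user;
# the key material shown is a placeholder.
#
#     await model.add_ssh_key('admin', 'ssh-rsa AAAAB3Nza... user@host')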
854 def add_subnet(self, cidr_or_id, space, *zones):
855 """Add an existing subnet to this model.
856
857 :param str cidr_or_id: CIDR or provider ID of the existing subnet
858 :param str space: Network space with which to associate
859 :param str \*zones: Zone(s) in which the subnet resides
860
861 """
862 raise NotImplementedError()
863
864 def get_backups(self):
865 """Retrieve metadata for backups in this model.
866
867 """
868 raise NotImplementedError()
869
870 def block(self, *commands):
871 """Add a new block to this model.
872
873 :param str \*commands: The commands to block. Valid values are
874 'all-changes', 'destroy-model', 'remove-object'
875
876 """
877 raise NotImplementedError()
878
879 def get_blocks(self):
880 """List blocks for this model.
881
882 """
883 raise NotImplementedError()
884
885 def get_cached_images(self, arch=None, kind=None, series=None):
886 """Return a list of cached OS images.
887
888 :param str arch: Filter by image architecture
889 :param str kind: Filter by image kind, e.g. 'lxd'
890 :param str series: Filter by image series, e.g. 'xenial'
891
892 """
893 raise NotImplementedError()
894
895 def create_backup(self, note=None, no_download=False):
896 """Create a backup of this model.
897
898 :param str note: A note to store with the backup
899 :param bool no_download: Do not download the backup archive
900 :return str: Path to downloaded archive
901
902 """
903 raise NotImplementedError()
904
905 def create_storage_pool(self, name, provider_type, **pool_config):
906 """Create or define a storage pool.
907
908 :param str name: Name to give the storage pool
909 :param str provider_type: Pool provider type
910 :param \*\*pool_config: key/value pool configuration pairs
911
912 """
913 raise NotImplementedError()
914
915 def debug_log(
916 self, no_tail=False, exclude_module=None, include_module=None,
917 include=None, level=None, limit=0, lines=10, replay=False,
918 exclude=None):
919 """Get log messages for this model.
920
921 :param bool no_tail: Stop after returning existing log messages
922 :param list exclude_module: Do not show log messages for these logging
923 modules
924 :param list include_module: Only show log messages for these logging
925 modules
926 :param list include: Only show log messages for these entities
927 :param str level: Log level to show, valid options are 'TRACE',
928 'DEBUG', 'INFO', 'WARNING', 'ERROR'
929 :param int limit: Return at most this many of the most recent
930 (possibly filtered) lines
931 :param int lines: Yield this many of the most recent lines, and keep
932 yielding
933 :param bool replay: Yield the entire log, and keep yielding
934 :param list exclude: Do not show log messages for these entities
935
936 """
937 raise NotImplementedError()
938
939 def _get_series(self, entity_url, entity):
940 # try to get the series from the provided charm URL
941 if entity_url.startswith('cs:'):
942 parts = entity_url[3:].split('/')
943 else:
944 parts = entity_url.split('/')
945 if parts[0].startswith('~'):
946 parts.pop(0)
947 if len(parts) > 1:
948 # series was specified in the URL
949 return parts[0]
950 # series was not supplied at all, so use the newest
951 # supported series according to the charm store
952 ss = entity['Meta']['supported-series']
953 return ss['SupportedSeries'][0]
954
955 async def deploy(
956 self, entity_url, application_name=None, bind=None, budget=None,
957 channel=None, config=None, constraints=None, force=False,
958 num_units=1, plan=None, resources=None, series=None, storage=None,
959 to=None):
960 """Deploy a new service or bundle.
961
962 :param str entity_url: Charm or bundle url
963 :param str application_name: Name to give the service
964 :param dict bind: <charm endpoint>:<network space> pairs
965 :param dict budget: <budget name>:<limit> pairs
966 :param str channel: Charm store channel from which to retrieve
967 the charm or bundle, e.g. 'development'
968 :param dict config: Charm configuration dictionary
969 :param constraints: Service constraints
970 :type constraints: :class:`juju.Constraints`
971 :param bool force: Allow charm to be deployed to a machine running
972 an unsupported series
973 :param int num_units: Number of units to deploy
974 :param str plan: Plan under which to deploy charm
975 :param dict resources: <resource name>:<file path> pairs
976 :param str series: Series on which to deploy
977 :param dict storage: Storage constraints TODO how do these look?
978 :param to: Placement directive as a string. For example:
979
980 '23' - place on machine 23
981 'lxd:7' - place in new lxd container on machine 7
982 '24/lxd/3' - place in container 3 on machine 24
983
984 If None, a new machine is provisioned.
985
986
987 TODO::
988
989 - support local resources
990
991 """
992 if storage:
993 storage = {
994 k: client.Constraints(**v)
995 for k, v in storage.items()
996 }
997
998 is_local = (
999 entity_url.startswith('local:') or
1000 os.path.isdir(entity_url)
1001 )
1002 if is_local:
1003 entity_id = entity_url
1004 else:
1005 entity = await self.charmstore.entity(entity_url)
1006 entity_id = entity['Id']
1007
1008 client_facade = client.ClientFacade.from_connection(self.connection)
1009
1010 is_bundle = ((is_local and
1011 (Path(entity_id) / 'bundle.yaml').exists()) or
1012 (not is_local and 'bundle/' in entity_id))
1013
1014 if is_bundle:
1015 handler = BundleHandler(self)
1016 await handler.fetch_plan(entity_id)
1017 await handler.execute_plan()
1018 extant_apps = {app for app in self.applications}
1019 pending_apps = set(handler.applications) - extant_apps
1020 if pending_apps:
1021 # new apps will usually be in the model by now, but if some
1022 # haven't made it yet we'll need to wait on them to be added
1023 await asyncio.gather(*[
1024 asyncio.ensure_future(
1025 self._wait_for_new('application', app_name),
1026 loop=self.loop)
1027 for app_name in pending_apps
1028 ], loop=self.loop)
1029 return [app for name, app in self.applications.items()
1030 if name in handler.applications]
1031 else:
1032 if not is_local:
1033 if not application_name:
1034 application_name = entity['Meta']['charm-metadata']['Name']
1035 if not series:
1036 series = self._get_series(entity_url, entity)
1037 if not channel:
1038 channel = 'stable'
1039 await client_facade.AddCharm(channel, entity_id)
1040 # XXX: we're dropping local resources here, but we don't
1041 # actually support them yet anyway
1042 resources = await self._add_store_resources(application_name,
1043 entity_id,
1044 entity)
1045 else:
1046 # We have a local charm dir that needs to be uploaded
1047 charm_dir = os.path.abspath(
1048 os.path.expanduser(entity_id))
1049 series = series or get_charm_series(charm_dir)
1050 if not series:
1051 raise JujuError(
1052 "Couldn't determine series for charm at {}. "
1053 "Pass a 'series' kwarg to Model.deploy().".format(
1054 charm_dir))
1055 entity_id = await self.add_local_charm_dir(charm_dir, series)
1056 return await self._deploy(
1057 charm_url=entity_id,
1058 application=application_name,
1059 series=series,
1060 config=config or {},
1061 constraints=constraints,
1062 endpoint_bindings=bind,
1063 resources=resources,
1064 storage=storage,
1065 channel=channel,
1066 num_units=num_units,
1067 placement=parse_placement(to)
1068 )
1069
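# Example (illustrative sketch): deploy a charm-store charm and a local charm
# directory; the charm id and the path are placeholders.
#
#     app = await model.deploy('cs:trusty/ubuntu', application_name='ubuntu')
#     local_app = await model.deploy('./my-charm', series='xenial')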
1070 async def _add_store_resources(self, application, entity_url, entity=None):
1071 if not entity:
1072 # avoid extra charm store call if one was already made
1073 entity = await self.charmstore.entity(entity_url)
1074 resources = [
1075 {
1076 'description': resource['Description'],
1077 'fingerprint': resource['Fingerprint'],
1078 'name': resource['Name'],
1079 'path': resource['Path'],
1080 'revision': resource['Revision'],
1081 'size': resource['Size'],
1082 'type_': resource['Type'],
1083 'origin': 'store',
1084 } for resource in entity['Meta']['resources']
1085 ]
1086
1087 if not resources:
1088 return None
1089
1090 resources_facade = client.ResourcesFacade.from_connection(
1091 self.connection)
1092 response = await resources_facade.AddPendingResources(
1093 tag.application(application),
1094 entity_url,
1095 [client.CharmResource(**resource) for resource in resources])
1096 resource_map = {resource['name']: pid
1097 for resource, pid
1098 in zip(resources, response.pending_ids)}
1099 return resource_map
1100
1101 async def _deploy(self, charm_url, application, series, config,
1102 constraints, endpoint_bindings, resources, storage,
1103 channel=None, num_units=None, placement=None):
1104 """Logic shared between `Model.deploy` and `BundleHandler.deploy`.
1105 """
1106 log.info('Deploying %s', charm_url)
1107
1108 # stringify all config values for API, and convert to YAML
1109 config = {k: str(v) for k, v in config.items()}
1110 config = yaml.dump({application: config},
1111 default_flow_style=False)
1112
1113 app_facade = client.ApplicationFacade.from_connection(
1114 self.connection)
1115
1116 app = client.ApplicationDeploy(
1117 charm_url=charm_url,
1118 application=application,
1119 series=series,
1120 channel=channel,
1121 config_yaml=config,
1122 constraints=parse_constraints(constraints),
1123 endpoint_bindings=endpoint_bindings,
1124 num_units=num_units,
1125 resources=resources,
1126 storage=storage,
1127 placement=placement
1128 )
1129
1130 result = await app_facade.Deploy([app])
1131 errors = [r.error.message for r in result.results if r.error]
1132 if errors:
1133 raise JujuError('\n'.join(errors))
1134 return await self._wait_for_new('application', application)
1135
1136 async def destroy(self):
1137 """Terminate all machines and resources for this model.
1138 This is already implemented in controller.py.
1139 """
1140 raise NotImplementedError()
1141
1142 async def destroy_unit(self, *unit_names):
1143 """Destroy units by name.
1144
1145 """
1146 app_facade = client.ApplicationFacade.from_connection(self.connection)
1147
1148 log.debug(
1149 'Destroying unit%s %s',
1150 's' if len(unit_names) > 1 else '',
1151 ' '.join(unit_names))
1152
1153 return await app_facade.DestroyUnits(list(unit_names))
1154 destroy_units = destroy_unit
1155
1156 def get_backup(self, archive_id):
1157 """Download a backup archive file.
1158
1159 :param str archive_id: The id of the archive to download
1160 :return str: Path to the archive file
1161
1162 """
1163 raise NotImplementedError()
1164
1165 def enable_ha(
1166 self, num_controllers=0, constraints=None, series=None, to=None):
1167 """Ensure sufficient controllers exist to provide redundancy.
1168
1169 :param int num_controllers: Number of controllers to make available
1170 :param constraints: Constraints to apply to the controller machines
1171 :type constraints: :class:`juju.Constraints`
1172 :param str series: Series of the controller machines
1173 :param list to: Placement directives for controller machines, e.g.::
1174
1175 '23' - machine 23
1176 'lxc:7' - new lxc container on machine 7
1177 '24/lxc/3' - lxc container 3 on machine 24
1178
1179 If None, a new machine is provisioned.
1180
1181 """
1182 raise NotImplementedError()
1183
1184 def get_config(self):
1185 """Return the configuration settings for this model.
1186
1187 """
1188 raise NotImplementedError()
1189
1190 def get_constraints(self):
1191 """Return the machine constraints for this model.
1192
1193 """
1194 raise NotImplementedError()
1195
1196 async def grant(self, username, acl='read'):
1197 """Grant a user access to this model.
1198
1199 :param str username: Username
1200 :param str acl: Access control ('read' or 'write')
1201
1202 """
1203 controller_conn = await self.connection.controller()
1204 model_facade = client.ModelManagerFacade.from_connection(
1205 controller_conn)
1206 user = tag.user(username)
1207 model = tag.model(self.info.uuid)
1208 changes = client.ModifyModelAccess(acl, 'grant', model, user)
1209 await self.revoke(username)
1210 return await model_facade.ModifyModelAccess([changes])
1211
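# Example (illustrative sketch): give a user write access to this model and
# later revoke it; the username is a placeholder.
#
#     await model.grant('alice', acl='write')
#     await model.revoke('alice')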
1212 def import_ssh_key(self, identity):
1213 """Add a public SSH key from a trusted indentity source to this model.
1214
1215 :param str identity: User identity in the form <lp|gh>:<username>
1216
1217 """
1218 raise NotImplementedError()
1219 import_ssh_keys = import_ssh_key
1220
1221 async def get_machines(self):
1222 """Return list of machines in this model.
1223
1224 """
1225 return list(self.state.machines.keys())
1226
1227 def get_shares(self):
1228 """Return list of all users with access to this model.
1229
1230 """
1231 raise NotImplementedError()
1232
1233 def get_spaces(self):
1234 """Return list of all known spaces, including associated subnets.
1235
1236 """
1237 raise NotImplementedError()
1238
1239 async def get_ssh_key(self, raw_ssh=False):
1240 """Return known SSH keys for this model.
1241 :param bool raw_ssh: if True, returns the raw ssh key,
1242 else its fingerprint
1243
1244 """
1245 key_facade = client.KeyManagerFacade.from_connection(self.connection)
1246 entity = {'tag': tag.model(self.info.uuid)}
1247 entities = client.Entities([entity])
1248 return await key_facade.ListKeys(entities, raw_ssh)
1249 get_ssh_keys = get_ssh_key
1250
1251 def get_storage(self, filesystem=False, volume=False):
1252 """Return details of storage instances.
1253
1254 :param bool filesystem: Include filesystem storage
1255 :param bool volume: Include volume storage
1256
1257 """
1258 raise NotImplementedError()
1259
1260 def get_storage_pools(self, names=None, providers=None):
1261 """Return list of storage pools.
1262
1263 :param list names: Only include pools with these names
1264 :param list providers: Only include pools for these providers
1265
1266 """
1267 raise NotImplementedError()
1268
1269 def get_subnets(self, space=None, zone=None):
1270 """Return list of known subnets.
1271
1272 :param str space: Only include subnets in this space
1273 :param str zone: Only include subnets in this zone
1274
1275 """
1276 raise NotImplementedError()
1277
1278 def remove_blocks(self):
1279 """Remove all blocks from this model.
1280
1281 """
1282 raise NotImplementedError()
1283
1284 def remove_backup(self, backup_id):
1285 """Delete a backup.
1286
1287 :param str backup_id: The id of the backup to remove
1288
1289 """
1290 raise NotImplementedError()
1291
1292 def remove_cached_images(self, arch=None, kind=None, series=None):
1293 """Remove cached OS images.
1294
1295 :param str arch: Architecture of the images to remove
1296 :param str kind: Image kind to remove, e.g. 'lxd'
1297 :param str series: Image series to remove, e.g. 'xenial'
1298
1299 """
1300 raise NotImplementedError()
1301
1302 def remove_machine(self, *machine_ids):
1303 """Remove a machine from this model.
1304
1305 :param str \*machine_ids: Ids of the machines to remove
1306
1307 """
1308 raise NotImplementedError()
1309 remove_machines = remove_machine
1310
1311 async def remove_ssh_key(self, user, key):
1312 """Remove a public SSH key(s) from this model.
1313
1314 :param str key: Full ssh key
1315 :param str user: Juju user to which the key is registered
1316
1317 """
1318 key_facade = client.KeyManagerFacade.from_connection(self.connection)
1319 key = base64.b64decode(bytes(key.strip().split()[1].encode('ascii')))
1320 key = hashlib.md5(key).hexdigest()
1321 key = ':'.join(a+b for a, b in zip(key[::2], key[1::2]))
1322 await key_facade.DeleteKeys([key], user)
1323 remove_ssh_keys = remove_ssh_key
1324
1325 def restore_backup(
1326 self, bootstrap=False, constraints=None, archive=None,
1327 backup_id=None, upload_tools=False):
1328 """Restore a backup archive to a new controller.
1329
1330 :param bool bootstrap: Bootstrap a new state machine
1331 :param constraints: Model constraints
1332 :type constraints: :class:`juju.Constraints`
1333 :param str archive: Path to backup archive to restore
1334 :param str backup_id: Id of backup to restore
1335 :param bool upload_tools: Upload tools if bootstrapping a new machine
1336
1337 """
1338 raise NotImplementedError()
1339
1340 def retry_provisioning(self):
1341 """Retry provisioning for failed machines.
1342
1343 """
1344 raise NotImplementedError()
1345
1346 async def revoke(self, username):
1347 """Revoke a user's access to this model.
1348
1349 :param str username: Username to revoke
1350
1351 """
1352 controller_conn = await self.connection.controller()
1353 model_facade = client.ModelManagerFacade.from_connection(
1354 controller_conn)
1355 user = tag.user(username)
1356 model = tag.model(self.info.uuid)
1357 changes = client.ModifyModelAccess('read', 'revoke', model, user)
1358 return await model_facade.ModifyModelAccess([changes])
1359
1360 def run(self, command, timeout=None):
1361 """Run command on all machines in this model.
1362
1363 :param str command: The command to run
1364 :param int timeout: Time to wait before command is considered failed
1365
1366 """
1367 raise NotImplementedError()
1368
1369 def set_config(self, **config):
1370 """Set configuration keys on this model.
1371
1372 :param \*\*config: Config key/values
1373
1374 """
1375 raise NotImplementedError()
1376
1377 def set_constraints(self, constraints):
1378 """Set machine constraints on this model.
1379
1380 :param :class:`juju.Constraints` constraints: Machine constraints
1381
1382 """
1383 raise NotImplementedError()
1384
1385 def get_action_output(self, action_uuid, wait=-1):
1386 """Get the results of an action by ID.
1387
1388 :param str action_uuid: Id of the action
1389 :param int wait: Time in seconds to wait for action to complete
1390
1391 """
1392 raise NotImplementedError()
1393
1394 def get_action_status(self, uuid_or_prefix=None, name=None):
1395 """Get the status of all actions, filtered by ID, ID prefix, or action name.
1396
1397 :param str uuid_or_prefix: Filter by action uuid or prefix
1398 :param str name: Filter by action name
1399
1400 """
1401 raise NotImplementedError()
1402
1403 def get_budget(self, budget_name):
1404 """Get budget usage info.
1405
1406 :param str budget_name: Name of budget
1407
1408 """
1409 raise NotImplementedError()
1410
1411 async def get_status(self, filters=None, utc=False):
1412 """Return the status of the model.
1413
1414 :param str filters: Optional list of applications, units, or machines
1415 to include, which can use wildcards ('*').
1416 :param bool utc: Display time as UTC in RFC3339 format
1417
1418 """
1419 client_facade = client.ClientFacade.from_connection(self.connection)
1420 return await client_facade.FullStatus(filters)
1421
1422 def sync_tools(
1423 self, all_=False, destination=None, dry_run=False, public=False,
1424 source=None, stream=None, version=None):
1425 """Copy Juju tools into this model.
1426
1427 :param bool all_: Copy all versions, not just the latest
1428 :param str destination: Path to local destination directory
1429 :param bool dry_run: Don't do the actual copy
1430 :param bool public: Tools are for a public cloud, so generate mirrors
1431 information
1432 :param str source: Path to local source directory
1433 :param str stream: Simplestreams stream for which to sync metadata
1434 :param str version: Copy a specific major.minor version
1435
1436 """
1437 raise NotImplementedError()
1438
1439 def unblock(self, *commands):
1440 """Unblock an operation that would alter this model.
1441
1442 :param str \*commands: The commands to unblock. Valid values are
1443 'all-changes', 'destroy-model', 'remove-object'
1444
1445 """
1446 raise NotImplementedError()
1447
1448 def unset_config(self, *keys):
1449 """Unset configuration on this model.
1450
1451 :param str \*keys: The keys to unset
1452
1453 """
1454 raise NotImplementedError()
1455
1456 def upgrade_gui(self):
1457 """Upgrade the Juju GUI for this model.
1458
1459 """
1460 raise NotImplementedError()
1461
1462 def upgrade_juju(
1463 self, dry_run=False, reset_previous_upgrade=False,
1464 upload_tools=False, version=None):
1465 """Upgrade Juju on all machines in a model.
1466
1467 :param bool dry_run: Don't do the actual upgrade
1468 :param bool reset_previous_upgrade: Clear the previous (incomplete)
1469 upgrade status
1470 :param bool upload_tools: Upload local version of tools
1471 :param str version: Upgrade to a specific version
1472
1473 """
1474 raise NotImplementedError()
1475
1476 def upload_backup(self, archive_path):
1477 """Store a backup archive remotely in Juju.
1478
1479 :param str archive_path: Path to local archive
1480
1481 """
1482 raise NotImplementedError()
1483
1484 @property
1485 def charmstore(self):
1486 return self._charmstore
1487
1488 async def get_metrics(self, *tags):
1489 """Retrieve metrics.
1490
1491 :param str \*tags: Tags of entities from which to retrieve metrics.
1492 No tags retrieves the metrics of all units in the model.
1493 :return: Dictionary of unit_name:metrics
1494
1495 """
1496 log.debug("Retrieving metrics for %s",
1497 ', '.join(tags) if tags else "all units")
1498
1499 metrics_facade = client.MetricsDebugFacade.from_connection(
1500 self.connection)
1501
1502 entities = [client.Entity(tag) for tag in tags]
1503 metrics_result = await metrics_facade.GetMetrics(entities)
1504
1505 metrics = collections.defaultdict(list)
1506
1507 for entity_metrics in metrics_result.results:
1508 error = entity_metrics.error
1509 if error:
1510 if "is not a valid tag" in error:
1511 raise ValueError(error.message)
1512 else:
1513 raise Exception(error.message)
1514
1515 for metric in entity_metrics.metrics:
1516 metrics[metric.unit].append(vars(metric))
1517
1518 return metrics
1519
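# Example (illustrative sketch): fetch metrics for one unit by entity tag;
# the unit name and metric keys are placeholders.
#
#     metrics = await model.get_metrics('unit-mysql-0')
#     for metric in metrics['mysql/0']:
#         print(metric['key'], metric['value'])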
1520
1521 def get_charm_series(path):
1522 """Inspects the charm directory at ``path`` and returns a default
1523 series from its metadata.yaml (the first item in the 'series' list).
1524
1525 Returns None if no series can be determined.
1526
1527 """
1528 md = Path(path) / "metadata.yaml"
1529 if not md.exists():
1530 return None
1531 data = yaml.safe_load(md.open())
1532 series = data.get('series')
1533 return series[0] if series else None
1534
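# Example (illustrative sketch): for a charm directory whose metadata.yaml
# contains the snippet below, get_charm_series(path) returns 'xenial'.
#
#     name: my-charm
#     series:
#       - xenial
#       - trusty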
1535
1536 class BundleHandler(object):
1537 """
1538 Handle bundles by using the API to translate bundle YAML into a plan of
1539 steps and then dispatching each of those using the API.
1540 """
1541 def __init__(self, model):
1542 self.model = model
1543 self.charmstore = model.charmstore
1544 self.plan = []
1545 self.references = {}
1546 self._units_by_app = {}
1547 for unit_name, unit in model.units.items():
1548 app_units = self._units_by_app.setdefault(unit.application, [])
1549 app_units.append(unit_name)
1550 self.client_facade = client.ClientFacade.from_connection(
1551 model.connection)
1552 self.app_facade = client.ApplicationFacade.from_connection(
1553 model.connection)
1554 self.ann_facade = client.AnnotationsFacade.from_connection(
1555 model.connection)
1556
1557 async def _handle_local_charms(self, bundle):
1558 """Search for references to local charms (i.e. filesystem paths)
1559 in the bundle. Upload the local charms to the model, and replace
1560 the filesystem paths with appropriate 'local:' paths in the bundle.
1561
1562 Return the modified bundle.
1563
1564 :param dict bundle: Bundle dictionary
1565 :return: Modified bundle dictionary
1566
1567 """
1568 apps, args = [], []
1569
1570 default_series = bundle.get('series')
1571 for app_name in self.applications:
1572 app_dict = bundle['services'][app_name]
1573 charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
1574 if not os.path.isdir(charm_dir):
1575 continue
1576 series = (
1577 app_dict.get('series') or
1578 default_series or
1579 get_charm_series(charm_dir)
1580 )
1581 if not series:
1582 raise JujuError(
1583 "Couldn't determine series for charm at {}. "
1584 "Add a 'series' key to the bundle.".format(charm_dir))
1585
1586 # Keep track of what we need to update. We keep a list of apps
1587 # that need to be updated, and a corresponding list of args
1588 # needed to update those apps.
1589 apps.append(app_name)
1590 args.append((charm_dir, series))
1591
1592 if apps:
1593 # If we have apps to update, spawn all the coroutines concurrently
1594 # and wait for them to finish.
1595 charm_urls = await asyncio.gather(*[
1596 self.model.add_local_charm_dir(*params)
1597 for params in args
1598 ], loop=self.model.loop)
1599 # Update the 'charm:' entry for each app with the new 'local:' url.
1600 for app_name, charm_url in zip(apps, charm_urls):
1601 bundle['services'][app_name]['charm'] = charm_url
1602
1603 return bundle
1604
1605 async def fetch_plan(self, entity_id):
1606 is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id)
1607 if is_local:
1608 bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text()
1609 else:
1610 bundle_yaml = await self.charmstore.files(entity_id,
1611 filename='bundle.yaml',
1612 read_file=True)
1613 self.bundle = yaml.safe_load(bundle_yaml)
1614 self.bundle = await self._handle_local_charms(self.bundle)
1615
1616 self.plan = await self.client_facade.GetBundleChanges(
1617 yaml.dump(self.bundle))
1618
1619 async def execute_plan(self):
1620 for step in self.plan.changes:
1621 method = getattr(self, step.method)
1622 result = await method(*step.args)
1623 self.references[step.id_] = result
1624
1625 @property
1626 def applications(self):
1627 return list(self.bundle['services'].keys())
1628
1629 def resolve(self, reference):
1630 if reference and reference.startswith('$'):
1631 reference = self.references[reference[1:]]
1632 return reference
1633
1634 async def addCharm(self, charm, series):
1635 """
1636 :param charm string:
1637 Charm holds the URL of the charm to be added.
1638
1639 :param series string:
1640 Series holds the series of the charm to be added
1641 if the charm default is not sufficient.
1642 """
1643 # We don't add local charms because they've already been added
1644 # by self._handle_local_charms
1645 if charm.startswith('local:'):
1646 return charm
1647
1648 entity_id = await self.charmstore.entityId(charm)
1649 log.debug('Adding %s', entity_id)
1650 await self.client_facade.AddCharm(None, entity_id)
1651 return entity_id
1652
1653 async def addMachines(self, params=None):
1654 """
1655 :param params dict:
1656 Dictionary specifying the machine to add. All keys are optional.
1657 Keys include:
1658
1659 series: string specifying the machine OS series.
1660
1661 constraints: string holding machine constraints, if any. We'll
1662 parse this into the json friendly dict that the juju api
1663 expects.
1664
1665 container_type: string holding the type of the container (for
1666 instance ""lxd" or kvm"). It is not specified for top level
1667 machines.
1668
1669 parent_id: string holding a placeholder pointing to another
1670 machine change or to a unit change. This value is only
1671 specified in the case this machine is a container, in
1672 which case also ContainerType is set.
1673
1674 """
1675 params = params or {}
1676
1677 # Normalize keys
1678 params = {normalize_key(k): params[k] for k in params.keys()}
1679
1680 # Fix up values, as necessary.
1681 if 'parent_id' in params:
1682 params['parent_id'] = self.resolve(params['parent_id'])
1683
1684 params['constraints'] = parse_constraints(
1685 params.get('constraints'))
1686 params['jobs'] = params.get('jobs', ['JobHostUnits'])
1687
1688 if params.get('container_type') == 'lxc':
1689 log.warning('Juju 2.0 does not support lxc containers. '
1690 'Converting containers to lxd.')
1691 params['container_type'] = 'lxd'
1692
1693 # Submit the request.
1694 params = client.AddMachineParams(**params)
1695 results = await self.client_facade.AddMachines([params])
1696 error = results.machines[0].error
1697 if error:
1698 raise ValueError("Error adding machine: %s" % error.message)
1699 machine = results.machines[0].machine
1700 log.debug('Added new machine %s', machine)
1701 return machine
1702
1703 async def addRelation(self, endpoint1, endpoint2):
1704 """
1705 :param endpoint1 string:
1706 :param endpoint2 string:
1707 Endpoint1 and Endpoint2 hold relation endpoints in the
1708 "application:interface" form, where the application is always a
1709 placeholder pointing to an application change, and the interface is
1710 optional. Examples are "$deploy-42:web" or just "$deploy-42".
1711 """
1712 endpoints = [endpoint1, endpoint2]
1713 # resolve indirect references
1714 for i in range(len(endpoints)):
1715 parts = endpoints[i].split(':')
1716 parts[0] = self.resolve(parts[0])
1717 endpoints[i] = ':'.join(parts)
1718
1719 log.info('Relating %s <-> %s', *endpoints)
1720 return await self.model.add_relation(*endpoints)
1721
1722 async def deploy(self, charm, series, application, options, constraints,
1723 storage, endpoint_bindings, resources):
1724 """
1725 :param charm string:
1726 Charm holds the URL of the charm to be used to deploy this
1727 application.
1728
1729 :param series string:
1730 Series holds the series of the application to be deployed
1731 if the charm default is not sufficient.
1732
1733 :param application string:
1734 Application holds the application name.
1735
1736 :param options map[string]interface{}:
1737 Options holds application options.
1738
1739 :param constraints string:
1740 Constraints holds the optional application constraints.
1741
1742 :param storage map[string]string:
1743 Storage holds the optional storage constraints.
1744
1745 :param endpoint_bindings map[string]string:
1746 EndpointBindings holds the optional endpoint bindings
1747
1748 :param resources map[string]int:
1749 Resources identifies the revision to use for each resource
1750 of the application's charm.
1751 """
1752 # resolve indirect references
1753 charm = self.resolve(charm)
1754 # the bundle plan doesn't actually do anything with resources, even
1755 # though it ostensibly gives us something (None) for that param
1756 if not charm.startswith('local:'):
1757 resources = await self.model._add_store_resources(application,
1758 charm)
1759 await self.model._deploy(
1760 charm_url=charm,
1761 application=application,
1762 series=series,
1763 config=options,
1764 constraints=constraints,
1765 endpoint_bindings=endpoint_bindings,
1766 resources=resources,
1767 storage=storage,
1768 )
1769 return application
1770
1771 async def addUnit(self, application, to):
1772 """
1773 :param application string:
1774 Application holds the application placeholder name for which a unit
1775 is added.
1776
1777 :param to string:
1778 To holds the optional location where to add the unit, as a
1779 placeholder pointing to another unit change or to a machine change.
1780 """
1781 application = self.resolve(application)
1782 placement = self.resolve(to)
1783 if self._units_by_app.get(application):
1784 # enough units for this application already exist;
1785 # claim one, and carry on
1786 # NB: this should probably honor placement, but the juju client
1787 # doesn't, so we're not bothering, either
1788 unit_name = self._units_by_app[application].pop()
1789 log.debug('Reusing unit %s for %s', unit_name, application)
1790 return self.model.units[unit_name]
1791
1792 log.debug('Adding new unit for %s%s', application,
1793 ' to %s' % placement if placement else '')
1794 return await self.model.applications[application].add_unit(
1795 count=1,
1796 to=placement,
1797 )
1798
1799 async def expose(self, application):
1800 """
1801 :param application string:
1802 Application holds the placeholder name of the application that must
1803 be exposed.
1804 """
1805 application = self.resolve(application)
1806 log.info('Exposing %s', application)
1807 return await self.model.applications[application].expose()
1808
1809 async def setAnnotations(self, id_, entity_type, annotations):
1810 """
1811 :param id_ string:
1812 Id is the placeholder for the application or machine change
1813 corresponding to the entity to be annotated.
1814
1815 :param entity_type EntityType:
1816 EntityType holds the type of the entity, "application" or
1817 "machine".
1818
1819 :param annotations map[string]string:
1820 Annotations holds the annotations as key/value pairs.
1821 """
1822 entity_id = self.resolve(id_)
1823 try:
1824 entity = self.model.state.get_entity(entity_type, entity_id)
1825 except KeyError:
1826 entity = await self.model._wait_for_new(entity_type, entity_id)
1827 return await entity.set_annotations(annotations)
1828
1829
1830 class CharmStore(object):
1831 """
1832 Async wrapper around theblues.charmstore.CharmStore
1833 """
1834 def __init__(self, loop):
1835 self.loop = loop
1836 self._cs = theblues.charmstore.CharmStore(timeout=5)
1837
1838 def __getattr__(self, name):
1839 """
1840 Wrap method calls in coroutines that use run_in_executor to make them
1841 async.
1842 """
1843 attr = getattr(self._cs, name)
1844 if not callable(attr):
1845 wrapper = partial(getattr, self._cs, name)
1846 setattr(self, name, wrapper)
1847 else:
1848 async def coro(*args, **kwargs):
1849 method = partial(attr, *args, **kwargs)
1850 for attempt in range(1, 4):
1851 try:
1852 return await self.loop.run_in_executor(None, method)
1853 except theblues.errors.ServerError:
1854 if attempt == 3:
1855 raise
1856 await asyncio.sleep(1, loop=self.loop)
1857 setattr(self, name, coro)
1858 wrapper = coro
1859 return wrapper
1860
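# Example (illustrative sketch): theblues' blocking entity() call becomes
# awaitable through the wrapper above; 'cs:ubuntu' is an illustrative charm id.
#
#     cs = CharmStore(asyncio.get_event_loop())
#     entity = await cs.entity('cs:ubuntu')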
1861
1862 class CharmArchiveGenerator(object):
1863 def __init__(self, path):
1864 self.path = os.path.abspath(os.path.expanduser(path))
1865
1866 def make_archive(self, path):
1867 """Create archive of directory and write to ``path``.
1868
1869 :param path: Path to archive
1870
1871 Ignored::
1872
1873 * build/\* - This is used for packing the charm itself and any
1874 similar tasks.
1875 * \*/.\* - Hidden files are all ignored for now. This will most
1876 likely be changed into a specific ignore list
1877 (.bzr, etc)
1878
1879 """
1880 zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
1881 for dirpath, dirnames, filenames in os.walk(self.path):
1882 relative_path = dirpath[len(self.path) + 1:]
1883 if relative_path and not self._ignore(relative_path):
1884 zf.write(dirpath, relative_path)
1885 for name in filenames:
1886 archive_name = os.path.join(relative_path, name)
1887 if not self._ignore(archive_name):
1888 real_path = os.path.join(dirpath, name)
1889 self._check_type(real_path)
1890 if os.path.islink(real_path):
1891 self._check_link(real_path)
1892 self._write_symlink(
1893 zf, os.readlink(real_path), archive_name)
1894 else:
1895 zf.write(real_path, archive_name)
1896 zf.close()
1897 return path
1898
1899 def _check_type(self, path):
1900 """Check the path
1901 """
1902 s = os.stat(path)
1903 if stat.S_ISDIR(s.st_mode) or stat.S_ISREG(s.st_mode):
1904 return path
1905 raise ValueError("Invalid Charm at % %s" % (
1906 path, "Invalid file type for a charm"))
1907
1908 def _check_link(self, path):
1909 link_path = os.readlink(path)
1910 if link_path[0] == "/":
1911 raise ValueError(
1912 "Invalid Charm at %s: %s" % (
1913 path, "Absolute links are invalid"))
1914 path_dir = os.path.dirname(path)
1915 link_path = os.path.join(path_dir, link_path)
1916 if not link_path.startswith(os.path.abspath(self.path)):
1917 raise ValueError(
1918 "Invalid charm at %s %s" % (
1919 path, "Only internal symlinks are allowed"))
1920
1921 def _write_symlink(self, zf, link_target, link_path):
1922 """Package symlinks with appropriate zipfile metadata."""
1923 info = zipfile.ZipInfo()
1924 info.filename = link_path
1925 info.create_system = 3
1926 # Magic code for symlinks / py2/3 compat
1927 # 2716663808 = (stat.S_IFLNK | 0o755) << 16
1928 info.external_attr = 2716663808
1929 zf.writestr(info, link_target)
1930
1931 def _ignore(self, path):
1932 if path == "build" or path.startswith("build/"):
1933 return True
1934 if path.startswith('.'):
1935 return True