11 from concurrent
.futures
import CancelledError
12 from functools
import partial
13 from pathlib
import Path
16 import theblues
.charmstore
17 import theblues
.errors
20 from .client
import client
21 from .client
import watcher
22 from .client
import connection
23 from .constraints
import parse
as parse_constraints
, normalize_key
24 from .delta
import get_entity_delta
25 from .delta
import get_entity_class
26 from .exceptions
import DeadEntityException
27 from .errors
import JujuError
, JujuAPIError
28 from .placement
import parse
as parse_placement
30 log
= logging
.getLogger(__name__
)
33 class _Observer(object):
34 """Wrapper around an observer callable.
36 This wrapper allows filter criteria to be associated with the
37 callable so that it's only called for changes that meet the criteria.
40 def __init__(self
, callable_
, entity_type
, action
, entity_id
, predicate
):
41 self
.callable_
= callable_
42 self
.entity_type
= entity_type
44 self
.entity_id
= entity_id
45 self
.predicate
= predicate
47 self
.entity_id
= str(self
.entity_id
)
48 if not self
.entity_id
.startswith('^'):
49 self
.entity_id
= '^' + self
.entity_id
50 if not self
.entity_id
.endswith('$'):
53 async def __call__(self
, delta
, old
, new
, model
):
54 await self
.callable_(delta
, old
, new
, model
)
56 def cares_about(self
, delta
):
57 """Return True if this observer "cares about" (i.e. wants to be
58 called) for a this delta.
61 if (self
.entity_id
and delta
.get_id() and
62 not re
.match(self
.entity_id
, str(delta
.get_id()))):
65 if self
.entity_type
and self
.entity_type
!= delta
.entity
:
68 if self
.action
and self
.action
!= delta
.type:
71 if self
.predicate
and not self
.predicate(delta
):
class ModelObserver(object):
    """Base class for creating observers that react to changes in a model.

    Subclasses may define methods named ``on_<entity>_<action>`` (e.g.
    ``on_application_add``) to receive only specific change events;
    anything without a specific handler falls through to ``on_change``.

    """
    async def __call__(self, delta, old, new, model):
        # Dispatch to a specific handler if one is defined on the
        # subclass, otherwise fall back to the generic on_change.
        handler_name = 'on_{}_{}'.format(delta.entity, delta.type)
        method = getattr(self, handler_name, self.on_change)
        await method(delta, old, new, model)

    async def on_change(self, delta, old, new, model):
        """Generic model-change handler.

        :param delta: :class:`juju.client.overrides.Delta`
        :param old: :class:`juju.model.ModelEntity`
        :param new: :class:`juju.model.ModelEntity`
        :param model: :class:`juju.model.Model`

        """
        pass
class ModelState(object):
    """Holds the state of the model, including the delta history of all
    entities in the model.

    """
    def __init__(self, model):
        # Back-reference to the owning Model; needed to construct
        # entity wrapper objects in get_entity().
        self.model = model
        # Mapping of entity_type -> entity_id -> history deque of data
        # dicts. A trailing None in a history marks the entity as removed.
        self.state = dict()

    def _live_entity_map(self, entity_type):
        """Return an id:Entity map of all the living entities of
        type ``entity_type``.

        """
        return {
            entity_id: self.get_entity(entity_type, entity_id)
            for entity_id, history in self.state.get(entity_type, {}).items()
            if history[-1] is not None
        }

    @property
    def applications(self):
        """Return a map of application-name:Application for all applications
        currently in the model.

        """
        return self._live_entity_map('application')

    @property
    def machines(self):
        """Return a map of machine-id:Machine for all machines currently in
        the model.

        """
        return self._live_entity_map('machine')

    @property
    def units(self):
        """Return a map of unit-id:Unit for all units currently in
        the model.

        """
        return self._live_entity_map('unit')

    def entity_history(self, entity_type, entity_id):
        """Return the history deque for an entity.

        :raises KeyError: if the entity has never been seen.
        """
        return self.state[entity_type][entity_id]

    def entity_data(self, entity_type, entity_id, history_index):
        """Return the data dict for an entity at a specific index of its
        history.

        """
        return self.entity_history(entity_type, entity_id)[history_index]

    def apply_delta(self, delta):
        """Apply delta to our state and return a copy of the
        affected object as it was before and after the update, e.g.:

            old_obj, new_obj = self.apply_delta(delta)

        old_obj may be None if the delta is for the creation of a new object,
        e.g. a new application or unit is deployed.

        new_obj will never be None, but may be dead (new_obj.dead == True)
        if the object was deleted as a result of the delta being applied.

        """
        history = (
            self.state
            .setdefault(delta.entity, {})
            .setdefault(delta.get_id(), collections.deque())
        )
        history.append(delta.data)
        if delta.type == 'remove':
            # Append a None marker so the entity reads as dead while its
            # prior states remain reachable via history indices.
            history.append(None)
        entity = self.get_entity(delta.entity, delta.get_id())
        return entity.previous(), entity

    def get_entity(
            self, entity_type, entity_id, history_index=-1, connected=True):
        """Return an object instance for the given entity_type and id.

        By default the object state matches the most recent state from
        Juju. To get an instance of the object in an older state, pass
        history_index, an index into the history deque for the entity.

        """
        # Normalize negative indices (other than -1, "latest") to their
        # positive equivalent; indices before the start of history mean
        # there is no such state.
        if history_index < 0 and history_index != -1:
            history_index += len(self.entity_history(entity_type, entity_id))
            if history_index < 0:
                return None

        try:
            self.entity_data(entity_type, entity_id, history_index)
        except IndexError:
            return None

        entity_class = get_entity_class(entity_type)
        return entity_class(
            entity_id, self.model, history_index=history_index,
            connected=connected)
class ModelEntity(object):
    """An object in the Model tree"""

    def __init__(self, entity_id, model, history_index=-1, connected=True):
        """Initialize a new entity

        :param entity_id str: The unique id of the object in the model
        :param model: The model instance in whose object tree this
            entity resides
        :history_index int: The index of this object's state in the model's
            history deque for this entity
        :connected bool: Flag indicating whether this object gets live updates
            from the model

        """
        self.entity_id = entity_id
        self.model = model
        self._history_index = history_index
        self.connected = connected
        self.connection = model.connection

    def __repr__(self):
        return '<{} entity_id="{}">'.format(type(self).__name__,
                                            self.entity_id)

    def __getattr__(self, name):
        """Fetch object attributes from the underlying data dict held in the
        model.

        Attributes are looked up verbatim first, then with underscores
        translated to dashes (the Juju API uses dashed keys).

        """
        try:
            return self.safe_data[name]
        except KeyError:
            name = name.replace('_', '-')
            if name in self.safe_data:
                return self.safe_data[name]
            raise

    def __bool__(self):
        return bool(self.data)

    def on_change(self, callable_):
        """Add a change observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'change', self.entity_id)

    def on_remove(self, callable_):
        """Add a remove observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'remove', self.entity_id)

    @property
    def entity_type(self):
        """A string identifying the entity type of this object, e.g.
        'application' or 'unit', etc.

        """
        return self.__class__.__name__.lower()

    @property
    def current(self):
        """Return True if this object represents the current state of the
        entity in the underlying model.

        This will be True except when the object represents an entity at a
        non-latest state in history, e.g. if the object was obtained by calling
        .previous() on another object.

        """
        return self._history_index == -1

    @property
    def dead(self):
        """Returns True if this entity no longer exists in the underlying
        model.

        """
        return (
            self.data is None or
            self.model.state.entity_data(
                self.entity_type, self.entity_id, -1) is None
        )

    @property
    def alive(self):
        """Returns True if this entity still exists in the underlying
        model.

        """
        return not self.dead

    @property
    def data(self):
        """The data dictionary for this entity.

        """
        return self.model.state.entity_data(
            self.entity_type, self.entity_id, self._history_index)

    @property
    def safe_data(self):
        """The data dictionary for this entity.

        If this `ModelEntity` points to the dead state, it will
        raise `DeadEntityException`.

        """
        if self.data is None:
            raise DeadEntityException(
                "Entity {}:{} is dead - its attributes can no longer be "
                "accessed. Use the .previous() method on this object to get "
                "a copy of the object at its previous state.".format(
                    self.entity_type, self.entity_id))
        return self.data

    def previous(self):
        """Return a copy of this object as was at its previous state in
        history.

        Returns None if this object is new (and therefore has no history).

        The returned object is always "disconnected", i.e. does not receive
        live updates.

        """
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, self._history_index - 1,
            connected=False)

    def next(self):
        """Return a copy of this object at its next state in
        history.

        Returns None if this object is already the latest.

        The returned object is "disconnected", i.e. does not receive
        live updates, unless it is current (latest).

        """
        if self._history_index == -1:
            return None

        new_index = self._history_index + 1
        connected = (
            new_index == len(self.model.state.entity_history(
                self.entity_type, self.entity_id)) - 1
        )
        # BUGFIX: previously passed ``self._history_index - 1`` here,
        # which stepped *backward* in history instead of forward.
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, new_index,
            connected=connected)

    def latest(self):
        """Return a copy of this object at its current state in the model.

        Returns self if this object is already the latest.

        The returned object is always "connected", i.e. receives
        live updates from the model.

        """
        if self._history_index == -1:
            return self

        return self.model.state.get_entity(self.entity_type, self.entity_id)
376 def __init__(self
, loop
=None):
377 """Instantiate a new connected Model.
379 :param loop: an asyncio event loop
382 self
.loop
= loop
or asyncio
.get_event_loop()
383 self
.connection
= None
384 self
.observers
= weakref
.WeakValueDictionary()
385 self
.state
= ModelState(self
)
387 self
._watcher
_task
= None
388 self
._watch
_shutdown
= asyncio
.Event(loop
=self
.loop
)
389 self
._watch
_received
= asyncio
.Event(loop
=self
.loop
)
390 self
._charmstore
= CharmStore(self
.loop
)
392 async def connect(self
, *args
, **kw
):
393 """Connect to an arbitrary Juju model.
395 args and kw are passed through to Connection.connect()
399 kw
['loop'] = self
.loop
400 self
.connection
= await connection
.Connection
.connect(*args
, **kw
)
401 await self
._after
_connect
()
403 async def connect_current(self
):
404 """Connect to the current Juju model.
407 self
.connection
= await connection
.Connection
.connect_current(
409 await self
._after
_connect
()
411 async def connect_model(self
, model_name
):
412 """Connect to a specific Juju model by name.
414 :param model_name: Format [controller:][user/]model
417 self
.connection
= await connection
.Connection
.connect_model(model_name
,
419 await self
._after
_connect
()
421 async def _after_connect(self
):
422 """Run initialization steps after connecting to websocket.
426 await self
._watch
_received
.wait()
427 await self
.get_info()
429 async def disconnect(self
):
430 """Shut down the watcher task and close websockets.
433 self
._stop
_watching
()
434 if self
.connection
and self
.connection
.is_open
:
435 await self
._watch
_shutdown
.wait()
436 log
.debug('Closing model connection')
437 await self
.connection
.close()
438 self
.connection
= None
440 async def add_local_charm_dir(self
, charm_dir
, series
):
441 """Upload a local charm to the model.
443 This will automatically generate an archive from
446 :param charm_dir: Path to the charm directory
447 :param series: Charm series
450 fh
= tempfile
.NamedTemporaryFile()
451 CharmArchiveGenerator(charm_dir
).make_archive(fh
.name
)
454 self
.add_local_charm
, fh
, series
, os
.stat(fh
.name
).st_size
)
455 charm_url
= await self
.loop
.run_in_executor(None, func
)
457 log
.debug('Uploaded local charm: %s -> %s', charm_dir
, charm_url
)
460 def add_local_charm(self
, charm_file
, series
, size
=None):
461 """Upload a local charm archive to the model.
463 Returns the 'local:...' url that should be used to deploy the charm.
465 :param charm_file: Path to charm zip archive
466 :param series: Charm series
467 :param size: Size of the archive, in bytes
468 :return str: 'local:...' url for deploying the charm
469 :raises: :class:`JujuError` if the upload fails
471 Uses an https endpoint at the same host:port as the wss.
472 Supports large file uploads.
476 This method will block. Consider using :meth:`add_local_charm_dir`
480 conn
, headers
, path_prefix
= self
.connection
.https_connection()
481 path
= "%s/charms?series=%s" % (path_prefix
, series
)
482 headers
['Content-Type'] = 'application/zip'
484 headers
['Content-Length'] = size
485 conn
.request("POST", path
, charm_file
, headers
)
486 response
= conn
.getresponse()
487 result
= response
.read().decode()
488 if not response
.status
== 200:
489 raise JujuError(result
)
490 result
= json
.loads(result
)
491 return result
['charm-url']
def all_units_idle(self):
    """Return True if all units are idle.

    An empty model (no units) counts as idle. Each unit's status is read
    from ``unit.data['agent-status']['current']``.

    """
    return all(
        unit.data['agent-status']['current'] == 'idle'
        for unit in self.units.values()
    )
503 async def reset(self
, force
=False):
504 """Reset the model to a clean state.
506 :param bool force: Force-terminate machines.
508 This returns only after the model has reached a clean state. "Clean"
509 means no applications or machines exist in the model.
512 log
.debug('Resetting model')
513 for app
in self
.applications
.values():
515 for machine
in self
.machines
.values():
516 await machine
.destroy(force
=force
)
517 await self
.block_until(
518 lambda: len(self
.machines
) == 0
async def block_until(self, *conditions, timeout=None, wait_period=0.5):
    """Return only after all conditions are true.

    :param conditions: zero-argument callables; polled until all return
        a truthy value
    :param timeout: maximum seconds to wait (raises
        ``asyncio.TimeoutError`` on expiry), or None for no limit
    :param wait_period: seconds to sleep between polls

    """
    async def _block():
        while not all(c() for c in conditions):
            # NOTE(review): the ``loop`` kwarg was removed from asyncio
            # in Python 3.10; this file targets an older asyncio API.
            await asyncio.sleep(wait_period, loop=self.loop)
    await asyncio.wait_for(_block(), timeout, loop=self.loop)
531 def applications(self
):
532 """Return a map of application-name:Application for all applications
533 currently in the model.
536 return self
.state
.applications
540 """Return a map of machine-id:Machine for all machines currently in
544 return self
.state
.machines
548 """Return a map of unit-id:Unit for all units currently in
552 return self
.state
.units
554 async def get_info(self
):
555 """Return a client.ModelInfo object for this Model.
557 Retrieves latest info for this Model from the api server. The
558 return value is cached on the Model.info attribute so that the
559 valued may be accessed again without another api call, if
562 This method is called automatically when the Model is connected,
563 resulting in Model.info being initialized without requiring an
564 explicit call to this method.
567 facade
= client
.ClientFacade()
568 facade
.connect(self
.connection
)
570 self
.info
= await facade
.ModelInfo()
571 log
.debug('Got ModelInfo: %s', vars(self
.info
))
576 self
, callable_
, entity_type
=None, action
=None, entity_id
=None,
578 """Register an "on-model-change" callback
580 Once the model is connected, ``callable_``
581 will be called each time the model changes. ``callable_`` should
582 be Awaitable and accept the following positional arguments:
584 delta - An instance of :class:`juju.delta.EntityDelta`
585 containing the raw delta data recv'd from the Juju
588 old_obj - If the delta modifies an existing object in the model,
589 old_obj will be a copy of that object, as it was before the
590 delta was applied. Will be None if the delta creates a new
593 new_obj - A copy of the new or updated object, after the delta
594 is applied. Will be None if the delta removes an entity
597 model - The :class:`Model` itself.
599 Events for which ``callable_`` is called can be specified by passing
600 entity_type, action, and/or entitiy_id filter criteria, e.g.::
604 entity_type='application', action='add', entity_id='ubuntu')
606 For more complex filtering conditions, pass a predicate function. It
607 will be called with a delta as its only argument. If the predicate
608 function returns True, the ``callable_`` will be called.
611 observer
= _Observer(
612 callable_
, entity_type
, action
, entity_id
, predicate
)
613 self
.observers
[observer
] = callable_
616 """Start an asynchronous watch against this model.
618 See :meth:`add_observer` to register an onchange callback.
621 async def _start_watch():
622 self
._watch
_shutdown
.clear()
624 allwatcher
= watcher
.AllWatcher()
625 self
._watch
_conn
= await self
.connection
.clone()
626 allwatcher
.connect(self
._watch
_conn
)
628 results
= await allwatcher
.Next()
629 for delta
in results
.deltas
:
630 delta
= get_entity_delta(delta
)
631 old_obj
, new_obj
= self
.state
.apply_delta(delta
)
632 # XXX: Might not want to shield at this level
633 # We are shielding because when the watcher is
634 # canceled (on disconnect()), we don't want all of
635 # its children (every observer callback) to be
636 # canceled with it. So we shield them. But this means
637 # they can *never* be canceled.
638 await asyncio
.shield(
639 self
._notify
_observers
(delta
, old_obj
, new_obj
),
641 self
._watch
_received
.set()
642 except CancelledError
:
643 log
.debug('Closing watcher connection')
644 await self
._watch
_conn
.close()
645 self
._watch
_shutdown
.set()
646 self
._watch
_conn
= None
648 log
.debug('Starting watcher task')
649 self
._watcher
_task
= self
.loop
.create_task(_start_watch())
651 def _stop_watching(self
):
652 """Stop the asynchronous watch against this model.
655 log
.debug('Stopping watcher task')
656 if self
._watcher
_task
:
657 self
._watcher
_task
.cancel()
659 async def _notify_observers(self
, delta
, old_obj
, new_obj
):
660 """Call observing callbacks, notifying them of a change in model state
662 :param delta: The raw change from the watcher
663 (:class:`juju.client.overrides.Delta`)
664 :param old_obj: The object in the model that this delta updates.
666 :param new_obj: The object in the model that is created or updated
667 by applying this delta.
670 if new_obj
and not old_obj
:
674 'Model changed: %s %s %s',
675 delta
.entity
, delta
.type, delta
.get_id())
677 for o
in self
.observers
:
678 if o
.cares_about(delta
):
679 asyncio
.ensure_future(o(delta
, old_obj
, new_obj
, self
),
async def _wait(self, entity_type, entity_id, action, predicate=None):
    """
    Block the calling routine until a given action has happened to the
    given entity

    :param entity_type: The entity's type.
    :param entity_id: The entity's id.
    :param action: the type of action (e.g., 'add', 'change', or 'remove')
    :param predicate: optional callable that must take as an
        argument a delta, and must return a boolean, indicating
        whether the delta contains the specific action we're looking
        for. For example, you might check to see whether a 'change'
        has a 'completed' status. See the _Observer class for details.

    """
    q = asyncio.Queue(loop=self.loop)

    async def callback(delta, old, new, model):
        await q.put(delta.get_id())

    self.add_observer(callback, entity_type, action, entity_id, predicate)
    entity_id = await q.get()
    # object might not be in the entity_map if we were waiting for a
    # "remove" action
    return self.state._live_entity_map(entity_type).get(entity_id)
708 async def _wait_for_new(self
, entity_type
, entity_id
=None, predicate
=None):
709 """Wait for a new object to appear in the Model and return it.
711 Waits for an object of type ``entity_type`` with id ``entity_id``.
712 If ``entity_id`` is ``None``, it will wait for the first new entity
715 This coroutine blocks until the new object appears in the model.
718 # if the entity is already in the model, just return it
719 if entity_id
in self
.state
._live
_entity
_map
(entity_type
):
720 return self
.state
._live
_entity
_map
(entity_type
)[entity_id
]
721 # if we know the entity_id, we can trigger on any action that puts
722 # the enitty into the model; otherwise, we have to watch for the
723 # next "add" action on that entity_type
724 action
= 'add' if entity_id
is None else None
725 return await self
._wait
(entity_type
, entity_id
, action
, predicate
)
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete.

    :param action_id: either the bare action id or an ``action-<id>``
        tag; the prefix is stripped to match the id used in api deltas
    :return: the action entity once its status is 'completed' or 'failed'

    """
    if action_id.startswith("action-"):
        # if we've been passed action.tag, transform it into the
        # id that the api deltas will use.
        action_id = action_id[7:]

    def predicate(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, 'change', predicate)
740 async def add_machine(
741 self
, spec
=None, constraints
=None, disks
=None, series
=None):
742 """Start a new, empty machine and optionally a container, or add a
743 container to a machine.
745 :param str spec: Machine specification
748 (None) - starts a new machine
749 'lxd' - starts a new machine with one lxd container
750 'lxd:4' - starts a new lxd container on machine 4
751 'ssh:user@10.10.0.3' - manually provisions a machine with ssh
752 'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS
753 'maas2.name' - acquire machine maas2.name on MAAS
755 :param dict constraints: Machine constraints
762 :param list disks: List of disk constraint dictionaries
771 :param str series: Series, e.g. 'xenial'
773 Supported container types are: lxd, kvm
775 When deploying a container to an existing machine, constraints cannot
779 params
= client
.AddMachineParams()
780 params
.jobs
= ['JobHostUnits']
783 placement
= parse_placement(spec
)
785 params
.placement
= placement
[0]
788 params
.constraints
= client
.Value
.from_json(constraints
)
792 client
.Constraints
.from_json(o
) for o
in disks
]
795 params
.series
= series
797 # Submit the request.
798 client_facade
= client
.ClientFacade()
799 client_facade
.connect(self
.connection
)
800 results
= await client_facade
.AddMachines([params
])
801 error
= results
.machines
[0].error
803 raise ValueError("Error adding machine: %s", error
.message
)
804 machine_id
= results
.machines
[0].machine
805 log
.debug('Added new machine %s', machine_id
)
806 return await self
._wait
_for
_new
('machine', machine_id
)
808 async def add_relation(self
, relation1
, relation2
):
809 """Add a relation between two applications.
811 :param str relation1: '<application>[:<relation_name>]'
812 :param str relation2: '<application>[:<relation_name>]'
815 app_facade
= client
.ApplicationFacade()
816 app_facade
.connect(self
.connection
)
819 'Adding relation %s <-> %s', relation1
, relation2
)
822 result
= await app_facade
.AddRelation([relation1
, relation2
])
823 except JujuAPIError
as e
:
824 if 'relation already exists' not in e
.message
:
827 'Relation %s <-> %s already exists', relation1
, relation2
)
828 # TODO: if relation already exists we should return the
829 # Relation ModelEntity here
832 def predicate(delta
):
834 for endpoint
in delta
.data
['endpoints']:
835 endpoints
[endpoint
['application-name']] = endpoint
['relation']
836 return endpoints
== result
.endpoints
838 return await self
._wait
_for
_new
('relation', None, predicate
)
def add_space(self, name, *cidrs):
    """Add a new network space.

    Adds a new space with the given name and associates the given
    (optional) list of existing subnet CIDRs with it.

    :param str name: Name of the space
    :param \*cidrs: Optional list of existing subnet CIDRs

    """
    raise NotImplementedError()

def add_ssh_key(self, key):
    """Add a public SSH key to this model.

    :param str key: The public ssh key

    """
    raise NotImplementedError()
add_ssh_keys = add_ssh_key

def add_subnet(self, cidr_or_id, space, *zones):
    """Add an existing subnet to this model.

    :param str cidr_or_id: CIDR or provider ID of the existing subnet
    :param str space: Network space with which to associate
    :param str \*zones: Zone(s) in which the subnet resides

    """
    raise NotImplementedError()

def get_backups(self):
    """Retrieve metadata for backups in this model.

    """
    raise NotImplementedError()

def block(self, *commands):
    """Add a new block to this model.

    :param str \*commands: The commands to block. Valid values are
        'all-changes', 'destroy-model', 'remove-object'

    """
    raise NotImplementedError()

def get_blocks(self):
    """List blocks for this model.

    """
    raise NotImplementedError()

def get_cached_images(self, arch=None, kind=None, series=None):
    """Return a list of cached OS images.

    :param str arch: Filter by image architecture
    :param str kind: Filter by image kind, e.g. 'lxd'
    :param str series: Filter by image series, e.g. 'xenial'

    """
    raise NotImplementedError()

def create_backup(self, note=None, no_download=False):
    """Create a backup of this model.

    :param str note: A note to store with the backup
    :param bool no_download: Do not download the backup archive
    :return str: Path to downloaded archive

    """
    raise NotImplementedError()

def create_storage_pool(self, name, provider_type, **pool_config):
    """Create or define a storage pool.

    :param str name: Name to give the storage pool
    :param str provider_type: Pool provider type
    :param \*\*pool_config: key/value pool configuration pairs

    """
    raise NotImplementedError()

def debug_log(
        self, no_tail=False, exclude_module=None, include_module=None,
        include=None, level=None, limit=0, lines=10, replay=False,
        exclude=None):
    """Get log messages for this model.

    :param bool no_tail: Stop after returning existing log messages
    :param list exclude_module: Do not show log messages for these logging
        modules
    :param list include_module: Only show log messages for these logging
        modules
    :param list include: Only show log messages for these entities
    :param str level: Log level to show, valid options are 'TRACE',
        'DEBUG', 'INFO', 'WARNING', 'ERROR,
    :param int limit: Return this many of the most recent (possibly
        filtered) lines are shown
    :param int lines: Yield this many of the most recent lines, and keep
        yielding
    :param bool replay: Yield the entire log, and keep yielding
    :param list exclude: Do not show log messages for these entities

    """
    raise NotImplementedError()
946 def _get_series(self
, entity_url
, entity
):
947 # try to get the series from the provided charm URL
948 if entity_url
.startswith('cs:'):
949 parts
= entity_url
[3:].split('/')
951 parts
= entity_url
.split('/')
952 if parts
[0].startswith('~'):
955 # series was specified in the URL
957 # series was not supplied at all, so use the newest
958 # supported series according to the charm store
959 ss
= entity
['Meta']['supported-series']
960 return ss
['SupportedSeries'][0]
963 self
, entity_url
, application_name
=None, bind
=None, budget
=None,
964 channel
=None, config
=None, constraints
=None, force
=False,
965 num_units
=1, plan
=None, resources
=None, series
=None, storage
=None,
967 """Deploy a new service or bundle.
969 :param str entity_url: Charm or bundle url
970 :param str application_name: Name to give the service
971 :param dict bind: <charm endpoint>:<network space> pairs
972 :param dict budget: <budget name>:<limit> pairs
973 :param str channel: Charm store channel from which to retrieve
974 the charm or bundle, e.g. 'development'
975 :param dict config: Charm configuration dictionary
976 :param constraints: Service constraints
977 :type constraints: :class:`juju.Constraints`
978 :param bool force: Allow charm to be deployed to a machine running
979 an unsupported series
980 :param int num_units: Number of units to deploy
981 :param str plan: Plan under which to deploy charm
982 :param dict resources: <resource name>:<file path> pairs
983 :param str series: Series on which to deploy
984 :param dict storage: Storage constraints TODO how do these look?
985 :param to: Placement directive as a string. For example:
987 '23' - place on machine 23
988 'lxd:7' - place in new lxd container on machine 7
989 '24/lxd/3' - place in container 3 on machine 24
991 If None, a new machine is provisioned.
996 - support local resources
1001 k
: client
.Constraints(**v
)
1002 for k
, v
in storage
.items()
1006 entity_url
.startswith('local:') or
1007 os
.path
.isdir(entity_url
)
1010 entity_id
= entity_url
1012 entity
= await self
.charmstore
.entity(entity_url
)
1013 entity_id
= entity
['Id']
1015 client_facade
= client
.ClientFacade()
1016 client_facade
.connect(self
.connection
)
1018 is_bundle
= ((is_local
and
1019 (Path(entity_id
) / 'bundle.yaml').exists()) or
1020 (not is_local
and 'bundle/' in entity_id
))
1023 handler
= BundleHandler(self
)
1024 await handler
.fetch_plan(entity_id
)
1025 await handler
.execute_plan()
1026 extant_apps
= {app
for app
in self
.applications
}
1027 pending_apps
= set(handler
.applications
) - extant_apps
1029 # new apps will usually be in the model by now, but if some
1030 # haven't made it yet we'll need to wait on them to be added
1031 await asyncio
.gather(*[
1032 asyncio
.ensure_future(
1033 self
._wait
_for
_new
('application', app_name
),
1035 for app_name
in pending_apps
1037 return [app
for name
, app
in self
.applications
.items()
1038 if name
in handler
.applications
]
1041 if not application_name
:
1042 application_name
= entity
['Meta']['charm-metadata']['Name']
1044 series
= self
._get
_series
(entity_url
, entity
)
1047 await client_facade
.AddCharm(channel
, entity_id
)
1048 # XXX: we're dropping local resources here, but we don't
1049 # actually support them yet anyway
1050 resources
= await self
._add
_store
_resources
(application_name
,
1054 # We have a local charm dir that needs to be uploaded
1055 charm_dir
= os
.path
.abspath(
1056 os
.path
.expanduser(entity_id
))
1057 series
= series
or get_charm_series(charm_dir
)
1060 "Couldn't determine series for charm at {}. "
1061 "Pass a 'series' kwarg to Model.deploy().".format(
1063 entity_id
= await self
.add_local_charm_dir(charm_dir
, series
)
1064 return await self
._deploy
(
1065 charm_url
=entity_id
,
1066 application
=application_name
,
1068 config
=config
or {},
1069 constraints
=constraints
,
1070 endpoint_bindings
=bind
,
1071 resources
=resources
,
1074 num_units
=num_units
,
1075 placement
=parse_placement(to
),
1078 async def _add_store_resources(self
, application
, entity_url
, entity
=None):
1080 # avoid extra charm store call if one was already made
1081 entity
= await self
.charmstore
.entity(entity_url
)
1084 'description': resource
['Description'],
1085 'fingerprint': resource
['Fingerprint'],
1086 'name': resource
['Name'],
1087 'path': resource
['Path'],
1088 'revision': resource
['Revision'],
1089 'size': resource
['Size'],
1090 'type_': resource
['Type'],
1092 } for resource
in entity
['Meta']['resources']
1098 resources_facade
= client
.ResourcesFacade()
1099 resources_facade
.connect(self
.connection
)
1100 response
= await resources_facade
.AddPendingResources(
1101 tag
.application(application
),
1103 [client
.CharmResource(**resource
) for resource
in resources
])
1104 resource_map
= {resource
['name']: pid
1106 in zip(resources
, response
.pending_ids
)}
1109 async def _deploy(self
, charm_url
, application
, series
, config
,
1110 constraints
, endpoint_bindings
, resources
, storage
,
1111 channel
=None, num_units
=None, placement
=None):
1112 """Logic shared between `Model.deploy` and `BundleHandler.deploy`.
1114 log
.info('Deploying %s', charm_url
)
1116 # stringify all config values for API, and convert to YAML
1117 config
= {k
: str(v
) for k
, v
in config
.items()}
1118 config
= yaml
.dump({application
: config
},
1119 default_flow_style
=False)
1121 app_facade
= client
.ApplicationFacade()
1122 app_facade
.connect(self
.connection
)
1124 app
= client
.ApplicationDeploy(
1125 charm_url
=charm_url
,
1126 application
=application
,
1130 constraints
=parse_constraints(constraints
),
1131 endpoint_bindings
=endpoint_bindings
,
1132 num_units
=num_units
,
1133 resources
=resources
,
1135 placement
=placement
,
1138 result
= await app_facade
.Deploy([app
])
1139 errors
= [r
.error
.message
for r
in result
.results
if r
.error
]
1141 raise JujuError('\n'.join(errors
))
1142 return await self
._wait
_for
_new
('application', application
)
1145 """Terminate all machines and resources for this model.
1148 raise NotImplementedError()
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    :param str \*unit_names: Names of the units to destroy

    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    # BUGFIX: pluralize when there is more than one unit (the condition
    # was previously inverted, logging "units" for a single unit).
    log.debug(
        'Destroying unit%s %s',
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))

    return await app_facade.DestroyUnits(list(unit_names))
destroy_units = destroy_unit
1165 def get_backup(self
, archive_id
):
1166 """Download a backup archive file.
1168 :param str archive_id: The id of the archive to download
1169 :return str: Path to the archive file
1172 raise NotImplementedError()
def ensure_availability(self, num_controllers=0, constraints=None,
                        series=None, to=None):
    """Ensure sufficient controllers exist to provide redundancy.

    Not yet implemented.

    :param int num_controllers: Number of controllers to make available
    :param constraints: Constraints to apply to the controller machines
    :type constraints: :class:`juju.Constraints`
    :param str series: Series of the controller machines
    :param list to: Placement directives for controller machines, e.g.::

        'lxc:7' - new lxc container on machine 7
        '24/lxc/3' - lxc container 3 or machine 24

        If None, a new machine is provisioned.
    """
    raise NotImplementedError
def get_config(self):
    """Return the configuration settings for this model.

    Not yet implemented.
    """
    raise NotImplementedError
def get_constraints(self):
    """Return the machine constraints for this model.

    Not yet implemented.
    """
    raise NotImplementedError
def grant(self, username, acl='read'):
    """Grant a user access to this model. Not yet implemented.

    :param str username: Username
    :param str acl: Access control ('read' or 'write')
    """
    raise NotImplementedError
def import_ssh_key(self, identity):
    """Add a public SSH key from a trusted identity source to this model.

    Not yet implemented.

    :param str identity: User identity in the form <lp|gh>:<username>
    """
    raise NotImplementedError
import_ssh_keys = import_ssh_key
def get_machines(self, machine, utc=False):
    """Return list of machines in this model. Not yet implemented.

    :param str machine: Machine id, e.g. '0'
    :param bool utc: Display time as UTC in RFC3339 format
    """
    raise NotImplementedError
def get_shares(self):
    """Return list of all users with access to this model.

    Not yet implemented.
    """
    raise NotImplementedError
def get_spaces(self):
    """Return list of all known spaces, including associated subnets.

    Not yet implemented.
    """
    raise NotImplementedError
def get_ssh_key(self):
    """Return known SSH keys for this model.

    Not yet implemented.
    """
    raise NotImplementedError
get_ssh_keys = get_ssh_key
def get_storage(self, filesystem=False, volume=False):
    """Return details of storage instances. Not yet implemented.

    :param bool filesystem: Include filesystem storage
    :param bool volume: Include volume storage
    """
    raise NotImplementedError
def get_storage_pools(self, names=None, providers=None):
    """Return list of storage pools. Not yet implemented.

    :param list names: Only include pools with these names
    :param list providers: Only include pools for these providers
    """
    raise NotImplementedError
def get_subnets(self, space=None, zone=None):
    """Return list of known subnets. Not yet implemented.

    :param str space: Only include subnets in this space
    :param str zone: Only include subnets in this zone
    """
    raise NotImplementedError
def remove_blocks(self):
    """Remove all blocks from this model.

    Not yet implemented.
    """
    raise NotImplementedError
def remove_backup(self, backup_id):
    """Delete a backup. Not yet implemented.

    :param str backup_id: The id of the backup to remove
    """
    raise NotImplementedError
def remove_cached_images(self, arch=None, kind=None, series=None):
    """Remove cached OS images. Not yet implemented.

    :param str arch: Architecture of the images to remove
    :param str kind: Image kind to remove, e.g. 'lxd'
    :param str series: Image series to remove, e.g. 'xenial'
    """
    raise NotImplementedError
def remove_machine(self, *machine_ids):
    """Remove a machine from this model. Not yet implemented.

    :param str *machine_ids: Ids of the machines to remove
    """
    raise NotImplementedError
remove_machines = remove_machine
def remove_ssh_key(self, *keys):
    """Remove a public SSH key(s) from this model. Not yet implemented.

    :param str *keys: Keys to remove
    """
    raise NotImplementedError
remove_ssh_keys = remove_ssh_key
def restore_backup(self, bootstrap=False, constraints=None, archive=None,
                   backup_id=None, upload_tools=False):
    """Restore a backup archive to a new controller.

    Not yet implemented.

    :param bool bootstrap: Bootstrap a new state machine
    :param constraints: Model constraints
    :type constraints: :class:`juju.Constraints`
    :param str archive: Path to backup archive to restore
    :param str backup_id: Id of backup to restore
    :param bool upload_tools: Upload tools if bootstrapping a new machine
    """
    raise NotImplementedError
def retry_provisioning(self):
    """Retry provisioning for failed machines.

    Not yet implemented.
    """
    raise NotImplementedError
def revoke(self, username, acl='read'):
    """Revoke a user's access to this model. Not yet implemented.

    :param str username: Username to revoke
    :param str acl: Access control ('read' or 'write')
    """
    raise NotImplementedError
def run(self, command, timeout=None):
    """Run command on all machines in this model. Not yet implemented.

    :param str command: The command to run
    :param int timeout: Time to wait before command is considered failed
    """
    raise NotImplementedError
def set_config(self, **config):
    """Set configuration keys on this model. Not yet implemented.

    :param **config: Config key/values
    """
    raise NotImplementedError
def set_constraints(self, constraints):
    """Set machine constraints on this model. Not yet implemented.

    :param :class:`juju.Constraints` constraints: Machine constraints
    """
    raise NotImplementedError
def get_action_output(self, action_uuid, wait=-1):
    """Get the results of an action by ID. Not yet implemented.

    :param str action_uuid: Id of the action
    :param int wait: Time in seconds to wait for action to complete
    """
    raise NotImplementedError
def get_action_status(self, uuid_or_prefix=None, name=None):
    """Get the status of all actions, filtered by ID, ID prefix, or
    action name. Not yet implemented.

    :param str uuid_or_prefix: Filter by action uuid or prefix
    :param str name: Filter by action name
    """
    raise NotImplementedError
def get_budget(self, budget_name):
    """Get budget usage info. Not yet implemented.

    :param str budget_name: Name of budget
    """
    raise NotImplementedError
async def get_status(self, filters=None, utc=False):
    """Return the status of the model.

    :param str filters: Optional list of applications, units, or machines
        to include, which can use wildcards ('*').
    :param bool utc: Display time as UTC in RFC3339 format
    """
    facade = client.ClientFacade()
    facade.connect(self.connection)
    return await facade.FullStatus(filters)
def sync_tools(self, all_=False, destination=None, dry_run=False,
               public=False, source=None, stream=None, version=None):
    """Copy Juju tools into this model. Not yet implemented.

    :param bool all_: Copy all versions, not just the latest
    :param str destination: Path to local destination directory
    :param bool dry_run: Don't do the actual copy
    :param bool public: Tools are for a public cloud, so generate mirrors
    :param str source: Path to local source directory
    :param str stream: Simplestreams stream for which to sync metadata
    :param str version: Copy a specific major.minor version
    """
    raise NotImplementedError
def unblock(self, *commands):
    """Unblock an operation that would alter this model.

    Not yet implemented.

    :param str *commands: The commands to unblock. Valid values are
        'all-changes', 'destroy-model', 'remove-object'
    """
    raise NotImplementedError
def unset_config(self, *keys):
    """Unset configuration on this model. Not yet implemented.

    :param str *keys: The keys to unset
    """
    raise NotImplementedError
def upgrade_gui(self):
    """Upgrade the Juju GUI for this model.

    Not yet implemented.
    """
    raise NotImplementedError
def upgrade_juju(self, dry_run=False, reset_previous_upgrade=False,
                 upload_tools=False, version=None):
    """Upgrade Juju on all machines in a model. Not yet implemented.

    :param bool dry_run: Don't do the actual upgrade
    :param bool reset_previous_upgrade: Clear the previous (incomplete)
        upgrade status
    :param bool upload_tools: Upload local version of tools
    :param str version: Upgrade to a specific version
    """
    raise NotImplementedError
def upload_backup(self, archive_path):
    """Store a backup archive remotely in Juju. Not yet implemented.

    :param str archive_path: Path to local archive
    """
    raise NotImplementedError
@property
def charmstore(self):
    """Accessor for this model's ``_charmstore`` attribute.

    NOTE(review): presumably a :class:`CharmStore` wrapper set during
    model initialization — confirm against the constructor.
    """
    return self._charmstore
async def get_metrics(self, *tags):
    """Retrieve metrics.

    :param str *tags: Tags of entities from which to retrieve metrics.
        No tags retrieves the metrics of all units in the model.
    :return: Dictionary of unit_name:metrics
    :raises ValueError: if a given tag is not a valid tag
    :raises Exception: for any other per-entity error from the API
    """
    log.debug("Retrieving metrics for %s",
              ', '.join(tags) if tags else "all units")

    metrics_facade = client.MetricsDebugFacade()
    metrics_facade.connect(self.connection)

    # No tags means "everything": GetMetrics with an empty entity list
    # returns metrics for all units.
    entities = [client.Entity(tag) for tag in tags]
    metrics_result = await metrics_facade.GetMetrics(entities)

    # unit name -> list of metric dicts
    metrics = collections.defaultdict(list)

    for entity_metrics in metrics_result.results:
        error = entity_metrics.error
        if error:
            if "is not a valid tag" in error:
                raise ValueError(error.message)
            else:
                raise Exception(error.message)

        for metric in entity_metrics.metrics:
            # vars() flattens the metric object into a plain dict.
            metrics[metric.unit].append(vars(metric))

    return metrics
def get_charm_series(path):
    """Inspects the charm directory at ``path`` and returns a default
    series from its metadata.yaml (the first item in the 'series' list).

    Returns None if no series can be determined.

    :param path: Path to a charm directory (str or PathLike)
    :return: Default series name, or None
    """
    md = Path(path) / "metadata.yaml"
    if not md.exists():
        return None
    # safe_load avoids constructing arbitrary Python objects from charm
    # metadata; read_text closes the file handle (the previous
    # yaml.load(md.open()) both used the unsafe loader and leaked the
    # open file).
    data = yaml.safe_load(md.read_text())
    # Guard against an empty metadata.yaml (safe_load returns None).
    series = data.get('series') if data else None
    return series[0] if series else None
class BundleHandler(object):
    """
    Handle bundles by using the API to translate bundle YAML into a plan of
    steps and then dispatching each of those using the API.
    """
    def __init__(self, model):
        self.model = model
        self.charmstore = model.charmstore
        self.plan = []
        self.references = {}
        # Map of application name -> names of units that already exist in
        # the model, so addUnit can reuse them instead of adding new ones.
        self._units_by_app = {}
        for unit_name, unit in model.units.items():
            app_units = self._units_by_app.setdefault(unit.application, [])
            app_units.append(unit_name)
        self.client_facade = client.ClientFacade()
        self.client_facade.connect(model.connection)
        self.app_facade = client.ApplicationFacade()
        self.app_facade.connect(model.connection)
        self.ann_facade = client.AnnotationsFacade()
        self.ann_facade.connect(model.connection)

    async def _handle_local_charms(self, bundle):
        """Search for references to local charms (i.e. filesystem paths)
        in the bundle. Upload the local charms to the model, and replace
        the filesystem paths with appropriate 'local:' paths in the bundle.

        Return the modified bundle.

        :param dict bundle: Bundle dictionary
        :return: Modified bundle dictionary
        """
        apps, args = [], []

        default_series = bundle.get('series')
        for app_name in self.applications:
            app_dict = bundle['services'][app_name]
            charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
            if not os.path.isdir(charm_dir):
                # Not a filesystem path, so not a local charm.
                continue
            # Series resolution order: app, bundle default, charm metadata.
            series = (
                app_dict.get('series') or
                default_series or
                get_charm_series(charm_dir)
            )
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Add a 'series' key to the bundle.".format(charm_dir))

            # Keep track of what we need to update. We keep a list of apps
            # that need to be updated, and a corresponding list of args
            # needed to update those apps.
            apps.append(app_name)
            args.append((charm_dir, series))

        if apps:
            # If we have apps to update, spawn all the coroutines concurrently
            # and wait for them to finish.
            charm_urls = await asyncio.gather(*[
                self.model.add_local_charm_dir(*params)
                for params in args
            ], loop=self.model.loop)
            # Update the 'charm:' entry for each app with the new 'local:' url.
            for app_name, charm_url in zip(apps, charm_urls):
                bundle['services'][app_name]['charm'] = charm_url

        return bundle

    async def fetch_plan(self, entity_id):
        """Load the bundle YAML (from disk or the charm store), resolve any
        local charms, and ask the API for the deployment change plan.

        :param str entity_id: Bundle id or local bundle directory path
        """
        is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id)
        if is_local:
            bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text()
        else:
            bundle_yaml = await self.charmstore.files(entity_id,
                                                     filename='bundle.yaml',
                                                     read_file=True)
        self.bundle = yaml.safe_load(bundle_yaml)
        self.bundle = await self._handle_local_charms(self.bundle)

        self.plan = await self.client_facade.GetBundleChanges(
            yaml.dump(self.bundle))

    async def execute_plan(self):
        """Run every step of the fetched plan in order, recording each
        result so later steps can resolve '$' placeholder references."""
        for step in self.plan.changes:
            method = getattr(self, step.method)
            result = await method(*step.args)
            self.references[step.id_] = result

    @property
    def applications(self):
        """Names of all applications defined by the bundle."""
        return list(self.bundle['services'].keys())

    def resolve(self, reference):
        """Resolve a '$'-prefixed placeholder to the result of the plan
        step it refers to; pass anything else through unchanged."""
        if reference and reference.startswith('$'):
            reference = self.references[reference[1:]]
        return reference

    async def addCharm(self, charm, series):
        """
        :param charm string:
            Charm holds the URL of the charm to be added.

        :param series string:
            Series holds the series of the charm to be added
            if the charm default is not sufficient.
        """
        # We don't add local charms because they've already been added
        # by self._handle_local_charms
        if charm.startswith('local:'):
            return charm

        entity_id = await self.charmstore.entityId(charm)
        log.debug('Adding %s', entity_id)
        await self.client_facade.AddCharm(None, entity_id)
        return entity_id

    async def addMachines(self, params=None):
        """
        :param params dict:
            Dictionary specifying the machine to add. All keys are optional.
            Keys include:

            series: string specifying the machine OS series.

            constraints: string holding machine constraints, if any. We'll
                parse this into the json friendly dict that the juju api
                expects.

            container_type: string holding the type of the container (for
                instance "lxd" or "kvm"). It is not specified for top level
                machines.

            parent_id: string holding a placeholder pointing to another
                machine change or to a unit change. This value is only
                specified in the case this machine is a container, in
                which case also ContainerType is set.
        """
        params = params or {}

        # Normalize keys to the snake_case the client schema expects.
        params = {normalize_key(k): params[k] for k in params.keys()}

        # Fix up values, as necessary.
        if 'parent_id' in params:
            params['parent_id'] = self.resolve(params['parent_id'])

        params['constraints'] = parse_constraints(
            params.get('constraints'))
        params['jobs'] = params.get('jobs', ['JobHostUnits'])

        if params.get('container_type') == 'lxc':
            log.warning('Juju 2.0 does not support lxc containers. '
                        'Converting containers to lxd.')
            params['container_type'] = 'lxd'

        # Submit the request.
        params = client.AddMachineParams(**params)
        results = await self.client_facade.AddMachines([params])
        error = results.machines[0].error
        if error:
            # Bug fix: the message was previously built as
            # ValueError("...%s", error.message) — comma instead of '%' —
            # so the error message was never interpolated.
            raise ValueError("Error adding machine: %s" % error.message)
        machine = results.machines[0].machine
        log.debug('Added new machine %s', machine)
        return machine

    async def addRelation(self, endpoint1, endpoint2):
        """
        :param endpoint1 string:
        :param endpoint2 string:
            Endpoint1 and Endpoint2 hold relation endpoints in the
            "application:interface" form, where the application is always a
            placeholder pointing to an application change, and the interface is
            optional. Examples are "$deploy-42:web" or just "$deploy-42".
        """
        endpoints = [endpoint1, endpoint2]
        # resolve indirect references
        for i in range(len(endpoints)):
            parts = endpoints[i].split(':')
            parts[0] = self.resolve(parts[0])
            endpoints[i] = ':'.join(parts)

        log.info('Relating %s <-> %s', *endpoints)
        return await self.model.add_relation(*endpoints)

    async def deploy(self, charm, series, application, options, constraints,
                     storage, endpoint_bindings, resources):
        """
        :param charm string:
            Charm holds the URL of the charm to be used to deploy this
            application.

        :param series string:
            Series holds the series of the application to be deployed
            if the charm default is not sufficient.

        :param application string:
            Application holds the application name.

        :param options map[string]interface{}:
            Options holds application options.

        :param constraints string:
            Constraints holds the optional application constraints.

        :param storage map[string]string:
            Storage holds the optional storage constraints.

        :param endpoint_bindings map[string]string:
            EndpointBindings holds the optional endpoint bindings

        :param resources map[string]int:
            Resources identifies the revision to use for each resource
            of the application's charm.
        """
        # resolve indirect references
        charm = self.resolve(charm)
        # the bundle plan doesn't actually do anything with resources, even
        # though it ostensibly gives us something (None) for that param
        if not charm.startswith('local:'):
            resources = await self.model._add_store_resources(application,
                                                              charm)
        await self.model._deploy(
            charm_url=charm,
            application=application,
            series=series,
            config=options,
            constraints=constraints,
            endpoint_bindings=endpoint_bindings,
            resources=resources,
            storage=storage,
        )
        return application

    async def addUnit(self, application, to):
        """
        :param application string:
            Application holds the application placeholder name for which a unit
            is added.

        :param to string:
            To holds the optional location where to add the unit, as a
            placeholder pointing to another unit change or to a machine change.
        """
        application = self.resolve(application)
        placement = self.resolve(to)
        if self._units_by_app.get(application):
            # enough units for this application already exist;
            # claim one, and carry on
            # NB: this should probably honor placement, but the juju client
            # doesn't, so we're not bothering, either
            unit_name = self._units_by_app[application].pop()
            log.debug('Reusing unit %s for %s', unit_name, application)
            return self.model.units[unit_name]

        log.debug('Adding new unit for %s%s', application,
                  ' to %s' % placement if placement else '')
        return await self.model.applications[application].add_unit(
            count=1,
            to=placement,
        )

    async def expose(self, application):
        """
        :param application string:
            Application holds the placeholder name of the application that must
            be exposed.
        """
        application = self.resolve(application)
        log.info('Exposing %s', application)
        return await self.model.applications[application].expose()

    async def setAnnotations(self, id_, entity_type, annotations):
        """
        :param id_ string:
            Id is the placeholder for the application or machine change
            corresponding to the entity to be annotated.

        :param entity_type EntityType:
            EntityType holds the type of the entity, "application" or
            "machine".

        :param annotations map[string]string:
            Annotations holds the annotations as key/value pairs.
        """
        entity_id = self.resolve(id_)
        try:
            entity = self.model.state.get_entity(entity_type, entity_id)
        except KeyError:
            # Entity not yet in local state; wait for the watcher to
            # deliver it.
            entity = await self.model._wait_for_new(entity_type, entity_id)
        return await entity.set_annotations(annotations)
class CharmStore(object):
    """
    Async wrapper around theblues.charmstore.CharmStore that offloads the
    blocking store calls to an executor so they can be awaited.
    """
    def __init__(self, loop):
        # Event loop used to schedule the blocking charmstore calls.
        self.loop = loop
        self._cs = theblues.charmstore.CharmStore(timeout=5)

    def __getattr__(self, name):
        """
        Wrap method calls in coroutines that use run_in_executor to make them
        async.
        """
        attr = getattr(self._cs, name)
        if not callable(attr):
            # Plain attribute: expose a lazy getter so each access reads
            # the current value from the underlying CharmStore.
            wrapper = partial(getattr, self._cs, name)
            setattr(self, name, wrapper)
        else:
            async def coro(*args, **kwargs):
                method = partial(attr, *args, **kwargs)
                # Retry transient server errors up to three times, with a
                # one second pause between attempts.
                for attempt in range(1, 4):
                    try:
                        return await self.loop.run_in_executor(None, method)
                    except theblues.errors.ServerError:
                        if attempt == 3:
                            raise
                        await asyncio.sleep(1, loop=self.loop)
            # Cache the coroutine on the instance so subsequent lookups
            # bypass __getattr__ entirely.
            setattr(self, name, coro)
            wrapper = coro
        return wrapper
1853 class CharmArchiveGenerator(object):
def __init__(self, path):
    """Record the charm directory to archive.

    :param path: Charm directory; ``~`` is expanded and the path is
        made absolute.
    """
    expanded = os.path.expanduser(path)
    self.path = os.path.abspath(expanded)
def make_archive(self, path):
    """Create archive of directory and write to ``path``.

    :param path: Path to archive
    :return: ``path``

    Ignored:

        * build/* - This is used for packing the charm itself and any
          similar tasks.
        * */.* - Hidden files are all ignored for now. This will most
          likely be changed into a specific ignore list
    """
    # Context manager guarantees the zip's central directory is written
    # and the file handle closed even if archiving raises part-way
    # through; previously the ZipFile was closed manually and could leak
    # on error.
    with zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, dirnames, filenames in os.walk(self.path):
            relative_path = dirpath[len(self.path) + 1:]
            if relative_path and not self._ignore(relative_path):
                zf.write(dirpath, relative_path)
            for name in filenames:
                archive_name = os.path.join(relative_path, name)
                if not self._ignore(archive_name):
                    real_path = os.path.join(dirpath, name)
                    self._check_type(real_path)
                    if os.path.islink(real_path):
                        self._check_link(real_path)
                        # Store the link itself, not its target's bytes.
                        self._write_symlink(
                            zf, os.readlink(real_path), archive_name)
                    else:
                        zf.write(real_path, archive_name)
    return path
def _check_type(self, path):
    """Check that ``path`` is a directory or regular file and return it.

    :param path: Filesystem path to check
    :return: ``path`` when the type is acceptable
    :raises ValueError: for any other file type (fifo, device, etc.)
    """
    s = os.stat(path)
    if stat.S_ISDIR(s.st_mode) or stat.S_ISREG(s.st_mode):
        return path
    # Bug fix: the format string was "Invalid Charm at % %s" — the bare
    # '%' is an invalid conversion specifier, so formatting itself blew
    # up instead of producing the intended error message.
    raise ValueError("Invalid Charm at %s: %s" % (
        path, "Invalid file type for a charm"))
def _check_link(self, path):
    """Validate the symlink at ``path``: it must be relative and must
    resolve to a location inside the charm directory (``self.path``).

    :raises ValueError: for absolute link targets or links that escape
        the charm directory
    """
    link_path = os.readlink(path)
    if link_path[0] == "/":
        raise ValueError(
            "Invalid Charm at %s: %s" % (
                path, "Absolute links are invalid"))
    path_dir = os.path.dirname(path)
    # Resolve the target relative to the link's own directory.
    link_path = os.path.join(path_dir, link_path)
    if not link_path.startswith(os.path.abspath(self.path)):
        raise ValueError(
            "Invalid charm at %s %s" % (
                path, "Only internal symlinks are allowed"))
def _write_symlink(self, zf, link_target, link_path):
    """Package symlinks with appropriate zipfile metadata.

    :param zf: Open :class:`zipfile.ZipFile` to write into
    :param link_target: Target the symlink points at (stored as the
        entry's data)
    :param link_path: Archive name for the symlink entry
    """
    info = zipfile.ZipInfo()
    info.filename = link_path
    # create_system 3 marks the entry as created on a Unix host, so the
    # mode bits in external_attr are honored on extraction.
    info.create_system = 3
    # Magic code for symlinks / py2/3 compat
    # 2716663808 = (stat.S_IFLNK | 0755) << 16
    info.external_attr = 2716663808
    zf.writestr(info, link_target)
1922 def _ignore(self
, path
):
1923 if path
== "build" or path
.startswith("build/"):
1925 if path
.startswith('.'):