11 from concurrent
.futures
import CancelledError
12 from functools
import partial
13 from pathlib
import Path
16 import theblues
.charmstore
17 import theblues
.errors
19 from .client
import client
20 from .client
import watcher
21 from .client
import connection
22 from .constraints
import parse
as parse_constraints
, normalize_key
23 from .delta
import get_entity_delta
24 from .delta
import get_entity_class
25 from .exceptions
import DeadEntityException
26 from .errors
import JujuError
, JujuAPIError
27 from .placement
import parse
as parse_placement
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
32 class _Observer(object):
33 """Wrapper around an observer callable.
35 This wrapper allows filter criteria to be associated with the
36 callable so that it's only called for changes that meet the criteria.
39 def __init__(self
, callable_
, entity_type
, action
, entity_id
, predicate
):
40 self
.callable_
= callable_
41 self
.entity_type
= entity_type
43 self
.entity_id
= entity_id
44 self
.predicate
= predicate
46 self
.entity_id
= str(self
.entity_id
)
47 if not self
.entity_id
.startswith('^'):
48 self
.entity_id
= '^' + self
.entity_id
49 if not self
.entity_id
.endswith('$'):
52 async def __call__(self
, delta
, old
, new
, model
):
53 await self
.callable_(delta
, old
, new
, model
)
55 def cares_about(self
, delta
):
56 """Return True if this observer "cares about" (i.e. wants to be
57 called) for a this delta.
60 if (self
.entity_id
and delta
.get_id() and
61 not re
.match(self
.entity_id
, str(delta
.get_id()))):
64 if self
.entity_type
and self
.entity_type
!= delta
.entity
:
67 if self
.action
and self
.action
!= delta
.type:
70 if self
.predicate
and not self
.predicate(delta
):
class ModelObserver(object):
    """Base class for model observers.

    Dispatches each change to an ``on_<entity>_<action>`` method if the
    subclass defines one, falling back to the generic :meth:`on_change`.
    """
    async def __call__(self, delta, old, new, model):
        handler_name = 'on_{}_{}'.format(delta.entity, delta.type)
        method = getattr(self, handler_name, self.on_change)
        await method(delta, old, new, model)

    async def on_change(self, delta, old, new, model):
        """Generic model-change handler.

        :param delta: :class:`juju.client.overrides.Delta`
        :param old: :class:`juju.model.ModelEntity`
        :param new: :class:`juju.model.ModelEntity`
        :param model: :class:`juju.model.Model`

        """
        pass
class ModelState(object):
    """Holds the state of the model, including the delta history of all
    entities in the model.

    """
    def __init__(self, model):
        self.model = model
        # entity_type -> entity_id -> deque of data dicts (None == removed)
        self.state = dict()

    def _live_entity_map(self, entity_type):
        """Return an id:Entity map of all the living entities of
        type ``entity_type``.

        """
        return {
            entity_id: self.get_entity(entity_type, entity_id)
            for entity_id, history in self.state.get(entity_type, {}).items()
            if history[-1] is not None
        }

    @property
    def applications(self):
        """Return a map of application-name:Application for all applications
        currently in the model.

        """
        return self._live_entity_map('application')

    @property
    def machines(self):
        """Return a map of machine-id:Machine for all machines currently in
        the model.

        """
        return self._live_entity_map('machine')

    @property
    def units(self):
        """Return a map of unit-id:Unit for all units currently in
        the model.

        """
        return self._live_entity_map('unit')

    def entity_history(self, entity_type, entity_id):
        """Return the history deque for an entity."""
        return self.state[entity_type][entity_id]

    def entity_data(self, entity_type, entity_id, history_index):
        """Return the data dict for an entity at a specific index of its
        history.

        """
        return self.entity_history(entity_type, entity_id)[history_index]

    def apply_delta(self, delta):
        """Apply delta to our state and return a copy of the
        affected object as it was before and after the update, e.g.:

            old_obj, new_obj = self.apply_delta(delta)

        old_obj may be None if the delta is for the creation of a new object,
        e.g. a new application or unit is deployed.

        new_obj will never be None, but may be dead (new_obj.dead == True)
        if the object was deleted as a result of the delta being applied.

        """
        history = (
            self.state
            .setdefault(delta.entity, {})
            .setdefault(delta.get_id(), collections.deque())
        )

        history.append(delta.data)
        if delta.type == 'remove':
            # A trailing None marks the entity as dead.
            history.append(None)

        entity = self.get_entity(delta.entity, delta.get_id())
        return entity.previous(), entity

    def get_entity(
            self, entity_type, entity_id, history_index=-1, connected=True):
        """Return an object instance for the given entity_type and id.

        By default the object state matches the most recent state from
        Juju. To get an instance of the object in an older state, pass
        history_index, an index into the history deque for the entity.

        """
        if history_index < 0 and history_index != -1:
            # Translate a negative index into a non-negative one so the
            # returned entity pins to a fixed point in history.
            history_index += len(self.entity_history(entity_type, entity_id))
            if history_index < 0:
                return None

        try:
            self.entity_data(entity_type, entity_id, history_index)
        except IndexError:
            return None

        entity_class = get_entity_class(entity_type)
        return entity_class(
            entity_id, self.model, history_index=history_index,
            connected=connected)
class ModelEntity(object):
    """An object in the Model tree"""

    def __init__(self, entity_id, model, history_index=-1, connected=True):
        """Initialize a new entity

        :param entity_id str: The unique id of the object in the model
        :param model: The model instance in whose object tree this
            entity resides
        :history_index int: The index of this object's state in the model's
            history deque for this entity
        :connected bool: Flag indicating whether this object gets live updates
            from the model

        """
        self.entity_id = entity_id
        self.model = model
        self._history_index = history_index
        self.connected = connected
        self.connection = model.connection

    def __repr__(self):
        return '<{} entity_id="{}">'.format(type(self).__name__,
                                            self.entity_id)

    def __getattr__(self, name):
        """Fetch object attributes from the underlying data dict held in the
        model.

        """
        return self.safe_data[name]

    def __bool__(self):
        return bool(self.data)

    def on_change(self, callable_):
        """Add a change observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'change', self.entity_id)

    def on_remove(self, callable_):
        """Add a remove observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'remove', self.entity_id)

    @property
    def entity_type(self):
        """A string identifying the entity type of this object, e.g.
        'application' or 'unit', etc.

        """
        return self.__class__.__name__.lower()

    @property
    def current(self):
        """Return True if this object represents the current state of the
        entity in the underlying model.

        This will be True except when the object represents an entity at a
        non-latest state in history, e.g. if the object was obtained by calling
        .previous() on another object.

        """
        return self._history_index == -1

    @property
    def dead(self):
        """Returns True if this entity no longer exists in the underlying
        model.

        """
        return (
            self.model.state.entity_data(
                self.entity_type, self.entity_id, -1) is None
        )

    @property
    def alive(self):
        """Returns True if this entity still exists in the underlying
        model.

        """
        return not self.dead

    @property
    def data(self):
        """The data dictionary for this entity.

        """
        return self.model.state.entity_data(
            self.entity_type, self.entity_id, self._history_index)

    @property
    def safe_data(self):
        """The data dictionary for this entity.

        If this `ModelEntity` points to the dead state, it will
        raise `DeadEntityException`.

        """
        if self.data is None:
            raise DeadEntityException(
                "Entity {}:{} is dead - its attributes can no longer be "
                "accessed. Use the .previous() method on this object to get "
                "a copy of the object at its previous state.".format(
                    self.entity_type, self.entity_id))
        return self.data

    def previous(self):
        """Return a copy of this object as was at its previous state in
        history.

        Returns None if this object is new (and therefore has no history).

        The returned object is always "disconnected", i.e. does not receive
        live updates.

        """
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, self._history_index - 1,
            connected=False)

    def next(self):
        """Return a copy of this object at its next state in
        history.

        Returns None if this object is already the latest.

        The returned object is "disconnected", i.e. does not receive
        live updates, unless it is current (latest).

        """
        if self._history_index == -1:
            return None

        new_index = self._history_index + 1
        connected = (
            new_index == len(self.model.state.entity_history(
                self.entity_type, self.entity_id)) - 1
        )
        # BUGFIX: the original passed ``self._history_index - 1`` here
        # (copy-paste from previous()), which returned the *previous*
        # state instead of the next one.
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, new_index,
            connected=connected)

    def latest(self):
        """Return a copy of this object at its current state in the model.

        Returns self if this object is already the latest.

        The returned object is always "connected", i.e. receives
        live updates from the model.

        """
        if self._history_index == -1:
            return self

        return self.model.state.get_entity(self.entity_type, self.entity_id)
def __init__(self, loop=None):
    """Instantiate a new connected Model.

    :param loop: an asyncio event loop

    """
    self.loop = loop or asyncio.get_event_loop()
    self.connection = None
    # Observers are weakly referenced so they go away when the owner does.
    self.observers = weakref.WeakValueDictionary()
    self.state = ModelState(self)
    self.info = None
    self._watcher_task = None
    # Events coordinating startup/shutdown of the watcher task.
    self._watch_shutdown = asyncio.Event(loop=self.loop)
    self._watch_received = asyncio.Event(loop=self.loop)
    self._charmstore = CharmStore(self.loop)
async def connect(self, *args, **kw):
    """Connect to an arbitrary Juju model.

    args and kw are passed through to Connection.connect()

    """
    if 'loop' not in kw:
        # Default the connection onto this model's event loop.
        kw['loop'] = self.loop
    self.connection = await connection.Connection.connect(*args, **kw)
    await self._after_connect()
async def connect_current(self):
    """Connect to the current Juju model.

    """
    self.connection = await connection.Connection.connect_current(
        self.loop)
    await self._after_connect()
async def connect_model(self, model_name):
    """Connect to a specific Juju model by name.

    :param model_name: Format [controller:][user/]model

    """
    self.connection = await connection.Connection.connect_model(model_name,
                                                                self.loop)
    await self._after_connect()
async def _after_connect(self):
    """Run initialization steps after connecting to websocket.

    """
    # Start the delta watcher and wait for the first batch so that
    # model state is populated before the caller proceeds.
    self._watch()
    await self._watch_received.wait()
    await self.get_info()
async def disconnect(self):
    """Shut down the watcher task and close websockets.

    """
    self._stop_watching()
    if self.connection and self.connection.is_open:
        # Wait for the watcher to acknowledge cancellation before
        # tearing down the underlying connection.
        await self._watch_shutdown.wait()
        log.debug('Closing model connection')
        await self.connection.close()
        self.connection = None
async def add_local_charm_dir(self, charm_dir, series):
    """Upload a local charm to the model.

    This will automatically generate an archive from
    the charm dir.

    :param charm_dir: Path to the charm directory
    :param series: Charm series

    """
    fh = tempfile.NamedTemporaryFile()
    CharmArchiveGenerator(charm_dir).make_archive(fh.name)
    with fh:
        # add_local_charm blocks on the HTTP upload, so run it in an
        # executor to avoid stalling the event loop.
        func = partial(
            self.add_local_charm, fh, series, os.stat(fh.name).st_size)
        charm_url = await self.loop.run_in_executor(None, func)

    log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
    return charm_url
def add_local_charm(self, charm_file, series, size=None):
    """Upload a local charm archive to the model.

    Returns the 'local:...' url that should be used to deploy the charm.

    :param charm_file: Path to charm zip archive
    :param series: Charm series
    :param size: Size of the archive, in bytes
    :return str: 'local:...' url for deploying the charm
    :raises: :class:`JujuError` if the upload fails

    Uses an https endpoint at the same host:port as the wss.
    Supports large file uploads.

    .. warning::

       This method will block. Consider using :meth:`add_local_charm_dir`
       instead.

    """
    conn, headers, path_prefix = self.connection.https_connection()
    path = "%s/charms?series=%s" % (path_prefix, series)
    headers['Content-Type'] = 'application/zip'
    if size:
        headers['Content-Length'] = size
    conn.request("POST", path, charm_file, headers)
    response = conn.getresponse()
    result = response.read().decode()
    if not response.status == 200:
        raise JujuError(result)
    result = json.loads(result)
    return result['charm-url']
def all_units_idle(self):
    """Return True if all units are idle.

    """
    for unit in self.units.values():
        unit_status = unit.data['agent-status']['current']
        if unit_status != 'idle':
            return False
    return True
async def reset(self, force=False):
    """Reset the model to a clean state.

    :param bool force: Force-terminate machines.

    This returns only after the model has reached a clean state. "Clean"
    means no applications or machines exist in the model.

    """
    log.debug('Resetting model')
    for app in self.applications.values():
        # NOTE(review): reconstructed line — assumes Application exposes
        # destroy(); confirm against the Application entity class.
        await app.destroy()
    for machine in self.machines.values():
        await machine.destroy(force=force)
    await self.block_until(
        lambda: len(self.machines) == 0
    )
async def block_until(self, *conditions, timeout=None, wait_period=0.5):
    """Return only after all conditions are true.

    :param conditions: Zero-arg callables, all of which must return truthy.
    :param timeout: Seconds before :class:`asyncio.TimeoutError`, or None.
    :param wait_period: Seconds to sleep between condition polls.

    """
    async def _block():
        # Poll the conditions until every one of them holds.
        while not all(c() for c in conditions):
            await asyncio.sleep(wait_period, loop=self.loop)
    await asyncio.wait_for(_block(), timeout, loop=self.loop)
@property
def applications(self):
    """Return a map of application-name:Application for all applications
    currently in the model.

    """
    return self.state.applications

@property
def machines(self):
    """Return a map of machine-id:Machine for all machines currently in
    the model.

    """
    return self.state.machines

@property
def units(self):
    """Return a map of unit-id:Unit for all units currently in
    the model.

    """
    return self.state.units
async def get_info(self):
    """Return a client.ModelInfo object for this Model.

    Retrieves latest info for this Model from the api server. The
    return value is cached on the Model.info attribute so that the
    valued may be accessed again without another api call, if
    desired.

    This method is called automatically when the Model is connected,
    resulting in Model.info being initialized without requiring an
    explicit call to this method.

    """
    facade = client.ClientFacade()
    facade.connect(self.connection)

    self.info = await facade.ModelInfo()
    log.debug('Got ModelInfo: %s', vars(self.info))

    return self.info
def add_observer(
        self, callable_, entity_type=None, action=None, entity_id=None,
        predicate=None):
    """Register an "on-model-change" callback

    Once the model is connected, ``callable_``
    will be called each time the model changes. ``callable_`` should
    be Awaitable and accept the following positional arguments:

        delta - An instance of :class:`juju.delta.EntityDelta`
            containing the raw delta data recv'd from the Juju
            websocket.

        old_obj - If the delta modifies an existing object in the model,
            old_obj will be a copy of that object, as it was before the
            delta was applied. Will be None if the delta creates a new
            object.

        new_obj - A copy of the new or updated object, after the delta
            is applied. Will be None if the delta removes an entity
            from the model.

        model - The :class:`Model` itself.

    Events for which ``callable_`` is called can be specified by passing
    entity_type, action, and/or entitiy_id filter criteria, e.g.::

        add_observer(
            entity_type='application', action='add', entity_id='ubuntu')

    For more complex filtering conditions, pass a predicate function. It
    will be called with a delta as its only argument. If the predicate
    function returns True, the ``callable_`` will be called.

    """
    observer = _Observer(
        callable_, entity_type, action, entity_id, predicate)
    # WeakValueDictionary keyed on the observer keeps the registration
    # alive only as long as the caller holds callable_.
    self.observers[observer] = callable_
def _watch(self):
    """Start an asynchronous watch against this model.

    See :meth:`add_observer` to register an onchange callback.

    """
    async def _start_watch():
        self._watch_shutdown.clear()
        try:
            allwatcher = watcher.AllWatcher()
            self._watch_conn = await self.connection.clone()
            allwatcher.connect(self._watch_conn)
            while True:
                results = await allwatcher.Next()
                for delta in results.deltas:
                    delta = get_entity_delta(delta)
                    old_obj, new_obj = self.state.apply_delta(delta)
                    # XXX: Might not want to shield at this level
                    # We are shielding because when the watcher is
                    # canceled (on disconnect()), we don't want all of
                    # its children (every observer callback) to be
                    # canceled with it. So we shield them. But this means
                    # they can *never* be canceled.
                    await asyncio.shield(
                        self._notify_observers(delta, old_obj, new_obj),
                        loop=self.loop)
                self._watch_received.set()
        except CancelledError:
            log.debug('Closing watcher connection')
            await self._watch_conn.close()
            self._watch_shutdown.set()
            self._watch_conn = None

    log.debug('Starting watcher task')
    self._watcher_task = self.loop.create_task(_start_watch())
def _stop_watching(self):
    """Stop the asynchronous watch against this model.

    """
    log.debug('Stopping watcher task')
    if self._watcher_task:
        self._watcher_task.cancel()
async def _notify_observers(self, delta, old_obj, new_obj):
    """Call observing callbacks, notifying them of a change in model state

    :param delta: The raw change from the watcher
        (:class:`juju.client.overrides.Delta`)
    :param old_obj: The object in the model that this delta updates.
        May be None.
    :param new_obj: The object in the model that is created or updated
        by applying this delta.

    """
    if new_obj and not old_obj:
        # NOTE(review): reconstructed line — a delta that creates a new
        # object is reported to observers as an 'add'; confirm intent.
        delta.type = 'add'

    log.debug(
        'Model changed: %s %s %s',
        delta.entity, delta.type, delta.get_id())

    for o in self.observers:
        if o.cares_about(delta):
            # Fire callbacks concurrently; do not await them in-line.
            asyncio.ensure_future(o(delta, old_obj, new_obj, self),
                                  loop=self.loop)
async def _wait(self, entity_type, entity_id, action, predicate=None):
    """
    Block the calling routine until a given action has happened to the
    given entity

    :param entity_type: The entity's type.
    :param entity_id: The entity's id.
    :param action: the type of action (e.g., 'add', 'change', or 'remove')
    :param predicate: optional callable that must take as an
        argument a delta, and must return a boolean, indicating
        whether the delta contains the specific action we're looking
        for. For example, you might check to see whether a 'change'
        has a 'completed' status. See the _Observer class for details.

    """
    q = asyncio.Queue(loop=self.loop)

    async def callback(delta, old, new, model):
        await q.put(delta.get_id())

    self.add_observer(callback, entity_type, action, entity_id, predicate)
    entity_id = await q.get()
    # object might not be in the entity_map if we were waiting for a
    # 'remove' action
    return self.state._live_entity_map(entity_type).get(entity_id)
700 async def _wait_for_new(self
, entity_type
, entity_id
=None, predicate
=None):
701 """Wait for a new object to appear in the Model and return it.
703 Waits for an object of type ``entity_type`` with id ``entity_id``.
704 If ``entity_id`` is ``None``, it will wait for the first new entity
707 This coroutine blocks until the new object appears in the model.
710 # if the entity is already in the model, just return it
711 if entity_id
in self
.state
._live
_entity
_map
(entity_type
):
712 return self
.state
._live
_entity
_map
(entity_type
)[entity_id
]
713 # if we know the entity_id, we can trigger on any action that puts
714 # the enitty into the model; otherwise, we have to watch for the
715 # next "add" action on that entity_type
716 action
= 'add' if entity_id
is None else None
717 return await self
._wait
(entity_type
, entity_id
, action
, predicate
)
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete."""

    if action_id.startswith("action-"):
        # if we've been passed action.tag, transform it into the
        # id that the api deltas will use.
        action_id = action_id[7:]

    def predicate(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, 'change', predicate)
async def add_machine(
        self, spec=None, constraints=None, disks=None, series=None):
    """Start a new, empty machine and optionally a container, or add a
    container to a machine.

    :param str spec: Machine specification
        Examples::

            (None) - starts a new machine
            'lxd' - starts a new machine with one lxd container
            'lxd:4' - starts a new lxd container on machine 4
            'ssh:user@10.10.0.3' - manually provisions a machine with ssh
            'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS
            'maas2.name' - acquire machine maas2.name on MAAS

    :param dict constraints: Machine constraints
    :param list disks: List of disk constraint dictionaries
    :param str series: Series, e.g. 'xenial'

    Supported container types are: lxd, kvm

    When deploying a container to an existing machine, constraints cannot
    be used.

    """
    params = client.AddMachineParams()
    params.jobs = ['JobHostUnits']

    if spec:
        placement = parse_placement(spec)
        if placement:
            params.placement = placement[0]

    if constraints:
        params.constraints = client.Value.from_json(constraints)

    if disks:
        params.disks = [
            client.Constraints.from_json(o) for o in disks]

    if series:
        params.series = series

    # Submit the request.
    client_facade = client.ClientFacade()
    client_facade.connect(self.connection)
    results = await client_facade.AddMachines([params])
    error = results.machines[0].error
    if error:
        # BUGFIX: the original passed the message as a second positional
        # argument to ValueError, so it was never interpolated into "%s".
        raise ValueError("Error adding machine: %s" % error.message)
    machine_id = results.machines[0].machine
    log.debug('Added new machine %s', machine_id)
    return await self._wait_for_new('machine', machine_id)
async def add_relation(self, relation1, relation2):
    """Add a relation between two applications.

    :param str relation1: '<application>[:<relation_name>]'
    :param str relation2: '<application>[:<relation_name>]'

    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    log.debug(
        'Adding relation %s <-> %s', relation1, relation2)

    try:
        result = await app_facade.AddRelation([relation1, relation2])
    except JujuAPIError as e:
        # An already-existing relation is treated as success, not error.
        if 'relation already exists' not in e.message:
            raise
        log.debug(
            'Relation %s <-> %s already exists', relation1, relation2)
        # TODO: if relation already exists we should return the
        # Relation ModelEntity here
        return None

    def predicate(delta):
        endpoints = {}
        for endpoint in delta.data['endpoints']:
            endpoints[endpoint['application-name']] = endpoint['relation']
        return endpoints == result.endpoints

    return await self._wait_for_new('relation', None, predicate)
def add_space(self, name, *cidrs):
    """Add a new network space.

    Adds a new space with the given name and associates the given
    (optional) list of existing subnet CIDRs with it.

    :param str name: Name of the space
    :param \*cidrs: Optional list of existing subnet CIDRs

    """
    raise NotImplementedError()

def add_ssh_key(self, key):
    """Add a public SSH key to this model.

    :param str key: The public ssh key

    """
    raise NotImplementedError()
add_ssh_keys = add_ssh_key

def add_subnet(self, cidr_or_id, space, *zones):
    """Add an existing subnet to this model.

    :param str cidr_or_id: CIDR or provider ID of the existing subnet
    :param str space: Network space with which to associate
    :param str \*zones: Zone(s) in which the subnet resides

    """
    raise NotImplementedError()

def get_backups(self):
    """Retrieve metadata for backups in this model.

    """
    raise NotImplementedError()

def block(self, *commands):
    """Add a new block to this model.

    :param str \*commands: The commands to block. Valid values are
        'all-changes', 'destroy-model', 'remove-object'

    """
    raise NotImplementedError()

def get_blocks(self):
    """List blocks for this model.

    """
    raise NotImplementedError()

def get_cached_images(self, arch=None, kind=None, series=None):
    """Return a list of cached OS images.

    :param str arch: Filter by image architecture
    :param str kind: Filter by image kind, e.g. 'lxd'
    :param str series: Filter by image series, e.g. 'xenial'

    """
    raise NotImplementedError()

def create_backup(self, note=None, no_download=False):
    """Create a backup of this model.

    :param str note: A note to store with the backup
    :param bool no_download: Do not download the backup archive
    :return str: Path to downloaded archive

    """
    raise NotImplementedError()

def create_storage_pool(self, name, provider_type, **pool_config):
    """Create or define a storage pool.

    :param str name: Name to give the storage pool
    :param str provider_type: Pool provider type
    :param \*\*pool_config: key/value pool configuration pairs

    """
    raise NotImplementedError()
def debug_log(
        self, no_tail=False, exclude_module=None, include_module=None,
        include=None, level=None, limit=0, lines=10, replay=False,
        exclude=None):
    """Get log messages for this model.

    :param bool no_tail: Stop after returning existing log messages
    :param list exclude_module: Do not show log messages for these logging
        modules
    :param list include_module: Only show log messages for these logging
        modules
    :param list include: Only show log messages for these entities
    :param str level: Log level to show, valid options are 'TRACE',
        'DEBUG', 'INFO', 'WARNING', 'ERROR,
    :param int limit: Return this many of the most recent (possibly
        filtered) lines are shown
    :param int lines: Yield this many of the most recent lines, and keep
        yielding
    :param bool replay: Yield the entire log, and keep yielding
    :param list exclude: Do not show log messages for these entities

    """
    raise NotImplementedError()
async def deploy(
        self, entity_url, application_name=None, bind=None, budget=None,
        channel=None, config=None, constraints=None, force=False,
        num_units=1, plan=None, resources=None, series=None, storage=None,
        to=None):
    """Deploy a new service or bundle.

    :param str entity_url: Charm or bundle url
    :param str application_name: Name to give the service
    :param dict bind: <charm endpoint>:<network space> pairs
    :param dict budget: <budget name>:<limit> pairs
    :param str channel: Charm store channel from which to retrieve
        the charm or bundle, e.g. 'development'
    :param dict config: Charm configuration dictionary
    :param constraints: Service constraints
    :type constraints: :class:`juju.Constraints`
    :param bool force: Allow charm to be deployed to a machine running
        an unsupported series
    :param int num_units: Number of units to deploy
    :param str plan: Plan under which to deploy charm
    :param dict resources: <resource name>:<file path> pairs
    :param str series: Series on which to deploy
    :param dict storage: Storage constraints TODO how do these look?
    :param to: Placement directive as a string. For example:

        '23' - place on machine 23
        'lxd:7' - place in new lxd container on machine 7
        '24/lxd/3' - place in container 3 on machine 24

        If None, a new machine is provisioned.

    TODO::

        - application_name is required; fill this in automatically if not
          provided by caller
        - series is required; how do we pick a default?

    """
    if to:
        placement = parse_placement(to)
    else:
        placement = []

    if storage:
        storage = {
            k: client.Constraints(**v)
            for k, v in storage.items()
        }

    is_local = (
        entity_url.startswith('local:') or
        os.path.isdir(entity_url)
    )
    entity_id = await self.charmstore.entityId(entity_url) \
        if not is_local else entity_url

    app_facade = client.ApplicationFacade()
    client_facade = client.ClientFacade()
    app_facade.connect(self.connection)
    client_facade.connect(self.connection)

    is_bundle = ((is_local and
                  (Path(entity_id) / 'bundle.yaml').exists()) or
                 (not is_local and 'bundle/' in entity_id))

    if is_bundle:
        handler = BundleHandler(self)
        await handler.fetch_plan(entity_id)
        await handler.execute_plan()
        extant_apps = {app for app in self.applications}
        pending_apps = set(handler.applications) - extant_apps
        if pending_apps:
            # new apps will usually be in the model by now, but if some
            # haven't made it yet we'll need to wait on them to be added
            await asyncio.gather(*[
                asyncio.ensure_future(
                    self._wait_for_new('application', app_name),
                    loop=self.loop)
                for app_name in pending_apps
            ], loop=self.loop)
        return [app for name, app in self.applications.items()
                if name in handler.applications]
    else:
        log.debug(
            'Deploying %s', entity_id)

        if not is_local:
            # Derive application name and series from the charm store id,
            # e.g. 'cs:xenial/ubuntu-10'.
            parts = entity_id[3:].split('/')
            if parts[0].startswith('~'):
                parts.pop(0)
            if not application_name:
                application_name = parts[-1].split('-')[0]
            if not series:
                # Fall back to the first series the charm claims to support.
                entity = await self.charmstore.entity(entity_id)
                ss = entity['Meta']['supported-series']
                series = ss['SupportedSeries'][0]
            await client_facade.AddCharm(channel, entity_id)
        elif not entity_id.startswith('local:'):
            # We have a local charm dir that needs to be uploaded
            charm_dir = os.path.abspath(
                os.path.expanduser(entity_id))
            series = series or get_charm_series(charm_dir)
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Pass a 'series' kwarg to Model.deploy().".format(
                        charm_dir))
            entity_id = await self.add_local_charm_dir(charm_dir, series)

        app = client.ApplicationDeploy(
            application=application_name,
            channel=channel,
            charm_url=entity_id,
            config=config,
            constraints=parse_constraints(constraints),
            endpoint_bindings=bind,
            num_units=num_units,
            resources=resources,
            series=series,
            storage=storage,
        )
        if placement:
            app.placement = placement

        result = await app_facade.Deploy([app])
        errors = [r.error.message for r in result.results if r.error]
        if errors:
            raise JujuError('\n'.join(errors))
        return await self._wait_for_new('application', application_name)
def destroy(self):
    """Terminate all machines and resources for this model.

    """
    raise NotImplementedError()
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    log.debug(
        'Destroying unit%s %s',
        # BUGFIX: pluralization was inverted in the original
        # ("== 1" appended the 's' for a single unit).
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))

    return await app_facade.DestroyUnits(list(unit_names))
destroy_units = destroy_unit
1092 def get_backup(self
, archive_id
):
1093 """Download a backup archive file.
1095 :param str archive_id: The id of the archive to download
1096 :return str: Path to the archive file
1099 raise NotImplementedError()
1102 self
, num_controllers
=0, constraints
=None, series
=None, to
=None):
1103 """Ensure sufficient controllers exist to provide redundancy.
1105 :param int num_controllers: Number of controllers to make available
1106 :param constraints: Constraints to apply to the controller machines
1107 :type constraints: :class:`juju.Constraints`
1108 :param str series: Series of the controller machines
1109 :param list to: Placement directives for controller machines, e.g.::
1112 'lxc:7' - new lxc container on machine 7
1113 '24/lxc/3' - lxc container 3 or machine 24
1115 If None, a new machine is provisioned.
1118 raise NotImplementedError()
1120 def get_config(self
):
1121 """Return the configuration settings for this model.
1124 raise NotImplementedError()
1126 def get_constraints(self
):
1127 """Return the machine constraints for this model.
1130 raise NotImplementedError()
1132 def grant(self
, username
, acl
='read'):
1133 """Grant a user access to this model.
1135 :param str username: Username
1136 :param str acl: Access control ('read' or 'write')
1139 raise NotImplementedError()
1141 def import_ssh_key(self
, identity
):
1142 """Add a public SSH key from a trusted indentity source to this model.
1144 :param str identity: User identity in the form <lp|gh>:<username>
1147 raise NotImplementedError()
1148 import_ssh_keys
= import_ssh_key
1150 def get_machines(self
, machine
, utc
=False):
1151 """Return list of machines in this model.
1153 :param str machine: Machine id, e.g. '0'
1154 :param bool utc: Display time as UTC in RFC3339 format
1157 raise NotImplementedError()
1159 def get_shares(self
):
1160 """Return list of all users with access to this model.
1163 raise NotImplementedError()
1165 def get_spaces(self
):
1166 """Return list of all known spaces, including associated subnets.
1169 raise NotImplementedError()
1171 def get_ssh_key(self
):
1172 """Return known SSH keys for this model.
1175 raise NotImplementedError()
1176 get_ssh_keys
= get_ssh_key
def get_storage(self, filesystem=False, volume=False):
    """Return details of storage instances.

    :param bool filesystem: Include filesystem storage
    :param bool volume: Include volume storage

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def get_storage_pools(self, names=None, providers=None):
    """Return list of storage pools.

    :param list names: Only include pools with these names
    :param list providers: Only include pools for these providers

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def get_subnets(self, space=None, zone=None):
    """Return list of known subnets.

    :param str space: Only include subnets in this space
    :param str zone: Only include subnets in this zone

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def remove_blocks(self):
    """Remove all blocks from this model.

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def remove_backup(self, backup_id):
    """Remove the given backup from this model.

    :param str backup_id: The id of the backup to remove

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def remove_cached_images(self, arch=None, kind=None, series=None):
    """Remove cached OS images.

    :param str arch: Architecture of the images to remove
    :param str kind: Image kind to remove, e.g. 'lxd'
    :param str series: Image series to remove, e.g. 'xenial'

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def remove_machine(self, *machine_ids):
    """Remove a machine from this model.

    :param str \*machine_ids: Ids of the machines to remove

    Not yet implemented by this client.
    """
    raise NotImplementedError()
# Plural alias for convenience.
remove_machines = remove_machine
def remove_ssh_key(self, *keys):
    """Remove a public SSH key(s) from this model.

    :param str \*keys: Keys to remove

    Not yet implemented by this client.
    """
    raise NotImplementedError()
# Plural alias for convenience.
remove_ssh_keys = remove_ssh_key
def restore_backup(
        self, bootstrap=False, constraints=None, archive=None,
        backup_id=None, upload_tools=False):
    """Restore a backup archive to a new controller.

    :param bool bootstrap: Bootstrap a new state machine
    :param constraints: Model constraints
    :type constraints: :class:`juju.Constraints`
    :param str archive: Path to backup archive to restore
    :param str backup_id: Id of backup to restore
    :param bool upload_tools: Upload tools if bootstrapping a new machine

    Not yet implemented by this client.
    """
    # NOTE(review): the original `def` line was lost in extraction; the
    # name is inferred from the docstring — confirm against upstream.
    raise NotImplementedError()
def retry_provisioning(self):
    """Retry provisioning for failed machines.

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def revoke(self, username, acl='read'):
    """Revoke a user's access to this model.

    :param str username: Username to revoke
    :param str acl: Access control ('read' or 'write')

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def run(self, command, timeout=None):
    """Run command on all machines in this model.

    :param str command: The command to run
    :param int timeout: Time to wait before command is considered failed

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def set_config(self, **config):
    """Set configuration keys on this model.

    :param \*\*config: Config key/values

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def set_constraints(self, constraints):
    """Set machine constraints on this model.

    :param :class:`juju.Constraints` constraints: Machine constraints

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def get_action_output(self, action_uuid, wait=-1):
    """Get the results of an action by ID.

    :param str action_uuid: Id of the action
    :param int wait: Time in seconds to wait for action to complete

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def get_action_status(self, uuid_or_prefix=None, name=None):
    """Get the status of all actions, filtered by ID, ID prefix, or
    action name.

    :param str uuid_or_prefix: Filter by action uuid or prefix
    :param str name: Filter by action name

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def get_budget(self, budget_name):
    """Get budget usage info.

    :param str budget_name: Name of budget

    Not yet implemented by this client.
    """
    raise NotImplementedError()
async def get_status(self, filters=None, utc=False):
    """Return the status of the model.

    :param str filters: Optional list of applications, units, or machines
        to include, which can use wildcards ('*').
    :param bool utc: Display time as UTC in RFC3339 format

    """
    # Ask the controller for a full status report via the client facade.
    facade = client.ClientFacade()
    facade.connect(self.connection)
    return await facade.FullStatus(filters)
def sync_tools(
        self, all_=False, destination=None, dry_run=False, public=False,
        source=None, stream=None, version=None):
    """Copy Juju tools into this model.

    :param bool all_: Copy all versions, not just the latest
    :param str destination: Path to local destination directory
    :param bool dry_run: Don't do the actual copy
    :param bool public: Tools are for a public cloud, so generate mirrors
    :param str source: Path to local source directory
    :param str stream: Simplestreams stream for which to sync metadata
    :param str version: Copy a specific major.minor version

    Not yet implemented by this client.
    """
    # NOTE(review): the original `def` line was lost in extraction; the
    # name is inferred from the docstring — confirm against upstream.
    raise NotImplementedError()
def unblock(self, *commands):
    """Unblock an operation that would alter this model.

    :param str \*commands: The commands to unblock. Valid values are
        'all-changes', 'destroy-model', 'remove-object'

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def unset_config(self, *keys):
    """Unset configuration on this model.

    :param str \*keys: The keys to unset

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def upgrade_gui(self):
    """Upgrade the Juju GUI for this model.

    Not yet implemented by this client.
    """
    raise NotImplementedError()
def upgrade_juju(
        self, dry_run=False, reset_previous_upgrade=False,
        upload_tools=False, version=None):
    """Upgrade Juju on all machines in a model.

    :param bool dry_run: Don't do the actual upgrade
    :param bool reset_previous_upgrade: Clear the previous (incomplete)
        upgrade status
    :param bool upload_tools: Upload local version of tools
    :param str version: Upgrade to a specific version

    Not yet implemented by this client.
    """
    # NOTE(review): the original `def` line was lost in extraction; the
    # name is inferred from the docstring — confirm against upstream.
    raise NotImplementedError()
def upload_backup(self, archive_path):
    """Store a backup archive remotely in Juju.

    :param str archive_path: Path to local archive

    Not yet implemented by this client.
    """
    raise NotImplementedError()
@property
def charmstore(self):
    """The async :class:`CharmStore` wrapper attached to this model."""
    # Attribute access elsewhere (e.g. `model.charmstore` without a call)
    # shows this is a property; the decorator line was lost in extraction.
    return self._charmstore
async def get_metrics(self, *tags):
    r"""Retrieve metrics.

    :param str \*tags: Tags of entities from which to retrieve metrics.
        No tags retrieves the metrics of all units in the model.
    :return: Dictionary of unit_name:metrics
    """
    log.debug("Retrieving metrics for %s",
              ', '.join(tags) if tags else "all units")

    facade = client.MetricsDebugFacade()
    facade.connect(self.connection)

    requested = [client.Entity(tag) for tag in tags]
    response = await facade.GetMetrics(requested)

    # Group metric dicts by owning unit name.
    unit_metrics = collections.defaultdict(list)
    for entry in response.results:
        error = entry.error
        if error:
            # An invalid tag is a caller mistake; anything else is an
            # unexpected server-side failure.
            if "is not a valid tag" in error:
                raise ValueError(error.message)
            raise Exception(error.message)
        for metric in entry.metrics:
            unit_metrics[metric.unit].append(vars(metric))

    return unit_metrics
def get_charm_series(path):
    """Inspect the charm directory at ``path`` and return a default
    series from its metadata.yaml (the first item in the 'series' list).

    :param path: Path to a local charm directory.
    :return: The first declared series as a string, or None if no
        series can be determined (missing metadata.yaml or no
        'series' key).
    """
    md = Path(path) / "metadata.yaml"
    if not md.exists():
        # Docstring contract: no metadata means no determinable series.
        return None
    # safe_load: metadata.yaml is plain data, and yaml.load() would
    # permit arbitrary Python object construction. The context manager
    # closes the file deterministically (the old code leaked the handle).
    with md.open() as f:
        data = yaml.safe_load(f)
    series = data.get('series')
    return series[0] if series else None
class BundleHandler(object):
    """
    Handle bundles by using the API to translate bundle YAML into a plan of
    steps and then dispatching each of those using the API.
    """
    def __init__(self, model):
        self.model = model
        self.charmstore = model.charmstore
        self.plan = []
        # Maps plan step ids ("$deploy-1" style placeholders, sans '$')
        # to the concrete results produced when executing each step.
        self.references = {}
        # Snapshot existing units per application so addUnit can reuse
        # units already present in the model instead of adding new ones.
        self._units_by_app = {}
        for unit_name, unit in model.units.items():
            app_units = self._units_by_app.setdefault(unit.application, [])
            app_units.append(unit_name)
        self.client_facade = client.ClientFacade()
        self.client_facade.connect(model.connection)
        self.app_facade = client.ApplicationFacade()
        self.app_facade.connect(model.connection)
        self.ann_facade = client.AnnotationsFacade()
        self.ann_facade.connect(model.connection)

    async def _handle_local_charms(self, bundle):
        """Search for references to local charms (i.e. filesystem paths)
        in the bundle. Upload the local charms to the model, and replace
        the filesystem paths with appropriate 'local:' paths in the bundle.

        Return the modified bundle.

        :param dict bundle: Bundle dictionary
        :return: Modified bundle dictionary
        """
        apps, args = [], []

        default_series = bundle.get('series')
        for app_name in self.applications:
            app_dict = bundle['services'][app_name]
            charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
            if not os.path.isdir(charm_dir):
                # Not a local filesystem path; leave charm store URLs alone.
                continue
            series = (app_dict.get('series') or
                      default_series or
                      get_charm_series(charm_dir))
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Add a 'series' key to the bundle.".format(charm_dir))
            # Keep track of what we need to update. We keep a list of apps
            # that need to be updated, and a corresponding list of args
            # needed to update those apps.
            apps.append(app_name)
            args.append((charm_dir, series))

        if apps:
            # If we have apps to update, spawn all the coroutines concurrently
            # and wait for them to finish.
            charm_urls = await asyncio.gather(*[
                self.model.add_local_charm_dir(*params)
                for params in args
            ], loop=self.model.loop)
            # Update the 'charm:' entry for each app with the new 'local:' url.
            for app_name, charm_url in zip(apps, charm_urls):
                bundle['services'][app_name]['charm'] = charm_url

        return bundle

    async def fetch_plan(self, entity_id):
        """Load bundle YAML (from a local directory or the charm store)
        and ask the API server to translate it into a change plan.

        :param str entity_id: Local path or charm store id of the bundle.
        :raises JujuError: if the server reports errors in the bundle.
        """
        is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id)
        if is_local:
            bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text()
        else:
            bundle_yaml = await self.charmstore.files(entity_id,
                                                     filename='bundle.yaml',
                                                     read_file=True)
        self.bundle = yaml.safe_load(bundle_yaml)
        self.bundle = await self._handle_local_charms(self.bundle)

        self.plan = await self.client_facade.GetBundleChanges(
            yaml.dump(self.bundle))

        if self.plan.errors:
            raise JujuError('\n'.join(self.plan.errors))

    async def execute_plan(self):
        """Dispatch each step of the fetched plan to the matching
        method on this handler, recording results for later steps."""
        for step in self.plan.changes:
            method = getattr(self, step.method)
            result = await method(*step.args)
            self.references[step.id_] = result

    @property
    def applications(self):
        """Names of all applications defined by the bundle."""
        return list(self.bundle['services'].keys())

    def resolve(self, reference):
        """Resolve a '$'-prefixed placeholder to the result of the plan
        step it refers to; return other values unchanged."""
        if reference and reference.startswith('$'):
            reference = self.references[reference[1:]]
        return reference

    async def addCharm(self, charm, series):
        """
        :param charm string:
            Charm holds the URL of the charm to be added.

        :param series string:
            Series holds the series of the charm to be added
            if the charm default is not sufficient.
        """
        # We don't add local charms because they've already been added
        # by self._handle_local_charms
        if charm.startswith('local:'):
            return charm

        entity_id = await self.charmstore.entityId(charm)
        log.debug('Adding %s', entity_id)
        await self.client_facade.AddCharm(None, entity_id)
        return entity_id

    async def addMachines(self, params=None):
        """
        :param params dict:
            Dictionary specifying the machine to add. All keys are optional.
            Keys include:

            series: string specifying the machine OS series.

            constraints: string holding machine constraints, if any. We'll
                parse this into the json friendly dict that the juju api
                expects.

            container_type: string holding the type of the container (for
                instance "lxd" or "kvm"). It is not specified for top level
                machines.

            parent_id: string holding a placeholder pointing to another
                machine change or to a unit change. This value is only
                specified in the case this machine is a container, in
                which case also ContainerType is set.
        """
        params = params or {}

        # Normalize keys to the snake_case the generated client expects.
        params = {normalize_key(k): params[k] for k in params.keys()}

        # Fix up values, as necessary.
        if 'parent_id' in params:
            params['parent_id'] = self.resolve(params['parent_id'])

        params['constraints'] = parse_constraints(
            params.get('constraints'))
        params['jobs'] = params.get('jobs', ['JobHostUnits'])

        if params.get('container_type') == 'lxc':
            log.warning('Juju 2.0 does not support lxc containers. '
                        'Converting containers to lxd.')
            params['container_type'] = 'lxd'

        # Submit the request.
        params = client.AddMachineParams(**params)
        results = await self.client_facade.AddMachines([params])
        error = results.machines[0].error
        if error:
            # Bug fix: the message must be interpolated with '%'; passing
            # it as a second constructor arg left it unformatted.
            raise ValueError("Error adding machine: %s" % error.message)
        machine = results.machines[0].machine
        log.debug('Added new machine %s', machine)
        return machine

    async def addRelation(self, endpoint1, endpoint2):
        """
        :param endpoint1 string:
        :param endpoint2 string:
            Endpoint1 and Endpoint2 hold relation endpoints in the
            "application:interface" form, where the application is always a
            placeholder pointing to an application change, and the interface is
            optional. Examples are "$deploy-42:web" or just "$deploy-42".
        """
        endpoints = [endpoint1, endpoint2]
        # resolve indirect references
        for i, endpoint in enumerate(endpoints):
            parts = endpoint.split(':')
            parts[0] = self.resolve(parts[0])
            endpoints[i] = ':'.join(parts)

        log.info('Relating %s <-> %s', *endpoints)
        return await self.model.add_relation(*endpoints)

    async def deploy(self, charm, series, application, options, constraints,
                     storage, endpoint_bindings, resources):
        """
        :param charm string:
            Charm holds the URL of the charm to be used to deploy this
            application.

        :param series string:
            Series holds the series of the application to be deployed
            if the charm default is not sufficient.

        :param application string:
            Application holds the application name.

        :param options map[string]interface{}:
            Options holds application options.

        :param constraints string:
            Constraints holds the optional application constraints.

        :param storage map[string]string:
            Storage holds the optional storage constraints.

        :param endpoint_bindings map[string]string:
            EndpointBindings holds the optional endpoint bindings

        :param resources map[string]int:
            Resources identifies the revision to use for each resource
            of the application's charm.
        """
        # resolve indirect references
        charm = self.resolve(charm)
        # stringify all config values for API, and convert to YAML
        options = {k: str(v) for k, v in options.items()}
        options = yaml.dump({application: options}, default_flow_style=False)
        # build param object
        app = client.ApplicationDeploy(
            charm_url=charm,
            series=series,
            application=application,
            # Pass options to config-yaml rather than config, as
            # config-yaml invokes a newer codepath that better handles
            # empty strings in the options values.
            config_yaml=options,
            constraints=parse_constraints(constraints),
            storage=storage,
            endpoint_bindings=endpoint_bindings,
            resources=resources,
        )
        # do the do
        log.info('Deploying %s', charm)
        await self.app_facade.Deploy([app])
        # ensure the app is in the model for future operations
        await self.model._wait_for_new('application', application)
        return application

    async def addUnit(self, application, to):
        """
        :param application string:
            Application holds the application placeholder name for which a unit
            is added.

        :param to string:
            To holds the optional location where to add the unit, as a
            placeholder pointing to another unit change or to a machine change.
        """
        application = self.resolve(application)
        placement = self.resolve(to)
        if self._units_by_app.get(application):
            # enough units for this application already exist;
            # claim one, and carry on
            # NB: this should probably honor placement, but the juju client
            # doesn't, so we're not bothering, either
            unit_name = self._units_by_app[application].pop()
            log.debug('Reusing unit %s for %s', unit_name, application)
            return self.model.units[unit_name]

        log.debug('Adding new unit for %s%s', application,
                  ' to %s' % placement if placement else '')
        return await self.model.applications[application].add_unit(
            count=1,
            to=placement,
        )

    async def expose(self, application):
        """
        :param application string:
            Application holds the placeholder name of the application that must
            be exposed.
        """
        application = self.resolve(application)
        log.info('Exposing %s', application)
        return await self.model.applications[application].expose()

    async def setAnnotations(self, id_, entity_type, annotations):
        """
        :param id_ string:
            Id is the placeholder for the application or machine change
            corresponding to the entity to be annotated.

        :param entity_type EntityType:
            EntityType holds the type of the entity, "application" or
            "machine".

        :param annotations map[string]string:
            Annotations holds the annotations as key/value pairs.
        """
        entity_id = self.resolve(id_)
        try:
            entity = self.model.state.get_entity(entity_type, entity_id)
        except KeyError:
            # Entity not yet observed in the model; wait for its delta.
            entity = await self.model._wait_for_new(entity_type, entity_id)
        return await entity.set_annotations(annotations)
class CharmStore(object):
    """
    Async wrapper around theblues.charmstore.CharmStore that returns
    coroutine versions of its (blocking) methods.
    """
    def __init__(self, loop):
        self.loop = loop
        self._cs = theblues.charmstore.CharmStore(timeout=5)

    def __getattr__(self, name):
        """
        Wrap method calls in coroutines that use run_in_executor to make them
        async.
        """
        attr = getattr(self._cs, name)
        if not callable(attr):
            # Plain attribute: expose a lazy getter and cache it so
            # subsequent lookups bypass __getattr__.
            wrapper = partial(getattr, self._cs, name)
            setattr(self, name, wrapper)
        else:
            async def coro(*args, **kwargs):
                method = partial(attr, *args, **kwargs)
                # Retry transient charm store failures a few times before
                # giving up. NOTE(review): the retry tail was lost in
                # extraction; presumed to re-raise after the last attempt.
                for attempt in range(1, 4):
                    try:
                        return await self.loop.run_in_executor(None, method)
                    except theblues.errors.ServerError:
                        if attempt == 3:
                            raise
                        await asyncio.sleep(1, loop=self.loop)
            setattr(self, name, coro)
        return getattr(self, name)
1790 class CharmArchiveGenerator(object):
def __init__(self, path):
    """Create a generator rooted at the charm directory ``path``.

    :param str path: Charm directory; '~' is expanded and the path is
        made absolute so later prefix checks are reliable.
    """
    self.path = os.path.abspath(os.path.expanduser(path))
def make_archive(self, path):
    """Create archive of directory and write to ``path``.

    :param path: Path to archive

    Ignored:
        * build/\* - This is used for packing the charm itself and any
          similar tasks.
        * \*/.\* - Hidden files are all ignored for now. This will most
          likely be changed into a specific ignore list
          (.bzr, etc)
    """
    # `with` guarantees the zip is closed (and its central directory
    # flushed) even if a walk/copy step raises; the old code leaked an
    # open handle on error.
    with zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, dirnames, filenames in os.walk(self.path):
            relative_path = dirpath[len(self.path) + 1:]
            if relative_path and not self._ignore(relative_path):
                zf.write(dirpath, relative_path)
            for name in filenames:
                archive_name = os.path.join(relative_path, name)
                if not self._ignore(archive_name):
                    real_path = os.path.join(dirpath, name)
                    self._check_type(real_path)
                    if os.path.islink(real_path):
                        self._check_link(real_path)
                        self._write_symlink(
                            zf, os.readlink(real_path), archive_name)
                    else:
                        zf.write(real_path, archive_name)
    return path
def _check_type(self, path):
    """Verify that ``path`` is a regular file or a directory.

    :param str path: Filesystem path to check.
    :return: ``path`` unchanged, when it is a valid charm member.
    :raises ValueError: for any other file type (socket, fifo, device).
    """
    # NOTE(review): the stat call was lost in extraction; os.stat is the
    # presumed original — confirm against upstream.
    s = os.stat(path)
    if stat.S_ISDIR(s.st_mode) or stat.S_ISREG(s.st_mode):
        return path
    # Bug fix: the original format string "Invalid Charm at % %s" had a
    # stray '%', so formatting itself raised instead of producing the
    # intended message.
    raise ValueError("Invalid Charm at %s: %s" % (
        path, "Invalid file type for a charm"))
def _check_link(self, path):
    """Reject symlinks that point outside the charm directory.

    :param str path: Path of a symlink inside the charm tree.
    :raises ValueError: for absolute link targets or targets that
        resolve outside ``self.path``.
    """
    target = os.readlink(path)
    if target[0] == "/":
        raise ValueError(
            "Invalid Charm at %s: %s" % (
                path, "Absolute links are invalid"))
    resolved = os.path.join(os.path.dirname(path), target)
    if not resolved.startswith(os.path.abspath(self.path)):
        raise ValueError(
            "Invalid charm at %s %s" % (
                path, "Only internal symlinks are allowed"))
def _write_symlink(self, zf, link_target, link_path):
    """Package symlinks with appropriate zipfile metadata.

    :param zf: Open :class:`zipfile.ZipFile` to write into.
    :param str link_target: Path the symlink points at (stored as the
        entry's data).
    :param str link_path: Archive name of the symlink entry.
    """
    info = zipfile.ZipInfo()
    info.filename = link_path
    info.create_system = 3  # 3 = Unix, so external_attr mode bits apply
    # Mode bits marking the entry as a symlink, py2/3 compatible:
    # (stat.S_IFLNK | 0o755) << 16 == 2716663808.
    # (The original comment's "27166663808" contained a stray digit.)
    info.external_attr = 2716663808
    zf.writestr(info, link_target)
1859 def _ignore(self
, path
):
1860 if path
== "build" or path
.startswith("build/"):
1862 if path
.startswith('.'):