import asyncio
import collections
import json
import logging
import os
import re
import tempfile
import weakref
from concurrent.futures import CancelledError
from functools import partial
from pathlib import Path

import yaml
import theblues.charmstore
import theblues.errors

from .client import client
from .client import connection
from .client import watcher
from .constraints import parse as parse_constraints, normalize_key
from .delta import get_entity_class
from .delta import get_entity_delta
from .errors import JujuError, JujuAPIError
from .exceptions import DeadEntityException
from .placement import parse as parse_placement
29 log
= logging
.getLogger(__name__
)
32 class _Observer(object):
33 """Wrapper around an observer callable.
35 This wrapper allows filter criteria to be associated with the
36 callable so that it's only called for changes that meet the criteria.
39 def __init__(self
, callable_
, entity_type
, action
, entity_id
, predicate
):
40 self
.callable_
= callable_
41 self
.entity_type
= entity_type
43 self
.entity_id
= entity_id
44 self
.predicate
= predicate
46 self
.entity_id
= str(self
.entity_id
)
47 if not self
.entity_id
.startswith('^'):
48 self
.entity_id
= '^' + self
.entity_id
49 if not self
.entity_id
.endswith('$'):
52 async def __call__(self
, delta
, old
, new
, model
):
53 await self
.callable_(delta
, old
, new
, model
)
55 def cares_about(self
, delta
):
56 """Return True if this observer "cares about" (i.e. wants to be
57 called) for a this delta.
60 if (self
.entity_id
and delta
.get_id() and
61 not re
.match(self
.entity_id
, str(delta
.get_id()))):
64 if self
.entity_type
and self
.entity_type
!= delta
.entity
:
67 if self
.action
and self
.action
!= delta
.type:
70 if self
.predicate
and not self
.predicate(delta
):
class ModelObserver(object):
    """Base class for model observers.

    Subclasses may define ``on_<entity>_<action>`` coroutine methods
    (e.g. ``on_unit_add``); deltas without a matching specific handler
    fall through to :meth:`on_change`.

    """
    async def __call__(self, delta, old, new, model):
        # Dispatch to an entity/action-specific handler when one exists.
        handler_name = 'on_{}_{}'.format(delta.entity, delta.type)
        method = getattr(self, handler_name, self.on_change)
        await method(delta, old, new, model)

    async def on_change(self, delta, old, new, model):
        """Generic model-change handler.

        :param delta: :class:`juju.client.overrides.Delta`
        :param old: :class:`juju.model.ModelEntity`
        :param new: :class:`juju.model.ModelEntity`
        :param model: :class:`juju.model.Model`

        """
        pass
class ModelState(object):
    """Holds the state of the model, including the delta history of all
    entities in the model.

    """
    def __init__(self, model):
        self.model = model
        # entity_type -> entity_id -> deque of state dicts (history)
        self.state = dict()

    def _live_entity_map(self, entity_type):
        """Return an id:Entity map of all the living entities of
        type ``entity_type``.

        """
        return {
            entity_id: self.get_entity(entity_type, entity_id)
            for entity_id, history in self.state.get(entity_type, {}).items()
            if history[-1] is not None
        }

    @property
    def applications(self):
        """Return a map of application-name:Application for all applications
        currently in the model.

        """
        return self._live_entity_map('application')

    @property
    def machines(self):
        """Return a map of machine-id:Machine for all machines currently in
        the model.

        """
        return self._live_entity_map('machine')

    @property
    def units(self):
        """Return a map of unit-id:Unit for all units currently in
        the model.

        """
        return self._live_entity_map('unit')

    def entity_history(self, entity_type, entity_id):
        """Return the history deque for an entity.

        """
        return self.state[entity_type][entity_id]

    def entity_data(self, entity_type, entity_id, history_index):
        """Return the data dict for an entity at a specific index of its
        history.

        """
        return self.entity_history(entity_type, entity_id)[history_index]

    def apply_delta(self, delta):
        """Apply delta to our state and return a copy of the
        affected object as it was before and after the update, e.g.:

            old_obj, new_obj = self.apply_delta(delta)

        old_obj may be None if the delta is for the creation of a new object,
        e.g. a new application or unit is deployed.

        new_obj will never be None, but may be dead (new_obj.dead == True)
        if the object was deleted as a result of the delta being applied.

        """
        history = (
            self.state
            .setdefault(delta.entity, {})
            .setdefault(delta.get_id(), collections.deque())
        )
        history.append(delta.data)
        if delta.type == 'remove':
            # A trailing None marks the entity as dead.
            history.append(None)
        entity = self.get_entity(delta.entity, delta.get_id())
        return entity.previous(), entity

    def get_entity(
            self, entity_type, entity_id, history_index=-1, connected=True):
        """Return an object instance for the given entity_type and id.

        By default the object state matches the most recent state from
        Juju. To get an instance of the object in an older state, pass
        history_index, an index into the history deque for the entity.

        """
        if history_index < 0 and history_index != -1:
            # Normalize negative indices against the history length.
            history_index += len(self.entity_history(entity_type, entity_id))
            if history_index < 0:
                return None

        try:
            self.entity_data(entity_type, entity_id, history_index)
        except IndexError:
            return None

        entity_class = get_entity_class(entity_type)
        return entity_class(
            entity_id, self.model, history_index=history_index,
            connected=connected)
class ModelEntity(object):
    """An object in the Model tree"""

    def __init__(self, entity_id, model, history_index=-1, connected=True):
        """Initialize a new entity

        :param entity_id str: The unique id of the object in the model
        :param model: The model instance in whose object tree this
            entity resides
        :history_index int: The index of this object's state in the model's
            history deque for this entity
        :connected bool: Flag indicating whether this object gets live updates
            from the model.

        """
        self.entity_id = entity_id
        self.model = model
        self._history_index = history_index
        self.connected = connected
        self.connection = model.connection

    def __repr__(self):
        return '<{} entity_id="{}">'.format(type(self).__name__,
                                            self.entity_id)

    def __getattr__(self, name):
        """Fetch object attributes from the underlying data dict held in the
        model.

        """
        return self.safe_data[name]

    def __bool__(self):
        return bool(self.data)

    def on_change(self, callable_):
        """Add a change observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'change', self.entity_id)

    def on_remove(self, callable_):
        """Add a remove observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'remove', self.entity_id)

    @property
    def entity_type(self):
        """A string identifying the entity type of this object, e.g.
        'application' or 'unit', etc.

        """
        return self.__class__.__name__.lower()

    @property
    def current(self):
        """Return True if this object represents the current state of the
        entity in the underlying model.

        This will be True except when the object represents an entity at a
        non-latest state in history, e.g. if the object was obtained by calling
        .previous() on another object.

        """
        return self._history_index == -1

    @property
    def dead(self):
        """Returns True if this entity no longer exists in the underlying
        model.

        """
        return (
            self.data is None or
            self.model.state.entity_data(
                self.entity_type, self.entity_id, -1) is None
        )

    @property
    def alive(self):
        """Returns True if this entity still exists in the underlying
        model.

        """
        return not self.dead

    @property
    def data(self):
        """The data dictionary for this entity.

        """
        return self.model.state.entity_data(
            self.entity_type, self.entity_id, self._history_index)

    @property
    def safe_data(self):
        """The data dictionary for this entity.

        If this `ModelEntity` points to the dead state, it will
        raise `DeadEntityException`.

        """
        if self.data is None:
            raise DeadEntityException(
                "Entity {}:{} is dead - its attributes can no longer be "
                "accessed. Use the .previous() method on this object to get "
                "a copy of the object at its previous state.".format(
                    self.entity_type, self.entity_id))
        return self.data

    def previous(self):
        """Return a copy of this object as was at its previous state in
        history.

        Returns None if this object is new (and therefore has no history).

        The returned object is always "disconnected", i.e. does not receive
        live updates.

        """
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, self._history_index - 1,
            connected=False)

    def next(self):
        """Return a copy of this object at its next state in
        history.

        Returns None if this object is already the latest.

        The returned object is "disconnected", i.e. does not receive
        live updates, unless it is current (latest).

        """
        if self._history_index == -1:
            return None

        new_index = self._history_index + 1
        connected = (
            new_index == len(self.model.state.entity_history(
                self.entity_type, self.entity_id)) - 1
        )
        # Bug fix: the original passed ``self._history_index - 1`` here
        # (copy-paste from previous()), which walked *backwards*; use the
        # computed new_index to advance.
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, new_index,
            connected=connected)

    def latest(self):
        """Return a copy of this object at its current state in the model.

        Returns self if this object is already the latest.

        The returned object is always "connected", i.e. receives
        live updates from the model.

        """
        if self._history_index == -1:
            return self

        return self.model.state.get_entity(self.entity_type, self.entity_id)
def __init__(self, loop=None):
    """Instantiate a new connected Model.

    :param loop: an asyncio event loop

    """
    self.loop = loop or asyncio.get_event_loop()
    self.connection = None
    self.observers = weakref.WeakValueDictionary()
    self.state = ModelState(self)
    # Populated by get_info() after connecting.
    self.info = None
    self._watcher_task = None
    # NOTE: the deprecated ``loop=`` kwarg (removed in Python 3.10) has
    # been dropped; the events bind to the running loop on first use.
    self._watch_shutdown = asyncio.Event()
    self._watch_received = asyncio.Event()
    self._charmstore = CharmStore(self.loop)
async def connect(self, *args, **kw):
    """Connect to an arbitrary Juju model.

    args and kw are passed through to Connection.connect()

    """
    if 'loop' not in kw:
        # Default to this Model's event loop.
        kw['loop'] = self.loop
    self.connection = await connection.Connection.connect(*args, **kw)
    await self._after_connect()
async def connect_current(self):
    """Connect to the current Juju model.

    """
    self.connection = await connection.Connection.connect_current(
        self.loop)
    await self._after_connect()
async def connect_model(self, model_name):
    """Connect to a specific Juju model by name.

    :param model_name: Format [controller:][user/]model

    """
    self.connection = await connection.Connection.connect_model(model_name,
                                                                self.loop)
    await self._after_connect()
413 async def _after_connect(self
):
414 """Run initialization steps after connecting to websocket.
418 await self
._watch
_received
.wait()
419 await self
.get_info()
async def disconnect(self):
    """Shut down the watcher task and close websockets.

    """
    self._stop_watching()
    if self.connection and self.connection.is_open:
        # Let the watcher task finish tearing down its cloned
        # connection before closing our own.
        await self._watch_shutdown.wait()
        log.debug('Closing model connection')
        await self.connection.close()
        self.connection = None
async def add_local_charm_dir(self, charm_dir, series):
    """Upload a local charm to the model.

    This will automatically generate an archive from
    the charm dir.

    :param charm_dir: Path to the charm directory
    :param series: Charm series

    """
    fh = tempfile.NamedTemporaryFile()
    CharmArchiveGenerator(charm_dir).make_archive(fh.name)
    with fh:
        # add_local_charm does blocking HTTP I/O; run it in an executor
        # so the event loop isn't stalled.
        func = partial(
            self.add_local_charm, fh, series, os.stat(fh.name).st_size)
        charm_url = await self.loop.run_in_executor(None, func)

    log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
    return charm_url
def add_local_charm(self, charm_file, series, size=None):
    """Upload a local charm archive to the model.

    Returns the 'local:...' url that should be used to deploy the charm.

    :param charm_file: Path to charm zip archive
    :param series: Charm series
    :param size: Size of the archive, in bytes
    :return str: 'local:...' url for deploying the charm
    :raises: :class:`JujuError` if the upload fails

    Uses an https endpoint at the same host:port as the wss.
    Supports large file uploads.

    .. warning::

       This method will block. Consider using :meth:`add_local_charm_dir`
       instead.

    """
    conn, headers, path_prefix = self.connection.https_connection()
    path = "%s/charms?series=%s" % (path_prefix, series)
    headers['Content-Type'] = 'application/zip'
    if size:
        headers['Content-Length'] = size
    conn.request("POST", path, charm_file, headers)
    response = conn.getresponse()
    result = response.read().decode()
    if not response.status == 200:
        raise JujuError(result)
    result = json.loads(result)
    return result['charm-url']
def all_units_idle(self):
    """Return True if all units are idle.

    """
    for unit in self.units.values():
        unit_status = unit.data['agent-status']['current']
        if unit_status != 'idle':
            return False
    return True
async def reset(self, force=False):
    """Reset the model to a clean state.

    :param bool force: Force-terminate machines.

    This returns only after the model has reached a clean state. "Clean"
    means no applications or machines exist in the model.

    """
    log.debug('Resetting model')
    for app in self.applications.values():
        await app.destroy()
    for machine in self.machines.values():
        await machine.destroy(force=force)
    # NOTE(review): the applications condition was lost in the damaged
    # source; restored per the docstring's definition of "clean".
    await self.block_until(
        lambda: len(self.machines) == 0,
        lambda: len(self.applications) == 0)
async def block_until(self, *conditions, timeout=None, wait_period=0.5):
    """Return only after all conditions are true.

    :param conditions: zero-argument callables; all must return truthy
    :param timeout: optional overall timeout, in seconds
    :param wait_period: delay between condition polls, in seconds

    """
    async def _block():
        while not all(c() for c in conditions):
            # ``loop=`` kwarg (removed in Python 3.10) dropped; sleeps
            # on the running loop.
            await asyncio.sleep(wait_period)
    await asyncio.wait_for(_block(), timeout)
@property
def applications(self):
    """Return a map of application-name:Application for all applications
    currently in the model.

    """
    return self.state.applications

@property
def machines(self):
    """Return a map of machine-id:Machine for all machines currently in
    the model.

    """
    return self.state.machines

@property
def units(self):
    """Return a map of unit-id:Unit for all units currently in
    the model.

    """
    return self.state.units
async def get_info(self):
    """Return a client.ModelInfo object for this Model.

    Retrieves latest info for this Model from the api server. The
    return value is cached on the Model.info attribute so that the
    valued may be accessed again without another api call, if
    desired.

    This method is called automatically when the Model is connected,
    resulting in Model.info being initialized without requiring an
    explicit call to this method.

    """
    facade = client.ClientFacade()
    facade.connect(self.connection)

    self.info = await facade.ModelInfo()
    log.debug('Got ModelInfo: %s', vars(self.info))

    return self.info
def add_observer(
        self, callable_, entity_type=None, action=None, entity_id=None,
        predicate=None):
    """Register an "on-model-change" callback

    Once the model is connected, ``callable_``
    will be called each time the model changes. ``callable_`` should
    be Awaitable and accept the following positional arguments:

        delta - An instance of :class:`juju.delta.EntityDelta`
            containing the raw delta data recv'd from the Juju
            websocket.

        old_obj - If the delta modifies an existing object in the model,
            old_obj will be a copy of that object, as it was before the
            delta was applied. Will be None if the delta creates a new
            object in the model.

        new_obj - A copy of the new or updated object, after the delta
            is applied. Will be None if the delta removes an entity
            from the model.

        model - The :class:`Model` itself.

    Events for which ``callable_`` is called can be specified by passing
    entity_type, action, and/or entity_id filter criteria, e.g.::

        add_observer(
            myfunc,
            entity_type='application', action='add', entity_id='ubuntu')

    For more complex filtering conditions, pass a predicate function. It
    will be called with a delta as its only argument. If the predicate
    function returns True, the ``callable_`` will be called.

    """
    observer = _Observer(
        callable_, entity_type, action, entity_id, predicate)
    # WeakValueDictionary: the observer registration lives only as long
    # as the caller keeps a reference to ``callable_``.
    self.observers[observer] = callable_
608 """Start an asynchronous watch against this model.
610 See :meth:`add_observer` to register an onchange callback.
613 async def _start_watch():
614 self
._watch
_shutdown
.clear()
616 allwatcher
= watcher
.AllWatcher()
617 self
._watch
_conn
= await self
.connection
.clone()
618 allwatcher
.connect(self
._watch
_conn
)
620 results
= await allwatcher
.Next()
621 for delta
in results
.deltas
:
622 delta
= get_entity_delta(delta
)
623 old_obj
, new_obj
= self
.state
.apply_delta(delta
)
624 # XXX: Might not want to shield at this level
625 # We are shielding because when the watcher is
626 # canceled (on disconnect()), we don't want all of
627 # its children (every observer callback) to be
628 # canceled with it. So we shield them. But this means
629 # they can *never* be canceled.
630 await asyncio
.shield(
631 self
._notify
_observers
(delta
, old_obj
, new_obj
),
633 self
._watch
_received
.set()
634 except CancelledError
:
635 log
.debug('Closing watcher connection')
636 await self
._watch
_conn
.close()
637 self
._watch
_shutdown
.set()
638 self
._watch
_conn
= None
640 log
.debug('Starting watcher task')
641 self
._watcher
_task
= self
.loop
.create_task(_start_watch())
def _stop_watching(self):
    """Stop the asynchronous watch against this model.

    """
    log.debug('Stopping watcher task')
    if self._watcher_task:
        self._watcher_task.cancel()
async def _notify_observers(self, delta, old_obj, new_obj):
    """Call observing callbacks, notifying them of a change in model state

    :param delta: The raw change from the watcher
        (:class:`juju.client.overrides.Delta`)
    :param old_obj: The object in the model that this delta updates.
        May be None if the delta creates a new object.
    :param new_obj: The object in the model that is created or updated
        by applying this delta.

    """
    if new_obj and not old_obj:
        # Normalize: a change that materializes a new object is an 'add'.
        delta.type = 'add'

    log.debug(
        'Model changed: %s %s %s',
        delta.entity, delta.type, delta.get_id())

    for o in self.observers:
        if o.cares_about(delta):
            # Fire-and-forget so a slow observer can't stall the watcher.
            asyncio.ensure_future(o(delta, old_obj, new_obj, self))
674 async def _wait(self
, entity_type
, entity_id
, action
, predicate
=None):
676 Block the calling routine until a given action has happened to the
679 :param entity_type: The entity's type.
680 :param entity_id: The entity's id.
681 :param action: the type of action (e.g., 'add', 'change', or 'remove')
682 :param predicate: optional callable that must take as an
683 argument a delta, and must return a boolean, indicating
684 whether the delta contains the specific action we're looking
685 for. For example, you might check to see whether a 'change'
686 has a 'completed' status. See the _Observer class for details.
689 q
= asyncio
.Queue(loop
=self
.loop
)
691 async def callback(delta
, old
, new
, model
):
692 await q
.put(delta
.get_id())
694 self
.add_observer(callback
, entity_type
, action
, entity_id
, predicate
)
695 entity_id
= await q
.get()
696 # object might not be in the entity_map if we were waiting for a
698 return self
.state
._live
_entity
_map
(entity_type
).get(entity_id
)
700 async def _wait_for_new(self
, entity_type
, entity_id
=None, predicate
=None):
701 """Wait for a new object to appear in the Model and return it.
703 Waits for an object of type ``entity_type`` with id ``entity_id``.
704 If ``entity_id`` is ``None``, it will wait for the first new entity
707 This coroutine blocks until the new object appears in the model.
710 # if the entity is already in the model, just return it
711 if entity_id
in self
.state
._live
_entity
_map
(entity_type
):
712 return self
.state
._live
_entity
_map
(entity_type
)[entity_id
]
713 # if we know the entity_id, we can trigger on any action that puts
714 # the enitty into the model; otherwise, we have to watch for the
715 # next "add" action on that entity_type
716 action
= 'add' if entity_id
is None else None
717 return await self
._wait
(entity_type
, entity_id
, action
, predicate
)
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete."""

    if action_id.startswith("action-"):
        # if we've been passed action.tag, transform it into the
        # id that the api deltas will use.
        action_id = action_id[7:]

    def predicate(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, 'change', predicate)
async def add_machine(
        self, spec=None, constraints=None, disks=None, series=None):
    """Start a new, empty machine and optionally a container, or add a
    container to a machine.

    :param str spec: Machine specification
        Examples::

            (None) - starts a new machine
            'lxd' - starts a new machine with one lxd container
            'lxd:4' - starts a new lxd container on machine 4
            'ssh:user@10.10.0.3' - manually provisions a machine with ssh
            'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS
            'maas2.name' - acquire machine maas2.name on MAAS

    :param dict constraints: Machine constraints
    :param list disks: List of disk constraint dictionaries
    :param str series: Series, e.g. 'xenial'

    Supported container types are: lxd, kvm

    When deploying a container to an existing machine, constraints cannot
    be used.

    """
    params = client.AddMachineParams()
    params.jobs = ['JobHostUnits']

    if spec:
        placement = parse_placement(spec)
        if placement:
            params.placement = placement[0]

    if constraints:
        params.constraints = client.Value.from_json(constraints)

    if disks:
        params.disks = [
            client.Constraints.from_json(o) for o in disks]

    if series:
        params.series = series

    # Submit the request.
    client_facade = client.ClientFacade()
    client_facade.connect(self.connection)
    results = await client_facade.AddMachines([params])
    error = results.machines[0].error
    if error:
        raise ValueError("Error adding machine: %s", error.message)
    machine_id = results.machines[0].machine
    log.debug('Added new machine %s', machine_id)
    return await self._wait_for_new('machine', machine_id)
async def add_relation(self, relation1, relation2):
    """Add a relation between two applications.

    :param str relation1: '<application>[:<relation_name>]'
    :param str relation2: '<application>[:<relation_name>]'

    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    log.debug(
        'Adding relation %s <-> %s', relation1, relation2)

    try:
        result = await app_facade.AddRelation([relation1, relation2])
    except JujuAPIError as e:
        if 'relation already exists' not in e.message:
            raise
        log.debug(
            'Relation %s <-> %s already exists', relation1, relation2)
        # TODO: if relation already exists we should return the
        # Relation ModelEntity here
        return None

    def predicate(delta):
        # Match the delta whose endpoints are exactly the ones the API
        # reported back for this AddRelation call.
        endpoints = {}
        for endpoint in delta.data['endpoints']:
            endpoints[endpoint['application-name']] = endpoint['relation']
        return endpoints == result.endpoints

    return await self._wait_for_new('relation', None, predicate)
def add_space(self, name, *cidrs):
    """Add a new network space.

    Adds a new space with the given name and associates the given
    (optional) list of existing subnet CIDRs with it.

    :param str name: Name of the space
    :param \*cidrs: Optional list of existing subnet CIDRs

    """
    raise NotImplementedError()

def add_ssh_key(self, key):
    """Add a public SSH key to this model.

    :param str key: The public ssh key

    """
    raise NotImplementedError()
add_ssh_keys = add_ssh_key

def add_subnet(self, cidr_or_id, space, *zones):
    """Add an existing subnet to this model.

    :param str cidr_or_id: CIDR or provider ID of the existing subnet
    :param str space: Network space with which to associate
    :param str \*zones: Zone(s) in which the subnet resides

    """
    raise NotImplementedError()

def get_backups(self):
    """Retrieve metadata for backups in this model.

    """
    raise NotImplementedError()

def block(self, *commands):
    """Add a new block to this model.

    :param str \*commands: The commands to block. Valid values are
        'all-changes', 'destroy-model', 'remove-object'

    """
    raise NotImplementedError()

def get_blocks(self):
    """List blocks for this model.

    """
    raise NotImplementedError()

def get_cached_images(self, arch=None, kind=None, series=None):
    """Return a list of cached OS images.

    :param str arch: Filter by image architecture
    :param str kind: Filter by image kind, e.g. 'lxd'
    :param str series: Filter by image series, e.g. 'xenial'

    """
    raise NotImplementedError()

def create_backup(self, note=None, no_download=False):
    """Create a backup of this model.

    :param str note: A note to store with the backup
    :param bool no_download: Do not download the backup archive
    :return str: Path to downloaded archive

    """
    raise NotImplementedError()

def create_storage_pool(self, name, provider_type, **pool_config):
    """Create or define a storage pool.

    :param str name: Name to give the storage pool
    :param str provider_type: Pool provider type
    :param \*\*pool_config: key/value pool configuration pairs

    """
    raise NotImplementedError()

def get_logs(
        self, no_tail=False, exclude_module=None, include_module=None,
        include=None, level=None, limit=0, lines=10, replay=False,
        exclude=None):
    """Get log messages for this model.

    :param bool no_tail: Stop after returning existing log messages
    :param list exclude_module: Do not show log messages for these logging
        modules
    :param list include_module: Only show log messages for these logging
        modules
    :param list include: Only show log messages for these entities
    :param str level: Log level to show, valid options are 'TRACE',
        'DEBUG', 'INFO', 'WARNING', 'ERROR,
    :param int limit: Return this many of the most recent (possibly
        filtered) lines are shown
    :param int lines: Yield this many of the most recent lines, and keep
        yielding
    :param bool replay: Yield the entire log, and keep yielding
    :param list exclude: Do not show log messages for these entities

    """
    raise NotImplementedError()
async def deploy(
        self, entity_url, application_name=None, bind=None, budget=None,
        channel=None, config=None, constraints=None, force=False,
        num_units=1, plan=None, resources=None, series=None, storage=None,
        to=None):
    """Deploy a new service or bundle.

    :param str entity_url: Charm or bundle url
    :param str application_name: Name to give the service
    :param dict bind: <charm endpoint>:<network space> pairs
    :param dict budget: <budget name>:<limit> pairs
    :param str channel: Charm store channel from which to retrieve
        the charm or bundle, e.g. 'development'
    :param dict config: Charm configuration dictionary
    :param constraints: Service constraints
    :type constraints: :class:`juju.Constraints`
    :param bool force: Allow charm to be deployed to a machine running
        an unsupported series
    :param int num_units: Number of units to deploy
    :param str plan: Plan under which to deploy charm
    :param dict resources: <resource name>:<file path> pairs
    :param str series: Series on which to deploy
    :param dict storage: Storage constraints TODO how do these look?
    :param to: Placement directive as a string. For example:

        '23' - place on machine 23
        'lxd:7' - place in new lxd container on machine 7
        '24/lxd/3' - place in container 3 on machine 24

        If None, a new machine is provisioned.

    TODO::

        - application_name is required; fill this in automatically if not
          provided by caller
        - series is required; how do we pick a default?

    """
    if storage:
        storage = {
            k: client.Constraints(**v)
            for k, v in storage.items()
        }

    is_local = (
        entity_url.startswith('local:') or
        os.path.isdir(entity_url)
    )
    if is_local:
        entity_id = entity_url
    else:
        entity = await self.charmstore.entity(entity_url)
        entity_id = entity['Id']

    client_facade = client.ClientFacade()
    client_facade.connect(self.connection)

    is_bundle = ((is_local and
                  (Path(entity_id) / 'bundle.yaml').exists()) or
                 (not is_local and 'bundle/' in entity_id))

    if is_bundle:
        handler = BundleHandler(self)
        await handler.fetch_plan(entity_id)
        await handler.execute_plan()
        extant_apps = {app for app in self.applications}
        pending_apps = set(handler.applications) - extant_apps
        if pending_apps:
            # new apps will usually be in the model by now, but if some
            # haven't made it yet we'll need to wait on them to be added
            await asyncio.gather(*[
                asyncio.ensure_future(
                    self._wait_for_new('application', app_name))
                for app_name in pending_apps
            ])
        return [app for name, app in self.applications.items()
                if name in handler.applications]
    else:
        if not is_local:
            if not application_name:
                application_name = entity['Meta']['charm-metadata']['Name']
            if not series and '/' in entity_url:
                # try to get the series from the provided charm URL
                if entity_url.startswith('cs:'):
                    parts = entity_url[3:].split('/')
                else:
                    parts = entity_url.split('/')
                if parts[0].startswith('~'):
                    parts.pop(0)
                if len(parts) > 1:
                    # series was specified in the URL
                    series = parts[0]
            if not series:
                # series was not supplied at all, so use the newest
                # supported series according to the charm store
                ss = entity['Meta']['supported-series']
                series = ss['SupportedSeries'][0]
            await client_facade.AddCharm(channel, entity_id)
        else:
            # We have a local charm dir that needs to be uploaded
            charm_dir = os.path.abspath(
                os.path.expanduser(entity_id))
            series = series or get_charm_series(charm_dir)
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Pass a 'series' kwarg to Model.deploy().".format(
                        charm_dir))
            entity_id = await self.add_local_charm_dir(charm_dir, series)
        return await self._deploy(
            charm_url=entity_id,
            application=application_name,
            series=series,
            config=config or {},
            constraints=constraints,
            endpoint_bindings=bind,
            resources=resources,
            storage=storage,
            channel=channel,
            num_units=num_units,
            placement=parse_placement(to),
        )
async def _deploy(self, charm_url, application, series, config,
                  constraints, endpoint_bindings, resources, storage,
                  channel=None, num_units=None, placement=None):
    """Logic shared between `Model.deploy` and `BundleHandler.deploy`.

    """
    log.info('Deploying %s', charm_url)

    # stringify all config values for API, and convert to YAML
    config = {k: str(v) for k, v in config.items()}
    config = yaml.dump({application: config},
                       default_flow_style=False)

    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    app = client.ApplicationDeploy(
        charm_url=charm_url,
        application=application,
        series=series,
        channel=channel,
        config_yaml=config,
        constraints=parse_constraints(constraints),
        endpoint_bindings=endpoint_bindings,
        num_units=num_units,
        resources=resources,
        storage=storage,
        placement=placement,
    )

    result = await app_facade.Deploy([app])
    errors = [r.error.message for r in result.results if r.error]
    if errors:
        raise JujuError('\n'.join(errors))
    return await self._wait_for_new('application', application)
1101 """Terminate all machines and resources for this model.
1104 raise NotImplementedError()
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    log.debug(
        'Destroying unit%s %s',
        # Bug fix: the plural 's' was appended when exactly one unit was
        # given; it belongs on multiple units.
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))

    return await app_facade.DestroyUnits(list(unit_names))
destroy_units = destroy_unit
1121 def get_backup(self
, archive_id
):
1122 """Download a backup archive file.
1124 :param str archive_id: The id of the archive to download
1125 :return str: Path to the archive file
1128 raise NotImplementedError()
1131 self
, num_controllers
=0, constraints
=None, series
=None, to
=None):
1132 """Ensure sufficient controllers exist to provide redundancy.
1134 :param int num_controllers: Number of controllers to make available
1135 :param constraints: Constraints to apply to the controller machines
1136 :type constraints: :class:`juju.Constraints`
1137 :param str series: Series of the controller machines
1138 :param list to: Placement directives for controller machines, e.g.::
1141 'lxc:7' - new lxc container on machine 7
1142 '24/lxc/3' - lxc container 3 or machine 24
1144 If None, a new machine is provisioned.
1147 raise NotImplementedError()
1149 def get_config(self
):
1150 """Return the configuration settings for this model.
1153 raise NotImplementedError()
1155 def get_constraints(self
):
1156 """Return the machine constraints for this model.
1159 raise NotImplementedError()
1161 def grant(self
, username
, acl
='read'):
1162 """Grant a user access to this model.
1164 :param str username: Username
1165 :param str acl: Access control ('read' or 'write')
1168 raise NotImplementedError()
    def import_ssh_key(self, identity):
        """Add a public SSH key from a trusted identity source to this model.

        :param str identity: User identity in the form <lp|gh>:<username>
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    import_ssh_keys = import_ssh_key
    def get_machines(self, machine, utc=False):
        """Return list of machines in this model.

        :param str machine: Machine id, e.g. '0'
        :param bool utc: Display time as UTC in RFC3339 format
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_shares(self):
        """Return list of all users with access to this model.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_spaces(self):
        """Return list of all known spaces, including associated subnets.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_ssh_key(self):
        """Return known SSH keys for this model.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    get_ssh_keys = get_ssh_key
    def get_storage(self, filesystem=False, volume=False):
        """Return details of storage instances.

        :param bool filesystem: Include filesystem storage
        :param bool volume: Include volume storage
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_storage_pools(self, names=None, providers=None):
        """Return list of storage pools.

        :param list names: Only include pools with these names
        :param list providers: Only include pools for these providers
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_subnets(self, space=None, zone=None):
        """Return list of known subnets.

        :param str space: Only include subnets in this space
        :param str zone: Only include subnets in this zone
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def remove_blocks(self):
        """Remove all blocks from this model.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def remove_backup(self, backup_id):
        """Delete a backup.

        :param str backup_id: The id of the backup to remove
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def remove_cached_images(self, arch=None, kind=None, series=None):
        """Remove cached OS images.

        :param str arch: Architecture of the images to remove
        :param str kind: Image kind to remove, e.g. 'lxd'
        :param str series: Image series to remove, e.g. 'xenial'
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def remove_machine(self, *machine_ids):
        """Remove a machine from this model.

        :param str \*machine_ids: Ids of the machines to remove
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    remove_machines = remove_machine
    def remove_ssh_key(self, *keys):
        """Remove a public SSH key(s) from this model.

        :param str \*keys: Keys to remove
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    remove_ssh_keys = remove_ssh_key
    def restore_backup(
            self, bootstrap=False, constraints=None, archive=None,
            backup_id=None, upload_tools=False):
        """Restore a backup archive to a new controller.

        :param bool bootstrap: Bootstrap a new state machine
        :param constraints: Model constraints
        :type constraints: :class:`juju.Constraints`
        :param str archive: Path to backup archive to restore
        :param str backup_id: Id of backup to restore
        :param bool upload_tools: Upload tools if bootstrapping a new machine
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def retry_provisioning(self):
        """Retry provisioning for failed machines.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def revoke(self, username, acl='read'):
        """Revoke a user's access to this model.

        :param str username: Username to revoke
        :param str acl: Access control ('read' or 'write')
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def run(self, command, timeout=None):
        """Run command on all machines in this model.

        :param str command: The command to run
        :param int timeout: Time to wait before command is considered failed
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def set_config(self, **config):
        """Set configuration keys on this model.

        :param \*\*config: Config key/values
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def set_constraints(self, constraints):
        """Set machine constraints on this model.

        :param :class:`juju.Constraints` constraints: Machine constraints
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_action_output(self, action_uuid, wait=-1):
        """Get the results of an action by ID.

        :param str action_uuid: Id of the action
        :param int wait: Time in seconds to wait for action to complete
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_action_status(self, uuid_or_prefix=None, name=None):
        """Get the status of all actions, filtered by ID, ID prefix, or action name.

        :param str uuid_or_prefix: Filter by action uuid or prefix
        :param str name: Filter by action name
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def get_budget(self, budget_name):
        """Get budget usage info.

        :param str budget_name: Name of budget
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
1357 async def get_status(self
, filters
=None, utc
=False):
1358 """Return the status of the model.
1360 :param str filters: Optional list of applications, units, or machines
1361 to include, which can use wildcards ('*').
1362 :param bool utc: Display time as UTC in RFC3339 format
1365 client_facade
= client
.ClientFacade()
1366 client_facade
.connect(self
.connection
)
1367 return await client_facade
.FullStatus(filters
)
    def sync_tools(
            self, all_=False, destination=None, dry_run=False, public=False,
            source=None, stream=None, version=None):
        """Copy Juju tools into this model.

        :param bool all_: Copy all versions, not just the latest
        :param str destination: Path to local destination directory
        :param bool dry_run: Don't do the actual copy
        :param bool public: Tools are for a public cloud, so generate mirrors
        :param str source: Path to local source directory
        :param str stream: Simplestreams stream for which to sync metadata
        :param str version: Copy a specific major.minor version
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def unblock(self, *commands):
        """Unblock an operation that would alter this model.

        :param str \*commands: The commands to unblock. Valid values are
            'all-changes', 'destroy-model', 'remove-object'
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def unset_config(self, *keys):
        """Unset configuration on this model.

        :param str \*keys: The keys to unset
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def upgrade_gui(self):
        """Upgrade the Juju GUI for this model.

        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def upgrade_juju(
            self, dry_run=False, reset_previous_upgrade=False,
            upload_tools=False, version=None):
        """Upgrade Juju on all machines in a model.

        :param bool dry_run: Don't do the actual upgrade
        :param bool reset_previous_upgrade: Clear the previous (incomplete)
            upgrade status
        :param bool upload_tools: Upload local version of tools
        :param str version: Upgrade to a specific version
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    def upload_backup(self, archive_path):
        """Store a backup archive remotely in Juju.

        :param str archive_path: Path to local archive
        :raises NotImplementedError: always — not yet implemented

        """
        raise NotImplementedError()
    @property
    def charmstore(self):
        # Read-only accessor for the model's CharmStore wrapper.
        # NOTE(review): self._charmstore is set elsewhere in the class
        # (presumably __init__, not visible here) — confirm before relying
        # on it being non-None.
        return self._charmstore
1435 async def get_metrics(self
, *tags
):
1436 """Retrieve metrics.
1438 :param str \*tags: Tags of entities from which to retrieve metrics.
1439 No tags retrieves the metrics of all units in the model.
1440 :return: Dictionary of unit_name:metrics
1443 log
.debug("Retrieving metrics for %s",
1444 ', '.join(tags
) if tags
else "all units")
1446 metrics_facade
= client
.MetricsDebugFacade()
1447 metrics_facade
.connect(self
.connection
)
1449 entities
= [client
.Entity(tag
) for tag
in tags
]
1450 metrics_result
= await metrics_facade
.GetMetrics(entities
)
1452 metrics
= collections
.defaultdict(list)
1454 for entity_metrics
in metrics_result
.results
:
1455 error
= entity_metrics
.error
1457 if "is not a valid tag" in error
:
1458 raise ValueError(error
.message
)
1460 raise Exception(error
.message
)
1462 for metric
in entity_metrics
.metrics
:
1463 metrics
[metric
.unit
].append(vars(metric
))
def get_charm_series(path):
    """Inspect the charm directory at ``path`` and return a default
    series from its metadata.yaml (the first item in the 'series' list).

    :param path: Path to a charm directory
    :return: The first entry of the 'series' list in metadata.yaml,
        or None if no series can be determined.
    """
    md = Path(path) / "metadata.yaml"
    if not md.exists():
        return None
    # safe_load avoids executing arbitrary YAML tags from charm-supplied
    # metadata; read_text (vs. a bare open()) also closes the file promptly.
    data = yaml.safe_load(md.read_text())
    series = data.get('series')
    return series[0] if series else None
class BundleHandler(object):
    """
    Handle bundles by using the API to translate bundle YAML into a plan of
    steps and then dispatching each of those using the API.
    """
    def __init__(self, model):
        self.model = model
        self.charmstore = model.charmstore
        self.plan = []
        self.references = {}
        # Map of application name -> existing unit names, so addUnit can
        # reuse units already present in the model instead of adding more.
        self._units_by_app = {}
        for unit_name, unit in model.units.items():
            app_units = self._units_by_app.setdefault(unit.application, [])
            app_units.append(unit_name)
        self.client_facade = client.ClientFacade()
        self.client_facade.connect(model.connection)
        self.app_facade = client.ApplicationFacade()
        self.app_facade.connect(model.connection)
        self.ann_facade = client.AnnotationsFacade()
        self.ann_facade.connect(model.connection)

    async def _handle_local_charms(self, bundle):
        """Search for references to local charms (i.e. filesystem paths)
        in the bundle. Upload the local charms to the model, and replace
        the filesystem paths with appropriate 'local:' paths in the bundle.

        Return the modified bundle.

        :param dict bundle: Bundle dictionary
        :return: Modified bundle dictionary

        """
        apps, args = [], []

        default_series = bundle.get('series')
        for app_name in self.applications:
            app_dict = bundle['services'][app_name]
            charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
            if not os.path.isdir(charm_dir):
                # Not a filesystem path, so not a local charm.
                continue
            series = (
                app_dict.get('series') or
                default_series or
                get_charm_series(charm_dir)
            )
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Add a 'series' key to the bundle.".format(charm_dir))

            # Keep track of what we need to update. We keep a list of apps
            # that need to be updated, and a corresponding list of args
            # needed to update those apps.
            apps.append(app_name)
            args.append((charm_dir, series))

        if apps:
            # If we have apps to update, spawn all the coroutines concurrently
            # and wait for them to finish.
            charm_urls = await asyncio.gather(*[
                self.model.add_local_charm_dir(*params)
                for params in args
            ], loop=self.model.loop)
            # Update the 'charm:' entry for each app with the new 'local:' url.
            for app_name, charm_url in zip(apps, charm_urls):
                bundle['services'][app_name]['charm'] = charm_url

        return bundle

    async def fetch_plan(self, entity_id):
        """Load the bundle YAML (locally or from the charm store) and ask
        the controller for the corresponding change plan.

        :param str entity_id: Charm store id, or path to a local bundle dir
        :raises JujuError: if the controller reports errors in the bundle
        """
        is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id)
        if is_local:
            bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text()
        else:
            bundle_yaml = await self.charmstore.files(entity_id,
                                                      filename='bundle.yaml',
                                                      read_file=True)
        self.bundle = yaml.safe_load(bundle_yaml)
        self.bundle = await self._handle_local_charms(self.bundle)

        self.plan = await self.client_facade.GetBundleChanges(
            yaml.dump(self.bundle))

        if self.plan.errors:
            raise JujuError('\n'.join(self.plan.errors))

    async def execute_plan(self):
        # Dispatch each change in the plan to the correspondingly-named
        # method on this handler, recording results for later '$' references.
        for step in self.plan.changes:
            method = getattr(self, step.method)
            result = await method(*step.args)
            self.references[step.id_] = result

    @property
    def applications(self):
        # Application names declared by the bundle.
        return list(self.bundle['services'].keys())

    def resolve(self, reference):
        # Resolve '$change-id' placeholders to the result of that change.
        if reference and reference.startswith('$'):
            reference = self.references[reference[1:]]
        return reference

    async def addCharm(self, charm, series):
        """
        :param charm string:
            Charm holds the URL of the charm to be added.

        :param series string:
            Series holds the series of the charm to be added
            if the charm default is not sufficient.
        """
        # We don't add local charms because they've already been added
        # by self._handle_local_charms
        if charm.startswith('local:'):
            return charm

        entity_id = await self.charmstore.entityId(charm)
        log.debug('Adding %s', entity_id)
        await self.client_facade.AddCharm(None, entity_id)
        return entity_id

    async def addMachines(self, params=None):
        """
        :param params dict:
            Dictionary specifying the machine to add. All keys are optional.
            Keys include:

            series: string specifying the machine OS series.

            constraints: string holding machine constraints, if any. We'll
                parse this into the json friendly dict that the juju api
                expects.

            container_type: string holding the type of the container (for
                instance ""lxd" or kvm"). It is not specified for top level
                machines.

            parent_id: string holding a placeholder pointing to another
                machine change or to a unit change. This value is only
                specified in the case this machine is a container, in
                which case also ContainerType is set.
        """
        params = params or {}

        # Normalize keys
        params = {normalize_key(k): params[k] for k in params.keys()}

        # Fix up values, as necessary.
        if 'parent_id' in params:
            params['parent_id'] = self.resolve(params['parent_id'])

        params['constraints'] = parse_constraints(
            params.get('constraints'))
        params['jobs'] = params.get('jobs', ['JobHostUnits'])

        if params.get('container_type') == 'lxc':
            log.warning('Juju 2.0 does not support lxc containers. '
                        'Converting containers to lxd.')
            params['container_type'] = 'lxd'

        # Submit the request.
        params = client.AddMachineParams(**params)
        results = await self.client_facade.AddMachines([params])
        error = results.machines[0].error
        if error:
            raise ValueError("Error adding machine: %s", error.message)
        machine = results.machines[0].machine
        log.debug('Added new machine %s', machine)
        return machine

    async def addRelation(self, endpoint1, endpoint2):
        """
        :param endpoint1 string:
        :param endpoint2 string:
            Endpoint1 and Endpoint2 hold relation endpoints in the
            "application:interface" form, where the application is always a
            placeholder pointing to an application change, and the interface is
            optional. Examples are "$deploy-42:web" or just "$deploy-42".
        """
        endpoints = [endpoint1, endpoint2]
        # resolve indirect references
        for i in range(len(endpoints)):
            parts = endpoints[i].split(':')
            parts[0] = self.resolve(parts[0])
            endpoints[i] = ':'.join(parts)

        log.info('Relating %s <-> %s', *endpoints)
        return await self.model.add_relation(*endpoints)

    async def deploy(self, charm, series, application, options, constraints,
                     storage, endpoint_bindings, resources):
        """
        :param charm string:
            Charm holds the URL of the charm to be used to deploy this
            application.

        :param series string:
            Series holds the series of the application to be deployed
            if the charm default is not sufficient.

        :param application string:
            Application holds the application name.

        :param options map[string]interface{}:
            Options holds application options.

        :param constraints string:
            Constraints holds the optional application constraints.

        :param storage map[string]string:
            Storage holds the optional storage constraints.

        :param endpoint_bindings map[string]string:
            EndpointBindings holds the optional endpoint bindings

        :param resources map[string]int:
            Resources identifies the revision to use for each resource
            of the application's charm.
        """
        # resolve indirect references
        charm = self.resolve(charm)
        await self.model._deploy(
            charm_url=charm,
            application=application,
            series=series,
            config=options,
            constraints=constraints,
            endpoint_bindings=endpoint_bindings,
            resources=resources,
            storage=storage,
        )
        return application

    async def addUnit(self, application, to):
        """
        :param application string:
            Application holds the application placeholder name for which a unit
            is added.

        :param to string:
            To holds the optional location where to add the unit, as a
            placeholder pointing to another unit change or to a machine change.
        """
        application = self.resolve(application)
        placement = self.resolve(to)
        if self._units_by_app.get(application):
            # enough units for this application already exist;
            # claim one, and carry on
            # NB: this should probably honor placement, but the juju client
            # doesn't, so we're not bothering, either
            unit_name = self._units_by_app[application].pop()
            log.debug('Reusing unit %s for %s', unit_name, application)
            return self.model.units[unit_name]

        log.debug('Adding new unit for %s%s', application,
                  ' to %s' % placement if placement else '')
        return await self.model.applications[application].add_unit(
            count=1,
            to=placement,
        )

    async def expose(self, application):
        """
        :param application string:
            Application holds the placeholder name of the application that must
            be exposed.
        """
        application = self.resolve(application)
        log.info('Exposing %s', application)
        return await self.model.applications[application].expose()

    async def setAnnotations(self, id_, entity_type, annotations):
        """
        :param id_ string:
            Id is the placeholder for the application or machine change
            corresponding to the entity to be annotated.

        :param entity_type EntityType:
            EntityType holds the type of the entity, "application" or
            "machine".

        :param annotations map[string]string:
            Annotations holds the annotations as key/value pairs.
        """
        entity_id = self.resolve(id_)
        try:
            entity = self.model.state.get_entity(entity_type, entity_id)
        except KeyError:
            # The entity hasn't shown up in the watcher stream yet;
            # wait for it to appear.
            entity = await self.model._wait_for_new(entity_type, entity_id)
        return await entity.set_annotations(annotations)
class CharmStore(object):
    """
    Async wrapper around theblues.charmstore.CharmStore
    """
    def __init__(self, loop):
        # Event loop used to run the blocking theblues calls in an executor.
        self.loop = loop
        self._cs = theblues.charmstore.CharmStore(timeout=5)

    def __getattr__(self, name):
        """
        Wrap method calls in coroutines that use run_in_executor to make them
        async.
        """
        attr = getattr(self._cs, name)
        if not callable(attr):
            # Plain attribute: expose a zero-arg getter so the value is
            # re-read from the underlying CharmStore on each call.
            wrapper = partial(getattr, self._cs, name)
            setattr(self, name, wrapper)
        else:
            async def coro(*args, **kwargs):
                method = partial(attr, *args, **kwargs)
                # Retry transient charm store failures up to 3 times.
                for attempt in range(1, 4):
                    try:
                        return await self.loop.run_in_executor(None, method)
                    except theblues.errors.ServerError:
                        if attempt == 3:
                            raise
                        await asyncio.sleep(1, loop=self.loop)
            # Cache the wrapper so __getattr__ only fires once per name.
            setattr(self, name, coro)
            wrapper = coro
        return wrapper
1807 class CharmArchiveGenerator(object):
1808 def __init__(self
, path
):
1809 self
.path
= os
.path
.abspath(os
.path
.expanduser(path
))
    def make_archive(self, path):
        """Create archive of directory and write to ``path``.

        :param path: Path to archive

        Ignored::

            * build/\* - This is used for packing the charm itself and any
                          similar tasks.
            * \*/.\*    - Hidden files are all ignored for now.  This will most
                          likely be changed into a specific ignore list
                          (.bzr, etc)

        """
        zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED)
        for dirpath, dirnames, filenames in os.walk(self.path):
            relative_path = dirpath[len(self.path) + 1:]
            if relative_path and not self._ignore(relative_path):
                # Record the directory entry itself.
                zf.write(dirpath, relative_path)
            for name in filenames:
                archive_name = os.path.join(relative_path, name)
                if not self._ignore(archive_name):
                    real_path = os.path.join(dirpath, name)
                    self._check_type(real_path)
                    if os.path.islink(real_path):
                        # Symlinks are validated and stored as symlink
                        # entries rather than copying their targets.
                        self._check_link(real_path)
                        self._write_symlink(
                            zf, os.readlink(real_path), archive_name)
                    else:
                        zf.write(real_path, archive_name)
        zf.close()
        return path
1844 def _check_type(self
, path
):
1848 if stat
.S_ISDIR(s
.st_mode
) or stat
.S_ISREG(s
.st_mode
):
1850 raise ValueError("Invalid Charm at % %s" % (
1851 path
, "Invalid file type for a charm"))
1853 def _check_link(self
, path
):
1854 link_path
= os
.readlink(path
)
1855 if link_path
[0] == "/":
1857 "Invalid Charm at %s: %s" % (
1858 path
, "Absolute links are invalid"))
1859 path_dir
= os
.path
.dirname(path
)
1860 link_path
= os
.path
.join(path_dir
, link_path
)
1861 if not link_path
.startswith(os
.path
.abspath(self
.path
)):
1863 "Invalid charm at %s %s" % (
1864 path
, "Only internal symlinks are allowed"))
1866 def _write_symlink(self
, zf
, link_target
, link_path
):
1867 """Package symlinks with appropriate zipfile metadata."""
1868 info
= zipfile
.ZipInfo()
1869 info
.filename
= link_path
1870 info
.create_system
= 3
1871 # Magic code for symlinks / py2/3 compat
1872 # 27166663808 = (stat.S_IFLNK | 0755) << 16
1873 info
.external_attr
= 2716663808
1874 zf
.writestr(info
, link_target
)
1876 def _ignore(self
, path
):
1877 if path
== "build" or path
.startswith("build/"):
1879 if path
.startswith('.'):