548fc03526d6b572ab8f2343ec4a451f937f9cf8
11 from concurrent
.futures
import CancelledError
12 from functools
import partial
13 from pathlib
import Path
16 from theblues
import charmstore
18 from .client
import client
19 from .client
import watcher
20 from .client
import connection
21 from .constraints
import parse
as parse_constraints
, normalize_key
22 from .delta
import get_entity_delta
23 from .delta
import get_entity_class
24 from .exceptions
import DeadEntityException
25 from .errors
import JujuError
, JujuAPIError
26 from .placement
import parse
as parse_placement
28 log
= logging
.getLogger(__name__
)
31 class _Observer(object):
32 """Wrapper around an observer callable.
34 This wrapper allows filter criteria to be associated with the
35 callable so that it's only called for changes that meet the criteria.
38 def __init__(self
, callable_
, entity_type
, action
, entity_id
, predicate
):
39 self
.callable_
= callable_
40 self
.entity_type
= entity_type
42 self
.entity_id
= entity_id
43 self
.predicate
= predicate
45 self
.entity_id
= str(self
.entity_id
)
46 if not self
.entity_id
.startswith('^'):
47 self
.entity_id
= '^' + self
.entity_id
48 if not self
.entity_id
.endswith('$'):
51 async def __call__(self
, delta
, old
, new
, model
):
52 await self
.callable_(delta
, old
, new
, model
)
54 def cares_about(self
, delta
):
55 """Return True if this observer "cares about" (i.e. wants to be
56 called) for a this delta.
59 if (self
.entity_id
and delta
.get_id() and
60 not re
.match(self
.entity_id
, str(delta
.get_id()))):
63 if self
.entity_type
and self
.entity_type
!= delta
.entity
:
66 if self
.action
and self
.action
!= delta
.type:
69 if self
.predicate
and not self
.predicate(delta
):
75 class ModelObserver(object):
76 async def __call__(self
, delta
, old
, new
, model
):
77 handler_name
= 'on_{}_{}'.format(delta
.entity
, delta
.type)
78 method
= getattr(self
, handler_name
, self
.on_change
)
79 await method(delta
, old
, new
, model
)
81 async def on_change(self
, delta
, old
, new
, model
):
82 """Generic model-change handler.
84 :param delta: :class:`juju.client.overrides.Delta`
85 :param old: :class:`juju.model.ModelEntity`
86 :param new: :class:`juju.model.ModelEntity`
87 :param model: :class:`juju.model.Model`
93 class ModelState(object):
94 """Holds the state of the model, including the delta history of all
95 entities in the model.
98 def __init__(self
, model
):
102 def _live_entity_map(self
, entity_type
):
103 """Return an id:Entity map of all the living entities of
104 type ``entity_type``.
108 entity_id
: self
.get_entity(entity_type
, entity_id
)
109 for entity_id
, history
in self
.state
.get(entity_type
, {}).items()
110 if history
[-1] is not None
114 def applications(self
):
115 """Return a map of application-name:Application for all applications
116 currently in the model.
119 return self
._live
_entity
_map
('application')
123 """Return a map of machine-id:Machine for all machines currently in
127 return self
._live
_entity
_map
('machine')
131 """Return a map of unit-id:Unit for all units currently in
135 return self
._live
_entity
_map
('unit')
def entity_history(self, entity_type, entity_id):
    """Return the history deque recorded for a single entity.

    :param entity_type: e.g. 'application', 'unit', 'machine'
    :param entity_id: The entity's unique id within its type
    :raises KeyError: if the entity has never been seen
    """
    per_type = self.state[entity_type]
    return per_type[entity_id]
def entity_data(self, entity_type, entity_id, history_index):
    """Return the data dict for an entity at a specific index of its
    history.

    ``history_index`` of ``-1`` yields the most recent state.
    """
    history = self.entity_history(entity_type, entity_id)
    return history[history_index]
150 def apply_delta(self
, delta
):
151 """Apply delta to our state and return a copy of the
152 affected object as it was before and after the update, e.g.:
154 old_obj, new_obj = self.apply_delta(delta)
156 old_obj may be None if the delta is for the creation of a new object,
157 e.g. a new application or unit is deployed.
159 new_obj will never be None, but may be dead (new_obj.dead == True)
160 if the object was deleted as a result of the delta being applied.
165 .setdefault(delta
.entity
, {})
166 .setdefault(delta
.get_id(), collections
.deque())
169 history
.append(delta
.data
)
170 if delta
.type == 'remove':
173 entity
= self
.get_entity(delta
.entity
, delta
.get_id())
174 return entity
.previous(), entity
177 self
, entity_type
, entity_id
, history_index
=-1, connected
=True):
178 """Return an object instance for the given entity_type and id.
180 By default the object state matches the most recent state from
181 Juju. To get an instance of the object in an older state, pass
182 history_index, an index into the history deque for the entity.
186 if history_index
< 0 and history_index
!= -1:
187 history_index
+= len(self
.entity_history(entity_type
, entity_id
))
188 if history_index
< 0:
192 self
.entity_data(entity_type
, entity_id
, history_index
)
196 entity_class
= get_entity_class(entity_type
)
198 entity_id
, self
.model
, history_index
=history_index
,
202 class ModelEntity(object):
203 """An object in the Model tree"""
205 def __init__(self
, entity_id
, model
, history_index
=-1, connected
=True):
206 """Initialize a new entity
208 :param entity_id str: The unique id of the object in the model
209 :param model: The model instance in whose object tree this
211 :history_index int: The index of this object's state in the model's
212 history deque for this entity
213 :connected bool: Flag indicating whether this object gets live updates
217 self
.entity_id
= entity_id
219 self
._history
_index
= history_index
220 self
.connected
= connected
221 self
.connection
= model
.connection
224 return '<{} entity_id="{}">'.format(type(self
).__name
__,
227 def __getattr__(self
, name
):
228 """Fetch object attributes from the underlying data dict held in the
232 return self
.safe_data
[name
]
235 return bool(self
.data
)
def on_change(self, callable_):
    """Add a change observer to this entity.

    ``callable_`` fires for every 'change' delta matching this
    entity's type and id.
    """
    model = self.model
    model.add_observer(callable_, self.entity_type, 'change', self.entity_id)
def on_remove(self, callable_):
    """Add a remove observer to this entity.

    ``callable_`` fires when a 'remove' delta arrives for this
    entity's type and id.
    """
    model = self.model
    model.add_observer(callable_, self.entity_type, 'remove', self.entity_id)
252 def entity_type(self
):
253 """A string identifying the entity type of this object, e.g.
254 'application' or 'unit', etc.
257 return self
.__class
__.__name
__.lower()
261 """Return True if this object represents the current state of the
262 entity in the underlying model.
264 This will be True except when the object represents an entity at a
265 non-latest state in history, e.g. if the object was obtained by calling
266 .previous() on another object.
269 return self
._history
_index
== -1
273 """Returns True if this entity no longer exists in the underlying
279 self
.model
.state
.entity_data(
280 self
.entity_type
, self
.entity_id
, -1) is None
285 """Returns True if this entity still exists in the underlying
293 """The data dictionary for this entity.
296 return self
.model
.state
.entity_data(
297 self
.entity_type
, self
.entity_id
, self
._history
_index
)
301 """The data dictionary for this entity.
303 If this `ModelEntity` points to the dead state, it will
304 raise `DeadEntityException`.
307 if self
.data
is None:
308 raise DeadEntityException(
309 "Entity {}:{} is dead - its attributes can no longer be "
310 "accessed. Use the .previous() method on this object to get "
311 "a copy of the object at its previous state.".format(
312 self
.entity_type
, self
.entity_id
))
316 """Return a copy of this object as was at its previous state in
319 Returns None if this object is new (and therefore has no history).
321 The returned object is always "disconnected", i.e. does not receive
325 return self
.model
.state
.get_entity(
326 self
.entity_type
, self
.entity_id
, self
._history
_index
- 1,
330 """Return a copy of this object at its next state in
333 Returns None if this object is already the latest.
335 The returned object is "disconnected", i.e. does not receive
336 live updates, unless it is current (latest).
339 if self
._history
_index
== -1:
342 new_index
= self
._history
_index
+ 1
344 new_index
== len(self
.model
.state
.entity_history(
345 self
.entity_type
, self
.entity_id
)) - 1
347 return self
.model
.state
.get_entity(
348 self
.entity_type
, self
.entity_id
, self
._history
_index
- 1,
352 """Return a copy of this object at its current state in the model.
354 Returns self if this object is already the latest.
356 The returned object is always "connected", i.e. receives
357 live updates from the model.
360 if self
._history
_index
== -1:
363 return self
.model
.state
.get_entity(self
.entity_type
, self
.entity_id
)
367 def __init__(self
, loop
=None):
368 """Instantiate a new connected Model.
370 :param loop: an asyncio event loop
373 self
.loop
= loop
or asyncio
.get_event_loop()
374 self
.connection
= None
375 self
.observers
= weakref
.WeakValueDictionary()
376 self
.state
= ModelState(self
)
378 self
._watcher
_task
= None
379 self
._watch
_shutdown
= asyncio
.Event(loop
=loop
)
380 self
._watch
_received
= asyncio
.Event(loop
=loop
)
381 self
._charmstore
= CharmStore(self
.loop
)
async def connect(self, *args, **kw):
    """Connect to an arbitrary Juju model.

    args and kw are passed through to Connection.connect()
    """
    conn = await connection.Connection.connect(*args, **kw)
    self.connection = conn
    await self._after_connect()
async def connect_current(self):
    """Connect to the currently-selected Juju model."""
    conn = await connection.Connection.connect_current()
    self.connection = conn
    await self._after_connect()
async def connect_model(self, model_name):
    """Connect to a specific Juju model by name.

    :param model_name: Format [controller:][user/]model
    """
    conn = await connection.Connection.connect_model(model_name)
    self.connection = conn
    await self._after_connect()
408 async def _after_connect(self
):
409 """Run initialization steps after connecting to websocket.
413 await self
._watch
_received
.wait()
414 await self
.get_info()
async def disconnect(self):
    """Shut down the watcher task and close websockets."""
    self._stop_watching()
    conn = self.connection
    if not (conn and conn.is_open):
        return
    # Wait for the watcher to acknowledge shutdown before closing the
    # underlying websocket.
    await self._watch_shutdown.wait()
    log.debug('Closing model connection')
    await conn.close()
    self.connection = None
427 async def add_local_charm_dir(self
, charm_dir
, series
):
428 """Upload a local charm to the model.
430 This will automatically generate an archive from
433 :param charm_dir: Path to the charm directory
434 :param series: Charm series
437 fh
= tempfile
.NamedTemporaryFile()
438 CharmArchiveGenerator(charm_dir
).make_archive(fh
.name
)
441 self
.add_local_charm
, fh
, series
, os
.stat(fh
.name
).st_size
)
442 charm_url
= await self
.loop
.run_in_executor(None, func
)
444 log
.debug('Uploaded local charm: %s -> %s', charm_dir
, charm_url
)
447 def add_local_charm(self
, charm_file
, series
, size
=None):
448 """Upload a local charm archive to the model.
450 Returns the 'local:...' url that should be used to deploy the charm.
452 :param charm_file: Path to charm zip archive
453 :param series: Charm series
454 :param size: Size of the archive, in bytes
455 :return str: 'local:...' url for deploying the charm
456 :raises: :class:`JujuError` if the upload fails
458 Uses an https endpoint at the same host:port as the wss.
459 Supports large file uploads.
463 This method will block. Consider using :meth:`add_local_charm_dir`
467 conn
, headers
, path_prefix
= self
.connection
.https_connection()
468 path
= "%s/charms?series=%s" % (path_prefix
, series
)
469 headers
['Content-Type'] = 'application/zip'
471 headers
['Content-Length'] = size
472 conn
.request("POST", path
, charm_file
, headers
)
473 response
= conn
.getresponse()
474 result
= response
.read().decode()
475 if not response
.status
== 200:
476 raise JujuError(result
)
477 result
= json
.loads(result
)
478 return result
['charm-url']
480 def all_units_idle(self
):
481 """Return True if all units are idle.
484 for unit
in self
.units
.values():
485 unit_status
= unit
.data
['agent-status']['current']
486 if unit_status
!= 'idle':
490 async def reset(self
, force
=False):
491 """Reset the model to a clean state.
493 :param bool force: Force-terminate machines.
495 This returns only after the model has reached a clean state. "Clean"
496 means no applications or machines exist in the model.
499 log
.debug('Resetting model')
500 for app
in self
.applications
.values():
502 for machine
in self
.machines
.values():
503 await machine
.destroy(force
=force
)
504 await self
.block_until(
505 lambda: len(self
.machines
) == 0
508 async def block_until(self
, *conditions
, timeout
=None, wait_period
=0.5):
509 """Return only after all conditions are true.
513 while not all(c() for c
in conditions
):
514 await asyncio
.sleep(wait_period
)
515 await asyncio
.wait_for(_block(), timeout
)
518 def applications(self
):
519 """Return a map of application-name:Application for all applications
520 currently in the model.
523 return self
.state
.applications
527 """Return a map of machine-id:Machine for all machines currently in
531 return self
.state
.machines
535 """Return a map of unit-id:Unit for all units currently in
539 return self
.state
.units
async def get_info(self):
    """Return a client.ModelInfo object for this Model.

    Retrieves latest info for this Model from the api server and
    caches it on ``self.info`` so it can be re-read without another
    api call. Called automatically on connect.
    """
    client_facade = client.ClientFacade()
    client_facade.connect(self.connection)
    self.info = await client_facade.ModelInfo()
    log.debug('Got ModelInfo: %s', vars(self.info))
563 self
, callable_
, entity_type
=None, action
=None, entity_id
=None,
565 """Register an "on-model-change" callback
567 Once the model is connected, ``callable_``
568 will be called each time the model changes. ``callable_`` should
569 be Awaitable and accept the following positional arguments:
571 delta - An instance of :class:`juju.delta.EntityDelta`
572 containing the raw delta data recv'd from the Juju
575 old_obj - If the delta modifies an existing object in the model,
576 old_obj will be a copy of that object, as it was before the
577 delta was applied. Will be None if the delta creates a new
580 new_obj - A copy of the new or updated object, after the delta
581 is applied. Will be None if the delta removes an entity
584 model - The :class:`Model` itself.
586 Events for which ``callable_`` is called can be specified by passing
587 entity_type, action, and/or entitiy_id filter criteria, e.g.::
591 entity_type='application', action='add', entity_id='ubuntu')
593 For more complex filtering conditions, pass a predicate function. It
594 will be called with a delta as its only argument. If the predicate
595 function returns True, the ``callable_`` will be called.
598 observer
= _Observer(
599 callable_
, entity_type
, action
, entity_id
, predicate
)
600 self
.observers
[observer
] = callable_
603 """Start an asynchronous watch against this model.
605 See :meth:`add_observer` to register an onchange callback.
608 async def _start_watch():
609 self
._watch
_shutdown
.clear()
611 allwatcher
= watcher
.AllWatcher()
612 self
._watch
_conn
= await self
.connection
.clone()
613 allwatcher
.connect(self
._watch
_conn
)
615 results
= await allwatcher
.Next()
616 for delta
in results
.deltas
:
617 delta
= get_entity_delta(delta
)
618 old_obj
, new_obj
= self
.state
.apply_delta(delta
)
619 # XXX: Might not want to shield at this level
620 # We are shielding because when the watcher is
621 # canceled (on disconnect()), we don't want all of
622 # its children (every observer callback) to be
623 # canceled with it. So we shield them. But this means
624 # they can *never* be canceled.
625 await asyncio
.shield(
626 self
._notify
_observers
(delta
, old_obj
, new_obj
))
627 self
._watch
_received
.set()
628 except CancelledError
:
629 log
.debug('Closing watcher connection')
630 await self
._watch
_conn
.close()
631 self
._watch
_shutdown
.set()
632 self
._watch
_conn
= None
634 log
.debug('Starting watcher task')
635 self
._watcher
_task
= self
.loop
.create_task(_start_watch())
def _stop_watching(self):
    """Cancel the asynchronous watch task for this model, if running."""
    log.debug('Stopping watcher task')
    task = self._watcher_task
    if task:
        task.cancel()
645 async def _notify_observers(self
, delta
, old_obj
, new_obj
):
646 """Call observing callbacks, notifying them of a change in model state
648 :param delta: The raw change from the watcher
649 (:class:`juju.client.overrides.Delta`)
650 :param old_obj: The object in the model that this delta updates.
652 :param new_obj: The object in the model that is created or updated
653 by applying this delta.
656 if new_obj
and not old_obj
:
660 'Model changed: %s %s %s',
661 delta
.entity
, delta
.type, delta
.get_id())
663 for o
in self
.observers
:
664 if o
.cares_about(delta
):
665 asyncio
.ensure_future(o(delta
, old_obj
, new_obj
, self
))
async def _wait(self, entity_type, entity_id, action, predicate=None):
    """
    Block the calling routine until a given action has happened to the
    given entity.

    :param entity_type: The entity's type.
    :param entity_id: The entity's id.
    :param action: the type of action (e.g., 'add', 'change', or 'remove')
    :param predicate: optional callable that must take as an
        argument a delta, and must return a boolean, indicating
        whether the delta contains the specific action we're looking
        for. See the _Observer class for details.
    """
    queue = asyncio.Queue(loop=self.loop)

    async def callback(delta, old, new, model):
        await queue.put(delta.get_id())

    self.add_observer(callback, entity_type, action, entity_id, predicate)
    entity_id = await queue.get()
    # The entity may be absent from the live map when the action we
    # waited for was a removal, hence .get() rather than [].
    return self.state._live_entity_map(entity_type).get(entity_id)
693 async def _wait_for_new(self
, entity_type
, entity_id
=None, predicate
=None):
694 """Wait for a new object to appear in the Model and return it.
696 Waits for an object of type ``entity_type`` with id ``entity_id``.
697 If ``entity_id`` is ``None``, it will wait for the first new entity
700 This coroutine blocks until the new object appears in the model.
703 # if the entity is already in the model, just return it
704 if entity_id
in self
.state
._live
_entity
_map
(entity_type
):
705 return self
.state
._live
_entity
_map
(entity_type
)[entity_id
]
706 # if we know the entity_id, we can trigger on any action that puts
707 # the enitty into the model; otherwise, we have to watch for the
708 # next "add" action on that entity_type
709 action
= 'add' if entity_id
is None else None
710 return await self
._wait
(entity_type
, entity_id
, action
, predicate
)
async def wait_for_action(self, action_id):
    """Given an action, wait for it to complete."""

    tag_prefix = "action-"
    if action_id.startswith(tag_prefix):
        # We were passed action.tag; strip the prefix to get the id
        # that the api deltas will use.
        action_id = action_id[len(tag_prefix):]

    def predicate(delta):
        return delta.data['status'] in ('completed', 'failed')

    return await self._wait('action', action_id, 'change', predicate)
725 async def add_machine(
726 self
, spec
=None, constraints
=None, disks
=None, series
=None):
727 """Start a new, empty machine and optionally a container, or add a
728 container to a machine.
730 :param str spec: Machine specification
733 (None) - starts a new machine
734 'lxd' - starts a new machine with one lxd container
735 'lxd:4' - starts a new lxd container on machine 4
736 'ssh:user@10.10.0.3' - manually provisions a machine with ssh
737 'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS
738 'maas2.name' - acquire machine maas2.name on MAAS
740 :param dict constraints: Machine constraints
747 :param list disks: List of disk constraint dictionaries
756 :param str series: Series, e.g. 'xenial'
758 Supported container types are: lxd, kvm
760 When deploying a container to an existing machine, constraints cannot
764 params
= client
.AddMachineParams()
765 params
.jobs
= ['JobHostUnits']
768 placement
= parse_placement(spec
)
770 params
.placement
= placement
[0]
773 params
.constraints
= client
.Value
.from_json(constraints
)
777 client
.Constraints
.from_json(o
) for o
in disks
]
780 params
.series
= series
782 # Submit the request.
783 client_facade
= client
.ClientFacade()
784 client_facade
.connect(self
.connection
)
785 results
= await client_facade
.AddMachines([params
])
786 error
= results
.machines
[0].error
788 raise ValueError("Error adding machine: %s", error
.message
)
789 machine_id
= results
.machines
[0].machine
790 log
.debug('Added new machine %s', machine_id
)
791 return await self
._wait
_for
_new
('machine', machine_id
)
793 async def add_relation(self
, relation1
, relation2
):
794 """Add a relation between two applications.
796 :param str relation1: '<application>[:<relation_name>]'
797 :param str relation2: '<application>[:<relation_name>]'
800 app_facade
= client
.ApplicationFacade()
801 app_facade
.connect(self
.connection
)
804 'Adding relation %s <-> %s', relation1
, relation2
)
807 result
= await app_facade
.AddRelation([relation1
, relation2
])
808 except JujuAPIError
as e
:
809 if 'relation already exists' not in e
.message
:
812 'Relation %s <-> %s already exists', relation1
, relation2
)
813 # TODO: if relation already exists we should return the
814 # Relation ModelEntity here
817 def predicate(delta
):
819 for endpoint
in delta
.data
['endpoints']:
820 endpoints
[endpoint
['application-name']] = endpoint
['relation']
821 return endpoints
== result
.endpoints
823 return await self
._wait
_for
_new
('relation', None, predicate
)
def add_space(self, name, *cidrs):
    """Add a new network space.

    Adds a new space with the given name and associates the given
    (optional) list of existing subnet CIDRs with it.

    :param str name: Name of the space
    :param \*cidrs: Optional list of existing subnet CIDRs

    """
    # Not yet implemented in this client library.
    raise NotImplementedError()

def add_ssh_key(self, key):
    """Add a public SSH key to this model.

    :param str key: The public ssh key

    """
    raise NotImplementedError()
add_ssh_keys = add_ssh_key  # plural alias

def add_subnet(self, cidr_or_id, space, *zones):
    """Add an existing subnet to this model.

    :param str cidr_or_id: CIDR or provider ID of the existing subnet
    :param str space: Network space with which to associate
    :param str \*zones: Zone(s) in which the subnet resides

    """
    raise NotImplementedError()

def get_backups(self):
    """Retrieve metadata for backups in this model.

    """
    raise NotImplementedError()

def block(self, *commands):
    """Add a new block to this model.

    :param str \*commands: The commands to block. Valid values are
        'all-changes', 'destroy-model', 'remove-object'

    """
    raise NotImplementedError()

def get_blocks(self):
    """List blocks for this model.

    """
    raise NotImplementedError()

def get_cached_images(self, arch=None, kind=None, series=None):
    """Return a list of cached OS images.

    :param str arch: Filter by image architecture
    :param str kind: Filter by image kind, e.g. 'lxd'
    :param str series: Filter by image series, e.g. 'xenial'

    """
    raise NotImplementedError()

def create_backup(self, note=None, no_download=False):
    """Create a backup of this model.

    :param str note: A note to store with the backup
    :param bool no_download: Do not download the backup archive
    :return str: Path to downloaded archive

    """
    raise NotImplementedError()

def create_storage_pool(self, name, provider_type, **pool_config):
    """Create or define a storage pool.

    :param str name: Name to give the storage pool
    :param str provider_type: Pool provider type
    :param \*\*pool_config: key/value pool configuration pairs

    """
    raise NotImplementedError()
908 self
, no_tail
=False, exclude_module
=None, include_module
=None,
909 include
=None, level
=None, limit
=0, lines
=10, replay
=False,
911 """Get log messages for this model.
913 :param bool no_tail: Stop after returning existing log messages
914 :param list exclude_module: Do not show log messages for these logging
916 :param list include_module: Only show log messages for these logging
918 :param list include: Only show log messages for these entities
919 :param str level: Log level to show, valid options are 'TRACE',
920 'DEBUG', 'INFO', 'WARNING', 'ERROR,
921 :param int limit: Return this many of the most recent (possibly
922 filtered) lines are shown
923 :param int lines: Yield this many of the most recent lines, and keep
925 :param bool replay: Yield the entire log, and keep yielding
926 :param list exclude: Do not show log messages for these entities
929 raise NotImplementedError()
932 self
, entity_url
, application_name
=None, bind
=None, budget
=None,
933 channel
=None, config
=None, constraints
=None, force
=False,
934 num_units
=1, plan
=None, resources
=None, series
=None, storage
=None,
936 """Deploy a new service or bundle.
938 :param str entity_url: Charm or bundle url
939 :param str application_name: Name to give the service
940 :param dict bind: <charm endpoint>:<network space> pairs
941 :param dict budget: <budget name>:<limit> pairs
942 :param str channel: Charm store channel from which to retrieve
943 the charm or bundle, e.g. 'development'
944 :param dict config: Charm configuration dictionary
945 :param constraints: Service constraints
946 :type constraints: :class:`juju.Constraints`
947 :param bool force: Allow charm to be deployed to a machine running
948 an unsupported series
949 :param int num_units: Number of units to deploy
950 :param str plan: Plan under which to deploy charm
951 :param dict resources: <resource name>:<file path> pairs
952 :param str series: Series on which to deploy
953 :param dict storage: Storage constraints TODO how do these look?
954 :param to: Placement directive as a string. For example:
956 '23' - place on machine 23
957 'lxd:7' - place in new lxd container on machine 7
958 '24/lxd/3' - place in container 3 on machine 24
960 If None, a new machine is provisioned.
965 - application_name is required; fill this in automatically if not
967 - series is required; how do we pick a default?
971 placement
= parse_placement(to
)
977 k
: client
.Constraints(**v
)
978 for k
, v
in storage
.items()
982 entity_url
.startswith('local:') or
983 os
.path
.isdir(entity_url
)
985 entity_id
= await self
.charmstore
.entityId(entity_url
) \
986 if not is_local
else entity_url
988 app_facade
= client
.ApplicationFacade()
989 client_facade
= client
.ClientFacade()
990 app_facade
.connect(self
.connection
)
991 client_facade
.connect(self
.connection
)
993 is_bundle
= ((is_local
and
994 (Path(entity_id
) / 'bundle.yaml').exists()) or
995 (not is_local
and 'bundle/' in entity_id
))
998 handler
= BundleHandler(self
)
999 await handler
.fetch_plan(entity_id
)
1000 await handler
.execute_plan()
1001 extant_apps
= {app
for app
in self
.applications
}
1002 pending_apps
= set(handler
.applications
) - extant_apps
1004 # new apps will usually be in the model by now, but if some
1005 # haven't made it yet we'll need to wait on them to be added
1006 await asyncio
.gather(*[
1007 asyncio
.ensure_future(
1008 self
._wait
_for
_new
('application', app_name
))
1009 for app_name
in pending_apps
1011 return [app
for name
, app
in self
.applications
.items()
1012 if name
in handler
.applications
]
1015 'Deploying %s', entity_id
)
1018 await client_facade
.AddCharm(channel
, entity_id
)
1019 elif not entity_id
.startswith('local:'):
1020 # We have a local charm dir that needs to be uploaded
1021 charm_dir
= os
.path
.abspath(
1022 os
.path
.expanduser(entity_id
))
1023 series
= series
or get_charm_series(charm_dir
)
1026 "Couldn't determine series for charm at {}. "
1027 "Pass a 'series' kwarg to Model.deploy().".format(
1029 entity_id
= await self
.add_local_charm_dir(charm_dir
, series
)
1031 app
= client
.ApplicationDeploy(
1032 application
=application_name
,
1034 charm_url
=entity_id
,
1036 constraints
=parse_constraints(constraints
),
1037 endpoint_bindings
=bind
,
1038 num_units
=num_units
,
1039 resources
=resources
,
1043 app
.placement
= placement
1045 await app_facade
.Deploy([app
])
1046 return await self
._wait
_for
_new
('application', application_name
)
1049 """Terminate all machines and resources for this model.
1052 raise NotImplementedError()
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    :param str \*unit_names: Names of the units to destroy
    """
    app_facade = client.ApplicationFacade()
    app_facade.connect(self.connection)

    log.debug(
        'Destroying unit%s %s',
        # Fix: pluralize when destroying *more* than one unit; the
        # previous condition (`== 1`) appended the 's' for a single unit.
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))

    return await app_facade.DestroyUnits(list(unit_names))
destroy_units = destroy_unit
def get_backup(self, archive_id):
    """Download a backup archive file.

    :param str archive_id: The id of the archive to download
    :return str: Path to the archive file

    """
    # Not yet implemented in this client library.
    raise NotImplementedError()
1079 self
, num_controllers
=0, constraints
=None, series
=None, to
=None):
1080 """Ensure sufficient controllers exist to provide redundancy.
1082 :param int num_controllers: Number of controllers to make available
1083 :param constraints: Constraints to apply to the controller machines
1084 :type constraints: :class:`juju.Constraints`
1085 :param str series: Series of the controller machines
1086 :param list to: Placement directives for controller machines, e.g.::
1089 'lxc:7' - new lxc container on machine 7
1090 '24/lxc/3' - lxc container 3 or machine 24
1092 If None, a new machine is provisioned.
1095 raise NotImplementedError()
1097 def get_config(self
):
1098 """Return the configuration settings for this model.
1101 raise NotImplementedError()
1103 def get_constraints(self
):
1104 """Return the machine constraints for this model.
1107 raise NotImplementedError()
1109 def grant(self
, username
, acl
='read'):
1110 """Grant a user access to this model.
1112 :param str username: Username
1113 :param str acl: Access control ('read' or 'write')
1116 raise NotImplementedError()
1118 def import_ssh_key(self
, identity
):
1119 """Add a public SSH key from a trusted indentity source to this model.
1121 :param str identity: User identity in the form <lp|gh>:<username>
1124 raise NotImplementedError()
1125 import_ssh_keys
= import_ssh_key
1127 def get_machines(self
, machine
, utc
=False):
1128 """Return list of machines in this model.
1130 :param str machine: Machine id, e.g. '0'
1131 :param bool utc: Display time as UTC in RFC3339 format
1134 raise NotImplementedError()
1136 def get_shares(self
):
1137 """Return list of all users with access to this model.
1140 raise NotImplementedError()
1142 def get_spaces(self
):
1143 """Return list of all known spaces, including associated subnets.
1146 raise NotImplementedError()
1148 def get_ssh_key(self
):
1149 """Return known SSH keys for this model.
1152 raise NotImplementedError()
1153 get_ssh_keys
= get_ssh_key
1155 def get_storage(self
, filesystem
=False, volume
=False):
1156 """Return details of storage instances.
1158 :param bool filesystem: Include filesystem storage
1159 :param bool volume: Include volume storage
1162 raise NotImplementedError()
1164 def get_storage_pools(self
, names
=None, providers
=None):
1165 """Return list of storage pools.
1167 :param list names: Only include pools with these names
1168 :param list providers: Only include pools for these providers
1171 raise NotImplementedError()
    def get_subnets(self, space=None, zone=None):
        """Return list of known subnets.

        :param str space: Only include subnets in this space
        :param str zone: Only include subnets in this zone

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def remove_blocks(self):
        """Remove all blocks from this model.

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def remove_backup(self, backup_id):
        """Delete a backup.

        :param str backup_id: The id of the backup to remove

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def remove_cached_images(self, arch=None, kind=None, series=None):
        """Remove cached OS images.

        :param str arch: Architecture of the images to remove
        :param str kind: Image kind to remove, e.g. 'lxd'
        :param str series: Image series to remove, e.g. 'xenial'

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def remove_machine(self, *machine_ids):
        """Remove a machine from this model.

        :param str \*machine_ids: Ids of the machines to remove

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    # Plural alias for convenience.
    remove_machines = remove_machine
    def remove_ssh_key(self, *keys):
        """Remove a public SSH key(s) from this model.

        :param str \*keys: Keys to remove

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    # Plural alias for convenience.
    remove_ssh_keys = remove_ssh_key
    def restore_backup(
            self, bootstrap=False, constraints=None, archive=None,
            backup_id=None, upload_tools=False):
        """Restore a backup archive to a new controller.

        :param bool bootstrap: Bootstrap a new state machine
        :param constraints: Model constraints
        :type constraints: :class:`juju.Constraints`
        :param str archive: Path to backup archive to restore
        :param str backup_id: Id of backup to restore
        :param bool upload_tools: Upload tools if bootstrapping a new machine

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def retry_provisioning(self):
        """Retry provisioning for failed machines.

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def revoke(self, username, acl='read'):
        """Revoke a user's access to this model.

        :param str username: Username to revoke
        :param str acl: Access control ('read' or 'write')

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def run(self, command, timeout=None):
        """Run command on all machines in this model.

        :param str command: The command to run
        :param int timeout: Time to wait before command is considered failed

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def set_config(self, **config):
        """Set configuration keys on this model.

        :param \*\*config: Config key/values

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def set_constraints(self, constraints):
        """Set machine constraints on this model.

        :param :class:`juju.Constraints` constraints: Machine constraints

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def get_action_output(self, action_uuid, wait=-1):
        """Get the results of an action by ID.

        :param str action_uuid: Id of the action
        :param int wait: Time in seconds to wait for action to complete

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def get_action_status(self, uuid_or_prefix=None, name=None):
        """Get the status of all actions, filtered by ID, ID prefix, or name.

        :param str uuid_or_prefix: Filter by action uuid or prefix
        :param str name: Filter by action name

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def get_budget(self, budget_name):
        """Get budget usage info.

        :param str budget_name: Name of budget

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def get_status(self, filter_=None, utc=False):
        """Return the status of the model.

        :param str filter_: Service or unit name or wildcard ('*')
        :param bool utc: Display time as UTC in RFC3339 format

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def sync_tools(
            self, all_=False, destination=None, dry_run=False, public=False,
            source=None, stream=None, version=None):
        """Copy Juju tools into this model.

        :param bool all_: Copy all versions, not just the latest
        :param str destination: Path to local destination directory
        :param bool dry_run: Don't do the actual copy
        :param bool public: Tools are for a public cloud, so generate mirrors
        :param str source: Path to local source directory
        :param str stream: Simplestreams stream for which to sync metadata
        :param str version: Copy a specific major.minor version

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def unblock(self, *commands):
        """Unblock an operation that would alter this model.

        :param str \*commands: The commands to unblock. Valid values are
            'all-changes', 'destroy-model', 'remove-object'

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def unset_config(self, *keys):
        """Unset configuration on this model.

        :param str \*keys: The keys to unset

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def upgrade_gui(self):
        """Upgrade the Juju GUI for this model.

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def upgrade_juju(
            self, dry_run=False, reset_previous_upgrade=False,
            upload_tools=False, version=None):
        """Upgrade Juju on all machines in a model.

        :param bool dry_run: Don't do the actual upgrade
        :param bool reset_previous_upgrade: Clear the previous (incomplete)
            upgrade status
        :param bool upload_tools: Upload local version of tools
        :param str version: Upgrade to a specific version

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    def upload_backup(self, archive_path):
        """Store a backup archive remotely in Juju.

        :param str archive_path: Path to local archive

        Not yet implemented; always raises NotImplementedError.
        """
        raise NotImplementedError()
    @property
    def charmstore(self):
        # Async charm store client attached to this model instance.
        return self._charmstore
    async def get_metrics(self, *tags):
        """Retrieve metrics.

        :param str \*tags: Tags of entities from which to retrieve metrics.
            No tags retrieves the metrics of all units in the model.
        :return: Dictionary of unit_name:metrics
        :raises ValueError: if the API reports an invalid entity tag
        """
        log.debug("Retrieving metrics for %s",
                  ', '.join(tags) if tags else "all units")

        metrics_facade = client.MetricsDebugFacade()
        metrics_facade.connect(self.connection)

        # An empty entity list asks the facade for metrics of all units.
        entities = [client.Entity(tag) for tag in tags]
        metrics_result = await metrics_facade.GetMetrics(entities)

        # Group metrics by the unit that reported them.
        metrics = collections.defaultdict(list)

        for entity_metrics in metrics_result.results:
            error = entity_metrics.error
            if error:
                # Distinguish caller error (bad tag) from other API failures.
                if "is not a valid tag" in error:
                    raise ValueError(error.message)
                else:
                    raise Exception(error.message)

            for metric in entity_metrics.metrics:
                metrics[metric.unit].append(vars(metric))

        return metrics
def get_charm_series(path):
    """Inspects the charm directory at ``path`` and returns a default
    series from its metadata.yaml (the first item in the 'series' list).

    Returns None if no series can be determined.

    :param path: Path to a local charm directory
    :return: First declared series as a str, or None
    """
    md = Path(path) / "metadata.yaml"
    if not md.exists():
        return None
    # safe_load: metadata.yaml is plain data; a bare yaml.load would allow
    # arbitrary object construction from charm-supplied content.
    # read_text() also avoids leaving the file handle open (md.open() was
    # never closed).
    data = yaml.safe_load(md.read_text())
    # Guard against an empty metadata.yaml, which loads as None.
    series = (data or {}).get('series')
    return series[0] if series else None
class BundleHandler(object):
    """
    Handle bundles by using the API to translate bundle YAML into a plan of
    steps and then dispatching each of those using the API.
    """
    def __init__(self, model):
        self.model = model
        self.charmstore = model.charmstore
        self.plan = []
        self.references = {}
        self._units_by_app = {}
        # Snapshot existing units per application so addUnit can reuse
        # them instead of adding duplicates.
        for unit_name, unit in model.units.items():
            app_units = self._units_by_app.setdefault(unit.application, [])
            app_units.append(unit_name)
        self.client_facade = client.ClientFacade()
        self.client_facade.connect(model.connection)
        self.app_facade = client.ApplicationFacade()
        self.app_facade.connect(model.connection)
        self.ann_facade = client.AnnotationsFacade()
        self.ann_facade.connect(model.connection)

    async def _handle_local_charms(self, bundle):
        """Search for references to local charms (i.e. filesystem paths)
        in the bundle. Upload the local charms to the model, and replace
        the filesystem paths with appropriate 'local:' paths in the bundle.

        Return the modified bundle.

        :param dict bundle: Bundle dictionary
        :return: Modified bundle dictionary
        :raises JujuError: if a local charm's series cannot be determined
        """
        apps, args = [], []

        default_series = bundle.get('series')
        for app_name in self.applications:
            app_dict = bundle['services'][app_name]
            charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm']))
            if not os.path.isdir(charm_dir):
                # Not a local filesystem path; leave the reference alone.
                continue
            # Series resolution order: app, bundle default, charm metadata.
            series = (
                app_dict.get('series') or
                default_series or
                get_charm_series(charm_dir)
            )
            if not series:
                raise JujuError(
                    "Couldn't determine series for charm at {}. "
                    "Add a 'series' key to the bundle.".format(charm_dir))
            # Keep track of what we need to update. We keep a list of apps
            # that need to be updated, and a corresponding list of args
            # needed to update those apps.
            apps.append(app_name)
            args.append((charm_dir, series))

        if apps:
            # If we have apps to update, spawn all the coroutines concurrently
            # and wait for them to finish.
            charm_urls = await asyncio.gather(*[
                self.model.add_local_charm_dir(*params)
                for params in args
            ])
            # Update the 'charm:' entry for each app with the new 'local:' url.
            for app_name, charm_url in zip(apps, charm_urls):
                bundle['services'][app_name]['charm'] = charm_url

        return bundle

    async def fetch_plan(self, entity_id):
        """Load the bundle YAML (local directory or charm store), resolve
        local charms, and ask the API for the corresponding change plan.

        :param str entity_id: Bundle path or charm store id
        """
        is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id)
        if is_local:
            bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text()
        else:
            bundle_yaml = await self.charmstore.files(entity_id,
                                                      filename='bundle.yaml',
                                                      read_file=True)
        self.bundle = yaml.safe_load(bundle_yaml)
        self.bundle = await self._handle_local_charms(self.bundle)

        self.plan = await self.client_facade.GetBundleChanges(
            yaml.dump(self.bundle))

    async def execute_plan(self):
        """Dispatch each step of the fetched plan to the matching handler
        method, recording results so later steps can reference them.
        """
        for step in self.plan.changes:
            method = getattr(self, step.method)
            result = await method(*step.args)
            self.references[step.id_] = result

    @property
    def applications(self):
        # Application names declared by the bundle.
        return list(self.bundle['services'].keys())

    def resolve(self, reference):
        """Resolve a '$'-prefixed placeholder to the result of an earlier
        plan step; return other values unchanged.
        """
        if reference and reference.startswith('$'):
            reference = self.references[reference[1:]]
        return reference

    async def addCharm(self, charm, series):
        """
        :param charm string:
            Charm holds the URL of the charm to be added.

        :param series string:
            Series holds the series of the charm to be added
            if the charm default is not sufficient.
        """
        # We don't add local charms because they've already been added
        # by self._handle_local_charms
        if charm.startswith('local:'):
            return charm

        entity_id = await self.charmstore.entityId(charm)
        log.debug('Adding %s', entity_id)
        await self.client_facade.AddCharm(None, entity_id)
        return entity_id

    async def addMachines(self, params=None):
        """
        :param params dict:
            Dictionary specifying the machine to add. All keys are optional.
            Keys include:

            series: string specifying the machine OS series.

            constraints: string holding machine constraints, if any. We'll
                parse this into the json friendly dict that the juju api
                expects.

            container_type: string holding the type of the container (for
                instance "lxd" or "kvm"). It is not specified for top level
                machines.

            parent_id: string holding a placeholder pointing to another
                machine change or to a unit change. This value is only
                specified in the case this machine is a container, in
                which case also ContainerType is set.
        :raises ValueError: if the API reports an error adding the machine
        """
        params = params or {}

        # Normalize keys
        params = {normalize_key(k): params[k] for k in params.keys()}

        # Fix up values, as necessary.
        if 'parent_id' in params:
            params['parent_id'] = self.resolve(params['parent_id'])

        params['constraints'] = parse_constraints(
            params.get('constraints'))
        params['jobs'] = params.get('jobs', ['JobHostUnits'])

        if params.get('container_type') == 'lxc':
            log.warning('Juju 2.0 does not support lxc containers. '
                        'Converting containers to lxd.')
            params['container_type'] = 'lxd'

        # Submit the request.
        params = client.AddMachineParams(**params)
        results = await self.client_facade.AddMachines([params])
        error = results.machines[0].error
        if error:
            # Bug fix: the message was previously passed as a second
            # positional arg to ValueError and never interpolated.
            raise ValueError("Error adding machine: %s" % error.message)
        machine = results.machines[0].machine
        log.debug('Added new machine %s', machine)
        return machine

    async def addRelation(self, endpoint1, endpoint2):
        """
        :param endpoint1 string:
        :param endpoint2 string:
            Endpoint1 and Endpoint2 hold relation endpoints in the
            "application:interface" form, where the application is always a
            placeholder pointing to an application change, and the interface is
            optional. Examples are "$deploy-42:web" or just "$deploy-42".
        """
        endpoints = [endpoint1, endpoint2]
        # resolve indirect references
        for i in range(len(endpoints)):
            parts = endpoints[i].split(':')
            parts[0] = self.resolve(parts[0])
            endpoints[i] = ':'.join(parts)

        log.info('Relating %s <-> %s', *endpoints)
        return await self.model.add_relation(*endpoints)

    async def deploy(self, charm, series, application, options, constraints,
                     storage, endpoint_bindings, resources):
        """
        :param charm string:
            Charm holds the URL of the charm to be used to deploy this
            application.

        :param series string:
            Series holds the series of the application to be deployed
            if the charm default is not sufficient.

        :param application string:
            Application holds the application name.

        :param options map[string]interface{}:
            Options holds application options.

        :param constraints string:
            Constraints holds the optional application constraints.

        :param storage map[string]string:
            Storage holds the optional storage constraints.

        :param endpoint_bindings map[string]string:
            EndpointBindings holds the optional endpoint bindings

        :param resources map[string]int:
            Resources identifies the revision to use for each resource
            of the application's charm.
        """
        # resolve indirect references
        charm = self.resolve(charm)
        # stringify all config values for API, and convert to YAML
        options = {k: str(v) for k, v in options.items()}
        options = yaml.dump({application: options}, default_flow_style=False)
        # build param object
        app = client.ApplicationDeploy(
            charm_url=charm,
            series=series,
            application=application,
            # Pass options to config-yaml rather than config, as
            # config-yaml invokes a newer codepath that better handles
            # empty strings in the options values.
            config_yaml=options,
            constraints=parse_constraints(constraints),
            storage=storage,
            endpoint_bindings=endpoint_bindings,
            resources=resources,
        )
        # do the do
        log.info('Deploying %s', charm)
        await self.app_facade.Deploy([app])
        # ensure the app is in the model for future operations
        await self.model._wait_for_new('application', application)
        return application

    async def addUnit(self, application, to):
        """
        :param application string:
            Application holds the application placeholder name for which a unit
            is added.

        :param to string:
            To holds the optional location where to add the unit, as a
            placeholder pointing to another unit change or to a machine change.
        """
        application = self.resolve(application)
        placement = self.resolve(to)
        if self._units_by_app.get(application):
            # enough units for this application already exist;
            # claim one, and carry on
            # NB: this should probably honor placement, but the juju client
            # doesn't, so we're not bothering, either
            unit_name = self._units_by_app[application].pop()
            log.debug('Reusing unit %s for %s', unit_name, application)
            return self.model.units[unit_name]

        log.debug('Adding new unit for %s%s', application,
                  ' to %s' % placement if placement else '')
        return await self.model.applications[application].add_unit(
            count=1,
            to=placement,
        )

    async def expose(self, application):
        """
        :param application string:
            Application holds the placeholder name of the application that must
            be exposed.
        """
        application = self.resolve(application)
        log.info('Exposing %s', application)
        return await self.model.applications[application].expose()

    async def setAnnotations(self, id_, entity_type, annotations):
        """
        :param id_ string:
            Id is the placeholder for the application or machine change
            corresponding to the entity to be annotated.

        :param entity_type EntityType:
            EntityType holds the type of the entity, "application" or
            "machine".

        :param annotations map[string]string:
            Annotations holds the annotations as key/value pairs.
        """
        entity_id = self.resolve(id_)
        try:
            entity = self.model.state.get_entity(entity_type, entity_id)
        except KeyError:
            # Entity not yet seen in the local model cache; wait for it.
            entity = await self.model._wait_for_new(entity_type, entity_id)
        return await entity.set_annotations(annotations)
class CharmStore(object):
    """
    Async wrapper around theblues.charmstore.CharmStore

    Attribute and method access is proxied lazily via ``__getattr__``:
    blocking method calls on the wrapped store are run in an executor so
    they do not block the event loop.
    """
    def __init__(self, loop):
        # Event loop whose default executor runs the blocking calls.
        self.loop = loop
        self._cs = charmstore.CharmStore()

    def __getattr__(self, name):
        """
        Wrap method calls in coroutines that use run_in_executor to make them
        async.
        """
        attr = getattr(self._cs, name)
        if not callable(attr):
            # Plain attribute: expose a zero-arg partial that re-reads the
            # current value from the wrapped store.
            wrapper = partial(getattr, self._cs, name)
            setattr(self, name, wrapper)
        else:
            async def coro(*args, **kwargs):
                method = partial(attr, *args, **kwargs)
                return await self.loop.run_in_executor(None, method)
            setattr(self, name, coro)
        # The wrapper is now cached on the instance, so __getattr__ will
        # not fire again for this name.
        return getattr(self, name)
1756 class CharmArchiveGenerator(object):
1757 def __init__(self
, path
):
1758 self
.path
= os
.path
.abspath(os
.path
.expanduser(path
))
1760 def make_archive(self
, path
):
1761 """Create archive of directory and write to ``path``.
1763 :param path: Path to archive
1767 * build/\* - This is used for packing the charm itself and any
1769 * \*/.\* - Hidden files are all ignored for now. This will most
1770 likely be changed into a specific ignore list
1774 zf
= zipfile
.ZipFile(path
, 'w', zipfile
.ZIP_DEFLATED
)
1775 for dirpath
, dirnames
, filenames
in os
.walk(self
.path
):
1776 relative_path
= dirpath
[len(self
.path
) + 1:]
1777 if relative_path
and not self
._ignore
(relative_path
):
1778 zf
.write(dirpath
, relative_path
)
1779 for name
in filenames
:
1780 archive_name
= os
.path
.join(relative_path
, name
)
1781 if not self
._ignore
(archive_name
):
1782 real_path
= os
.path
.join(dirpath
, name
)
1783 self
._check
_type
(real_path
)
1784 if os
.path
.islink(real_path
):
1785 self
._check
_link
(real_path
)
1786 self
._write
_symlink
(
1787 zf
, os
.readlink(real_path
), archive_name
)
1789 zf
.write(real_path
, archive_name
)
1793 def _check_type(self
, path
):
1797 if stat
.S_ISDIR(s
.st_mode
) or stat
.S_ISREG(s
.st_mode
):
1799 raise ValueError("Invalid Charm at % %s" % (
1800 path
, "Invalid file type for a charm"))
    def _check_link(self, path):
        """Verify that the symlink at ``path`` stays inside the charm tree.

        :param str path: Path of the symlink to validate
        :raises ValueError: if the link target is absolute, or resolves
            outside ``self.path``
        """
        link_path = os.readlink(path)
        if link_path[0] == "/":
            raise ValueError(
                "Invalid Charm at %s: %s" % (
                    path, "Absolute links are invalid"))
        # Resolve the (relative) target against the link's directory and
        # make sure it stays under the charm root.
        path_dir = os.path.dirname(path)
        link_path = os.path.join(path_dir, link_path)
        if not link_path.startswith(os.path.abspath(self.path)):
            raise ValueError(
                "Invalid charm at %s %s" % (
                    path, "Only internal symlinks are allowed"))
1815 def _write_symlink(self
, zf
, link_target
, link_path
):
1816 """Package symlinks with appropriate zipfile metadata."""
1817 info
= zipfile
.ZipInfo()
1818 info
.filename
= link_path
1819 info
.create_system
= 3
1820 # Magic code for symlinks / py2/3 compat
1821 # 27166663808 = (stat.S_IFLNK | 0755) << 16
1822 info
.external_attr
= 2716663808
1823 zf
.writestr(info
, link_target
)
1825 def _ignore(self
, path
):
1826 if path
== "build" or path
.startswith("build/"):
1828 if path
.startswith('.'):