| Adam Israel | dcdf82b | 2017-08-15 15:26:43 -0400 | [diff] [blame] | 1 | import asyncio |
| 2 | import base64 |
| 3 | import collections |
| 4 | import hashlib |
| 5 | import json |
| 6 | import logging |
| 7 | import os |
| 8 | import re |
| 9 | import stat |
| 10 | import tempfile |
| 11 | import weakref |
| 12 | import zipfile |
| 13 | from concurrent.futures import CancelledError |
| 14 | from functools import partial |
| 15 | from pathlib import Path |
| 16 | |
| 17 | import websockets |
| 18 | import yaml |
| 19 | import theblues.charmstore |
| 20 | import theblues.errors |
| 21 | |
| 22 | from . import tag, utils |
| 23 | from .client import client |
| 24 | from .client import connection |
| 25 | from .client.client import ConfigValue |
| 26 | from .constraints import parse as parse_constraints, normalize_key |
| 27 | from .delta import get_entity_delta |
| 28 | from .delta import get_entity_class |
| 29 | from .exceptions import DeadEntityException |
| 30 | from .errors import JujuError, JujuAPIError |
| 31 | from .placement import parse as parse_placement |
| 32 | |
| 33 | log = logging.getLogger(__name__) |
| 34 | |
| 35 | |
| 36 | class _Observer(object): |
| 37 | """Wrapper around an observer callable. |
| 38 | |
| 39 | This wrapper allows filter criteria to be associated with the |
| 40 | callable so that it's only called for changes that meet the criteria. |
| 41 | |
| 42 | """ |
| 43 | def __init__(self, callable_, entity_type, action, entity_id, predicate): |
| 44 | self.callable_ = callable_ |
| 45 | self.entity_type = entity_type |
| 46 | self.action = action |
| 47 | self.entity_id = entity_id |
| 48 | self.predicate = predicate |
| 49 | if self.entity_id: |
| 50 | self.entity_id = str(self.entity_id) |
| 51 | if not self.entity_id.startswith('^'): |
| 52 | self.entity_id = '^' + self.entity_id |
| 53 | if not self.entity_id.endswith('$'): |
| 54 | self.entity_id += '$' |
| 55 | |
| 56 | async def __call__(self, delta, old, new, model): |
| 57 | await self.callable_(delta, old, new, model) |
| 58 | |
| 59 | def cares_about(self, delta): |
| 60 | """Return True if this observer "cares about" (i.e. wants to be |
| 61 | called) for a this delta. |
| 62 | |
| 63 | """ |
| 64 | if (self.entity_id and delta.get_id() and |
| 65 | not re.match(self.entity_id, str(delta.get_id()))): |
| 66 | return False |
| 67 | |
| 68 | if self.entity_type and self.entity_type != delta.entity: |
| 69 | return False |
| 70 | |
| 71 | if self.action and self.action != delta.type: |
| 72 | return False |
| 73 | |
| 74 | if self.predicate and not self.predicate(delta): |
| 75 | return False |
| 76 | |
| 77 | return True |
| 78 | |
| 79 | |
class ModelObserver(object):
    """
    Base class for creating observers that react to changes in a model.
    """
    async def __call__(self, delta, old, new, model):
        # Dispatch to a specific handler (e.g. ``on_unit_change``) if
        # the subclass defines one; otherwise fall back to the generic
        # ``on_change`` handler.
        handler = getattr(
            self,
            'on_{}_{}'.format(delta.entity, delta.type),
            self.on_change)
        await handler(delta, old, new, model)

    async def on_change(self, delta, old, new, model):
        """Generic model-change handler.

        This should be overridden in a subclass.

        :param delta: :class:`juju.client.overrides.Delta`
        :param old: :class:`juju.model.ModelEntity`
        :param new: :class:`juju.model.ModelEntity`
        :param model: :class:`juju.model.Model`

        """
        pass
| 101 | |
| 102 | |
class ModelState(object):
    """Holds the state of the model, including the delta history of all
    entities in the model.

    """
    def __init__(self, model):
        self.model = model
        self.state = dict()

    def _live_entity_map(self, entity_type):
        """Return an id:Entity map of all the living entities of
        type ``entity_type``.

        """
        live = {}
        for entity_id, history in self.state.get(entity_type, {}).items():
            # A trailing None in the history marks a removed entity.
            if history[-1] is not None:
                live[entity_id] = self.get_entity(entity_type, entity_id)
        return live

    @property
    def applications(self):
        """Return a map of application-name:Application for all applications
        currently in the model.

        """
        return self._live_entity_map('application')

    @property
    def machines(self):
        """Return a map of machine-id:Machine for all machines currently in
        the model.

        """
        return self._live_entity_map('machine')

    @property
    def units(self):
        """Return a map of unit-id:Unit for all units currently in
        the model.

        """
        return self._live_entity_map('unit')

    def entity_history(self, entity_type, entity_id):
        """Return the history deque for an entity.

        """
        return self.state[entity_type][entity_id]

    def entity_data(self, entity_type, entity_id, history_index):
        """Return the data dict for an entity at a specific index of its
        history.

        """
        return self.entity_history(entity_type, entity_id)[history_index]

    def apply_delta(self, delta):
        """Apply delta to our state and return a copy of the
        affected object as it was before and after the update, e.g.:

            old_obj, new_obj = self.apply_delta(delta)

        old_obj may be None if the delta is for the creation of a new object,
        e.g. a new application or unit is deployed.

        new_obj will never be None, but may be dead (new_obj.dead == True)
        if the object was deleted as a result of the delta being applied.

        """
        type_state = self.state.setdefault(delta.entity, {})
        history = type_state.setdefault(delta.get_id(), collections.deque())

        history.append(delta.data)
        if delta.type == 'remove':
            # A trailing None marks the entity as removed (see dead/alive).
            history.append(None)

        entity = self.get_entity(delta.entity, delta.get_id())
        return entity.previous(), entity

    def get_entity(
            self, entity_type, entity_id, history_index=-1, connected=True):
        """Return an object instance for the given entity_type and id.

        By default the object state matches the most recent state from
        Juju. To get an instance of the object in an older state, pass
        history_index, an index into the history deque for the entity.

        """
        if history_index < -1:
            # Normalize negative indices (other than the default -1)
            # into positive ones; bail out if it falls off the front.
            history_index += len(self.entity_history(entity_type, entity_id))
            if history_index < 0:
                return None

        try:
            self.entity_data(entity_type, entity_id, history_index)
        except IndexError:
            return None

        entity_class = get_entity_class(entity_type)
        return entity_class(
            entity_id, self.model, history_index=history_index,
            connected=connected)
| 210 | |
| 211 | |
class ModelEntity(object):
    """An object in the Model tree"""

    def __init__(self, entity_id, model, history_index=-1, connected=True):
        """Initialize a new entity

        :param entity_id str: The unique id of the object in the model
        :param model: The model instance in whose object tree this
            entity resides
        :history_index int: The index of this object's state in the model's
            history deque for this entity
        :connected bool: Flag indicating whether this object gets live updates
            from the model.

        """
        self.entity_id = entity_id
        self.model = model
        self._history_index = history_index
        self.connected = connected
        self.connection = model.connection

    def __repr__(self):
        return '<{} entity_id="{}">'.format(type(self).__name__,
                                            self.entity_id)

    def __getattr__(self, name):
        """Fetch object attributes from the underlying data dict held in the
        model.

        Juju's API data uses dashed keys, so ``unit.agent_status`` falls
        back to looking up ``'agent-status'`` if the underscored form is
        not present.

        """
        try:
            return self.safe_data[name]
        except KeyError:
            name = name.replace('_', '-')
            if name in self.safe_data:
                return self.safe_data[name]
            else:
                raise

    def __bool__(self):
        return bool(self.data)

    def on_change(self, callable_):
        """Add a change observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'change', self.entity_id)

    def on_remove(self, callable_):
        """Add a remove observer to this entity.

        """
        self.model.add_observer(
            callable_, self.entity_type, 'remove', self.entity_id)

    @property
    def entity_type(self):
        """A string identifying the entity type of this object, e.g.
        'application' or 'unit', etc.

        """
        return self.__class__.__name__.lower()

    @property
    def current(self):
        """Return True if this object represents the current state of the
        entity in the underlying model.

        This will be True except when the object represents an entity at a
        non-latest state in history, e.g. if the object was obtained by calling
        .previous() on another object.

        """
        return self._history_index == -1

    @property
    def dead(self):
        """Returns True if this entity no longer exists in the underlying
        model.

        """
        # A trailing None in the entity's history marks its removal.
        return (
            self.data is None or
            self.model.state.entity_data(
                self.entity_type, self.entity_id, -1) is None
        )

    @property
    def alive(self):
        """Returns True if this entity still exists in the underlying
        model.

        """
        return not self.dead

    @property
    def data(self):
        """The data dictionary for this entity.

        """
        return self.model.state.entity_data(
            self.entity_type, self.entity_id, self._history_index)

    @property
    def safe_data(self):
        """The data dictionary for this entity.

        If this `ModelEntity` points to the dead state, it will
        raise `DeadEntityException`.

        """
        if self.data is None:
            raise DeadEntityException(
                "Entity {}:{} is dead - its attributes can no longer be "
                "accessed. Use the .previous() method on this object to get "
                "a copy of the object at its previous state.".format(
                    self.entity_type, self.entity_id))
        return self.data

    def previous(self):
        """Return a copy of this object as was at its previous state in
        history.

        Returns None if this object is new (and therefore has no history).

        The returned object is always "disconnected", i.e. does not receive
        live updates.

        """
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, self._history_index - 1,
            connected=False)

    def next(self):
        """Return a copy of this object at its next state in
        history.

        Returns None if this object is already the latest.

        The returned object is "disconnected", i.e. does not receive
        live updates, unless it is current (latest).

        """
        if self._history_index == -1:
            return None

        new_index = self._history_index + 1
        connected = (
            new_index == len(self.model.state.entity_history(
                self.entity_type, self.entity_id)) - 1
        )
        # Bug fix: this previously passed ``self._history_index - 1``,
        # which stepped *backwards* in history; ``next()`` must use the
        # incremented index computed above.
        return self.model.state.get_entity(
            self.entity_type, self.entity_id, new_index,
            connected=connected)

    def latest(self):
        """Return a copy of this object at its current state in the model.

        Returns self if this object is already the latest.

        The returned object is always "connected", i.e. receives
        live updates from the model.

        """
        if self._history_index == -1:
            return self

        return self.model.state.get_entity(self.entity_type, self.entity_id)
| 381 | |
| 382 | |
| 383 | class Model(object): |
| 384 | """ |
| 385 | The main API for interacting with a Juju model. |
| 386 | """ |
| 387 | def __init__(self, loop=None, |
| 388 | max_frame_size=connection.Connection.DEFAULT_FRAME_SIZE): |
| 389 | """Instantiate a new connected Model. |
| 390 | |
| 391 | :param loop: an asyncio event loop |
| 392 | :param max_frame_size: See |
| 393 | `juju.client.connection.Connection.MAX_FRAME_SIZE` |
| 394 | |
| 395 | """ |
| 396 | self.loop = loop or asyncio.get_event_loop() |
| 397 | self.max_frame_size = max_frame_size |
| 398 | self.connection = None |
| 399 | self.observers = weakref.WeakValueDictionary() |
| 400 | self.state = ModelState(self) |
| 401 | self.info = None |
| 402 | self._watch_stopping = asyncio.Event(loop=self.loop) |
| 403 | self._watch_stopped = asyncio.Event(loop=self.loop) |
| 404 | self._watch_received = asyncio.Event(loop=self.loop) |
| 405 | self._charmstore = CharmStore(self.loop) |
| 406 | |
| 407 | async def __aenter__(self): |
| 408 | await self.connect_current() |
| 409 | return self |
| 410 | |
| 411 | async def __aexit__(self, exc_type, exc, tb): |
| 412 | await self.disconnect() |
| 413 | |
| 414 | if exc_type is not None: |
| 415 | return False |
| 416 | |
| 417 | async def connect(self, *args, **kw): |
| 418 | """Connect to an arbitrary Juju model. |
| 419 | |
| 420 | args and kw are passed through to Connection.connect() |
| 421 | |
| 422 | """ |
| 423 | if 'loop' not in kw: |
| 424 | kw['loop'] = self.loop |
| 425 | if 'max_frame_size' not in kw: |
| 426 | kw['max_frame_size'] = self.max_frame_size |
| 427 | self.connection = await connection.Connection.connect(*args, **kw) |
| 428 | await self._after_connect() |
| 429 | |
| 430 | async def connect_current(self): |
| 431 | """Connect to the current Juju model. |
| 432 | |
| 433 | """ |
| 434 | self.connection = await connection.Connection.connect_current( |
| 435 | self.loop, max_frame_size=self.max_frame_size) |
| 436 | await self._after_connect() |
| 437 | |
| 438 | async def connect_model(self, model_name): |
| 439 | """Connect to a specific Juju model by name. |
| 440 | |
| 441 | :param model_name: Format [controller:][user/]model |
| 442 | |
| 443 | """ |
| 444 | self.connection = await connection.Connection.connect_model( |
| 445 | model_name, self.loop, self.max_frame_size) |
| 446 | await self._after_connect() |
| 447 | |
| 448 | async def _after_connect(self): |
| 449 | """Run initialization steps after connecting to websocket. |
| 450 | |
| 451 | """ |
| 452 | self._watch() |
| 453 | await self._watch_received.wait() |
| 454 | await self.get_info() |
| 455 | |
| 456 | async def disconnect(self): |
| 457 | """Shut down the watcher task and close websockets. |
| 458 | |
| 459 | """ |
| 460 | if self.connection and self.connection.is_open: |
| 461 | log.debug('Stopping watcher task') |
| 462 | self._watch_stopping.set() |
| 463 | await self._watch_stopped.wait() |
| 464 | log.debug('Closing model connection') |
| 465 | await self.connection.close() |
| 466 | self.connection = None |
| 467 | |
| 468 | async def add_local_charm_dir(self, charm_dir, series): |
| 469 | """Upload a local charm to the model. |
| 470 | |
| 471 | This will automatically generate an archive from |
| 472 | the charm dir. |
| 473 | |
| 474 | :param charm_dir: Path to the charm directory |
| 475 | :param series: Charm series |
| 476 | |
| 477 | """ |
| 478 | fh = tempfile.NamedTemporaryFile() |
| 479 | CharmArchiveGenerator(charm_dir).make_archive(fh.name) |
| 480 | with fh: |
| 481 | func = partial( |
| 482 | self.add_local_charm, fh, series, os.stat(fh.name).st_size) |
| 483 | charm_url = await self.loop.run_in_executor(None, func) |
| 484 | |
| 485 | log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url) |
| 486 | return charm_url |
| 487 | |
| 488 | def add_local_charm(self, charm_file, series, size=None): |
| 489 | """Upload a local charm archive to the model. |
| 490 | |
| 491 | Returns the 'local:...' url that should be used to deploy the charm. |
| 492 | |
| 493 | :param charm_file: Path to charm zip archive |
| 494 | :param series: Charm series |
| 495 | :param size: Size of the archive, in bytes |
| 496 | :return str: 'local:...' url for deploying the charm |
| 497 | :raises: :class:`JujuError` if the upload fails |
| 498 | |
| 499 | Uses an https endpoint at the same host:port as the wss. |
| 500 | Supports large file uploads. |
| 501 | |
| 502 | .. warning:: |
| 503 | |
| 504 | This method will block. Consider using :meth:`add_local_charm_dir` |
| 505 | instead. |
| 506 | |
| 507 | """ |
| 508 | conn, headers, path_prefix = self.connection.https_connection() |
| 509 | path = "%s/charms?series=%s" % (path_prefix, series) |
| 510 | headers['Content-Type'] = 'application/zip' |
| 511 | if size: |
| 512 | headers['Content-Length'] = size |
| 513 | conn.request("POST", path, charm_file, headers) |
| 514 | response = conn.getresponse() |
| 515 | result = response.read().decode() |
| 516 | if not response.status == 200: |
| 517 | raise JujuError(result) |
| 518 | result = json.loads(result) |
| 519 | return result['charm-url'] |
| 520 | |
| 521 | def all_units_idle(self): |
| 522 | """Return True if all units are idle. |
| 523 | |
| 524 | """ |
| 525 | for unit in self.units.values(): |
| 526 | unit_status = unit.data['agent-status']['current'] |
| 527 | if unit_status != 'idle': |
| 528 | return False |
| 529 | return True |
| 530 | |
| 531 | async def reset(self, force=False): |
| 532 | """Reset the model to a clean state. |
| 533 | |
| 534 | :param bool force: Force-terminate machines. |
| 535 | |
| 536 | This returns only after the model has reached a clean state. "Clean" |
| 537 | means no applications or machines exist in the model. |
| 538 | |
| 539 | """ |
| 540 | log.debug('Resetting model') |
| 541 | for app in self.applications.values(): |
| 542 | await app.destroy() |
| 543 | for machine in self.machines.values(): |
| 544 | await machine.destroy(force=force) |
| 545 | await self.block_until( |
| 546 | lambda: len(self.machines) == 0 |
| 547 | ) |
| 548 | |
| 549 | async def block_until(self, *conditions, timeout=None, wait_period=0.5): |
| 550 | """Return only after all conditions are true. |
| 551 | |
| 552 | """ |
| 553 | async def _block(): |
| 554 | while not all(c() for c in conditions): |
| 555 | if not (self.connection and self.connection.is_open): |
| 556 | raise websockets.ConnectionClosed(1006, 'no reason') |
| 557 | await asyncio.sleep(wait_period, loop=self.loop) |
| 558 | await asyncio.wait_for(_block(), timeout, loop=self.loop) |
| 559 | |
| 560 | @property |
| 561 | def applications(self): |
| 562 | """Return a map of application-name:Application for all applications |
| 563 | currently in the model. |
| 564 | |
| 565 | """ |
| 566 | return self.state.applications |
| 567 | |
| 568 | @property |
| 569 | def machines(self): |
| 570 | """Return a map of machine-id:Machine for all machines currently in |
| 571 | the model. |
| 572 | |
| 573 | """ |
| 574 | return self.state.machines |
| 575 | |
| 576 | @property |
| 577 | def units(self): |
| 578 | """Return a map of unit-id:Unit for all units currently in |
| 579 | the model. |
| 580 | |
| 581 | """ |
| 582 | return self.state.units |
| 583 | |
| 584 | async def get_info(self): |
| 585 | """Return a client.ModelInfo object for this Model. |
| 586 | |
| 587 | Retrieves latest info for this Model from the api server. The |
| 588 | return value is cached on the Model.info attribute so that the |
| 589 | valued may be accessed again without another api call, if |
| 590 | desired. |
| 591 | |
| 592 | This method is called automatically when the Model is connected, |
| 593 | resulting in Model.info being initialized without requiring an |
| 594 | explicit call to this method. |
| 595 | |
| 596 | """ |
| 597 | facade = client.ClientFacade.from_connection(self.connection) |
| 598 | |
| 599 | self.info = await facade.ModelInfo() |
| 600 | log.debug('Got ModelInfo: %s', vars(self.info)) |
| 601 | |
| 602 | return self.info |
| 603 | |
| 604 | def add_observer( |
| 605 | self, callable_, entity_type=None, action=None, entity_id=None, |
| 606 | predicate=None): |
| 607 | """Register an "on-model-change" callback |
| 608 | |
| 609 | Once the model is connected, ``callable_`` |
| 610 | will be called each time the model changes. ``callable_`` should |
| 611 | be Awaitable and accept the following positional arguments: |
| 612 | |
| 613 | delta - An instance of :class:`juju.delta.EntityDelta` |
| 614 | containing the raw delta data recv'd from the Juju |
| 615 | websocket. |
| 616 | |
| 617 | old_obj - If the delta modifies an existing object in the model, |
| 618 | old_obj will be a copy of that object, as it was before the |
| 619 | delta was applied. Will be None if the delta creates a new |
| 620 | entity in the model. |
| 621 | |
| 622 | new_obj - A copy of the new or updated object, after the delta |
| 623 | is applied. Will be None if the delta removes an entity |
| 624 | from the model. |
| 625 | |
| 626 | model - The :class:`Model` itself. |
| 627 | |
| 628 | Events for which ``callable_`` is called can be specified by passing |
| 629 | entity_type, action, and/or entitiy_id filter criteria, e.g.:: |
| 630 | |
| 631 | add_observer( |
| 632 | myfunc, |
| 633 | entity_type='application', action='add', entity_id='ubuntu') |
| 634 | |
| 635 | For more complex filtering conditions, pass a predicate function. It |
| 636 | will be called with a delta as its only argument. If the predicate |
| 637 | function returns True, the ``callable_`` will be called. |
| 638 | |
| 639 | """ |
| 640 | observer = _Observer( |
| 641 | callable_, entity_type, action, entity_id, predicate) |
| 642 | self.observers[observer] = callable_ |
| 643 | |
    def _watch(self):
        """Start an asynchronous watch against this model.

        Spawns the all-watcher task on ``self.loop``. The task loops on
        ``allwatcher.Next()``, applies each batch of deltas to
        ``self.state``, and notifies registered observers; it exits when
        ``self._watch_stopping`` is set (see :meth:`disconnect`) and
        signals its exit via ``self._watch_stopped``.

        See :meth:`add_observer` to register an onchange callback.

        """
        async def _all_watcher():
            try:
                allwatcher = client.AllWatcherFacade.from_connection(
                    self.connection)
                while not self._watch_stopping.is_set():
                    try:
                        # Wake up early if _watch_stopping is set while
                        # we are blocked waiting on Next().
                        results = await utils.run_with_interrupt(
                            allwatcher.Next(),
                            self._watch_stopping,
                            self.loop)
                    except JujuAPIError as e:
                        if 'watcher was stopped' not in str(e):
                            raise
                        if self._watch_stopping.is_set():
                            # this shouldn't ever actually happen, because
                            # the event should trigger before the controller
                            # has a chance to tell us the watcher is stopped
                            # but handle it gracefully, just in case
                            break
                        # controller stopped our watcher for some reason
                        # but we're not actually stopping, so just restart it
                        log.warning(
                            'Watcher: watcher stopped, restarting')
                        # dropping the cached Id presumably forces a new
                        # watcher to be allocated on the next Next() call
                        # -- TODO confirm against AllWatcherFacade
                        del allwatcher.Id
                        continue
                    except websockets.ConnectionClosed:
                        monitor = self.connection.monitor
                        if monitor.status == monitor.ERROR:
                            # closed unexpectedly, try to reopen
                            log.warning(
                                'Watcher: connection closed, reopening')
                            await self.connection.reconnect()
                            del allwatcher.Id
                            continue
                        else:
                            # closed on request, go ahead and shutdown
                            break
                    if self._watch_stopping.is_set():
                        await allwatcher.Stop()
                        break
                    for delta in results.deltas:
                        delta = get_entity_delta(delta)
                        old_obj, new_obj = self.state.apply_delta(delta)
                        await self._notify_observers(delta, old_obj, new_obj)
                    # Signal that at least one batch of deltas has been
                    # applied; _after_connect() waits on this.
                    self._watch_received.set()
            except CancelledError:
                pass
            except Exception:
                log.exception('Error in watcher')
                raise
            finally:
                # Always signal exit so disconnect() does not hang.
                self._watch_stopped.set()

        log.debug('Starting watcher task')
        self._watch_received.clear()
        self._watch_stopping.clear()
        self._watch_stopped.clear()
        self.loop.create_task(_all_watcher())
| 708 | |
| 709 | async def _notify_observers(self, delta, old_obj, new_obj): |
| 710 | """Call observing callbacks, notifying them of a change in model state |
| 711 | |
| 712 | :param delta: The raw change from the watcher |
| 713 | (:class:`juju.client.overrides.Delta`) |
| 714 | :param old_obj: The object in the model that this delta updates. |
| 715 | May be None. |
| 716 | :param new_obj: The object in the model that is created or updated |
| 717 | by applying this delta. |
| 718 | |
| 719 | """ |
| 720 | if new_obj and not old_obj: |
| 721 | delta.type = 'add' |
| 722 | |
| 723 | log.debug( |
| 724 | 'Model changed: %s %s %s', |
| 725 | delta.entity, delta.type, delta.get_id()) |
| 726 | |
| 727 | for o in self.observers: |
| 728 | if o.cares_about(delta): |
| 729 | asyncio.ensure_future(o(delta, old_obj, new_obj, self), |
| 730 | loop=self.loop) |
| 731 | |
| 732 | async def _wait(self, entity_type, entity_id, action, predicate=None): |
| 733 | """ |
| 734 | Block the calling routine until a given action has happened to the |
| 735 | given entity |
| 736 | |
| 737 | :param entity_type: The entity's type. |
| 738 | :param entity_id: The entity's id. |
| 739 | :param action: the type of action (e.g., 'add', 'change', or 'remove') |
| 740 | :param predicate: optional callable that must take as an |
| 741 | argument a delta, and must return a boolean, indicating |
| 742 | whether the delta contains the specific action we're looking |
| 743 | for. For example, you might check to see whether a 'change' |
| 744 | has a 'completed' status. See the _Observer class for details. |
| 745 | |
| 746 | """ |
| 747 | q = asyncio.Queue(loop=self.loop) |
| 748 | |
| 749 | async def callback(delta, old, new, model): |
| 750 | await q.put(delta.get_id()) |
| 751 | |
| 752 | self.add_observer(callback, entity_type, action, entity_id, predicate) |
| 753 | entity_id = await q.get() |
| 754 | # object might not be in the entity_map if we were waiting for a |
| 755 | # 'remove' action |
| 756 | return self.state._live_entity_map(entity_type).get(entity_id) |
| 757 | |
| 758 | async def _wait_for_new(self, entity_type, entity_id=None, predicate=None): |
| 759 | """Wait for a new object to appear in the Model and return it. |
| 760 | |
| 761 | Waits for an object of type ``entity_type`` with id ``entity_id``. |
| 762 | If ``entity_id`` is ``None``, it will wait for the first new entity |
| 763 | of the correct type. |
| 764 | |
| 765 | This coroutine blocks until the new object appears in the model. |
| 766 | |
| 767 | """ |
| 768 | # if the entity is already in the model, just return it |
| 769 | if entity_id in self.state._live_entity_map(entity_type): |
| 770 | return self.state._live_entity_map(entity_type)[entity_id] |
| 771 | # if we know the entity_id, we can trigger on any action that puts |
| 772 | # the enitty into the model; otherwise, we have to watch for the |
| 773 | # next "add" action on that entity_type |
| 774 | action = 'add' if entity_id is None else None |
| 775 | return await self._wait(entity_type, entity_id, action, predicate) |
| 776 | |
| 777 | async def wait_for_action(self, action_id): |
| 778 | """Given an action, wait for it to complete.""" |
| 779 | |
| 780 | if action_id.startswith("action-"): |
| 781 | # if we've been passed action.tag, transform it into the |
| 782 | # id that the api deltas will use. |
| 783 | action_id = action_id[7:] |
| 784 | |
| 785 | def predicate(delta): |
| 786 | return delta.data['status'] in ('completed', 'failed') |
| 787 | |
| 788 | return await self._wait('action', action_id, 'change', predicate) |
| 789 | |
| 790 | async def add_machine( |
| 791 | self, spec=None, constraints=None, disks=None, series=None): |
| 792 | """Start a new, empty machine and optionally a container, or add a |
| 793 | container to a machine. |
| 794 | |
| 795 | :param str spec: Machine specification |
| 796 | Examples:: |
| 797 | |
| 798 | (None) - starts a new machine |
| 799 | 'lxd' - starts a new machine with one lxd container |
| 800 | 'lxd:4' - starts a new lxd container on machine 4 |
| 801 | 'ssh:user@10.10.0.3' - manually provisions a machine with ssh |
| 802 | 'zone=us-east-1a' - starts a machine in zone us-east-1s on AWS |
| 803 | 'maas2.name' - acquire machine maas2.name on MAAS |
| 804 | |
| 805 | :param dict constraints: Machine constraints, which can contain the |
| 806 | the following keys:: |
| 807 | |
| 808 | arch : str |
| 809 | container : str |
| 810 | cores : int |
| 811 | cpu_power : int |
| 812 | instance_type : str |
| 813 | mem : int |
| 814 | root_disk : int |
| 815 | spaces : list(str) |
| 816 | tags : list(str) |
| 817 | virt_type : str |
| 818 | |
| 819 | Example:: |
| 820 | |
| 821 | constraints={ |
| 822 | 'mem': 256 * MB, |
| 823 | 'tags': ['virtual'], |
| 824 | } |
| 825 | |
| 826 | :param list disks: List of disk constraint dictionaries, which can |
| 827 | contain the following keys:: |
| 828 | |
| 829 | count : int |
| 830 | pool : str |
| 831 | size : int |
| 832 | |
| 833 | Example:: |
| 834 | |
| 835 | disks=[{ |
| 836 | 'pool': 'rootfs', |
| 837 | 'size': 10 * GB, |
| 838 | 'count': 1, |
| 839 | }] |
| 840 | |
| 841 | :param str series: Series, e.g. 'xenial' |
| 842 | |
| 843 | Supported container types are: lxd, kvm |
| 844 | |
| 845 | When deploying a container to an existing machine, constraints cannot |
| 846 | be used. |
| 847 | |
| 848 | """ |
| 849 | params = client.AddMachineParams() |
| 850 | params.jobs = ['JobHostUnits'] |
| 851 | |
| 852 | if spec: |
| 853 | placement = parse_placement(spec) |
| 854 | if placement: |
| 855 | params.placement = placement[0] |
| 856 | |
| 857 | if constraints: |
| 858 | params.constraints = client.Value.from_json(constraints) |
| 859 | |
| 860 | if disks: |
| 861 | params.disks = [ |
| 862 | client.Constraints.from_json(o) for o in disks] |
| 863 | |
| 864 | if series: |
| 865 | params.series = series |
| 866 | |
| 867 | # Submit the request. |
| 868 | client_facade = client.ClientFacade.from_connection(self.connection) |
| 869 | results = await client_facade.AddMachines([params]) |
| 870 | error = results.machines[0].error |
| 871 | if error: |
| 872 | raise ValueError("Error adding machine: %s" % error.message) |
| 873 | machine_id = results.machines[0].machine |
| 874 | log.debug('Added new machine %s', machine_id) |
| 875 | return await self._wait_for_new('machine', machine_id) |
| 876 | |
| 877 | async def add_relation(self, relation1, relation2): |
| 878 | """Add a relation between two applications. |
| 879 | |
| 880 | :param str relation1: '<application>[:<relation_name>]' |
| 881 | :param str relation2: '<application>[:<relation_name>]' |
| 882 | |
| 883 | """ |
| 884 | app_facade = client.ApplicationFacade.from_connection(self.connection) |
| 885 | |
| 886 | log.debug( |
| 887 | 'Adding relation %s <-> %s', relation1, relation2) |
| 888 | |
| 889 | try: |
| 890 | result = await app_facade.AddRelation([relation1, relation2]) |
| 891 | except JujuAPIError as e: |
| 892 | if 'relation already exists' not in e.message: |
| 893 | raise |
| 894 | log.debug( |
| 895 | 'Relation %s <-> %s already exists', relation1, relation2) |
| 896 | # TODO: if relation already exists we should return the |
| 897 | # Relation ModelEntity here |
| 898 | return None |
| 899 | |
| 900 | def predicate(delta): |
| 901 | endpoints = {} |
| 902 | for endpoint in delta.data['endpoints']: |
| 903 | endpoints[endpoint['application-name']] = endpoint['relation'] |
| 904 | return endpoints == result.endpoints |
| 905 | |
| 906 | return await self._wait_for_new('relation', None, predicate) |
| 907 | |
| 908 | def add_space(self, name, *cidrs): |
| 909 | """Add a new network space. |
| 910 | |
| 911 | Adds a new space with the given name and associates the given |
| 912 | (optional) list of existing subnet CIDRs with it. |
| 913 | |
| 914 | :param str name: Name of the space |
| 915 | :param \*cidrs: Optional list of existing subnet CIDRs |
| 916 | |
| 917 | """ |
| 918 | raise NotImplementedError() |
| 919 | |
| 920 | async def add_ssh_key(self, user, key): |
| 921 | """Add a public SSH key to this model. |
| 922 | |
| 923 | :param str user: The username of the user |
| 924 | :param str key: The public ssh key |
| 925 | |
| 926 | """ |
| 927 | key_facade = client.KeyManagerFacade.from_connection(self.connection) |
| 928 | return await key_facade.AddKeys([key], user) |
| 929 | add_ssh_keys = add_ssh_key |
| 930 | |
| 931 | def add_subnet(self, cidr_or_id, space, *zones): |
| 932 | """Add an existing subnet to this model. |
| 933 | |
| 934 | :param str cidr_or_id: CIDR or provider ID of the existing subnet |
| 935 | :param str space: Network space with which to associate |
| 936 | :param str \*zones: Zone(s) in which the subnet resides |
| 937 | |
| 938 | """ |
| 939 | raise NotImplementedError() |
| 940 | |
| 941 | def get_backups(self): |
| 942 | """Retrieve metadata for backups in this model. |
| 943 | |
| 944 | """ |
| 945 | raise NotImplementedError() |
| 946 | |
| 947 | def block(self, *commands): |
| 948 | """Add a new block to this model. |
| 949 | |
| 950 | :param str \*commands: The commands to block. Valid values are |
| 951 | 'all-changes', 'destroy-model', 'remove-object' |
| 952 | |
| 953 | """ |
| 954 | raise NotImplementedError() |
| 955 | |
| 956 | def get_blocks(self): |
| 957 | """List blocks for this model. |
| 958 | |
| 959 | """ |
| 960 | raise NotImplementedError() |
| 961 | |
| 962 | def get_cached_images(self, arch=None, kind=None, series=None): |
| 963 | """Return a list of cached OS images. |
| 964 | |
| 965 | :param str arch: Filter by image architecture |
| 966 | :param str kind: Filter by image kind, e.g. 'lxd' |
| 967 | :param str series: Filter by image series, e.g. 'xenial' |
| 968 | |
| 969 | """ |
| 970 | raise NotImplementedError() |
| 971 | |
| 972 | def create_backup(self, note=None, no_download=False): |
| 973 | """Create a backup of this model. |
| 974 | |
| 975 | :param str note: A note to store with the backup |
| 976 | :param bool no_download: Do not download the backup archive |
| 977 | :return str: Path to downloaded archive |
| 978 | |
| 979 | """ |
| 980 | raise NotImplementedError() |
| 981 | |
| 982 | def create_storage_pool(self, name, provider_type, **pool_config): |
| 983 | """Create or define a storage pool. |
| 984 | |
| 985 | :param str name: Name to give the storage pool |
| 986 | :param str provider_type: Pool provider type |
| 987 | :param \*\*pool_config: key/value pool configuration pairs |
| 988 | |
| 989 | """ |
| 990 | raise NotImplementedError() |
| 991 | |
| 992 | def debug_log( |
| 993 | self, no_tail=False, exclude_module=None, include_module=None, |
| 994 | include=None, level=None, limit=0, lines=10, replay=False, |
| 995 | exclude=None): |
| 996 | """Get log messages for this model. |
| 997 | |
| 998 | :param bool no_tail: Stop after returning existing log messages |
| 999 | :param list exclude_module: Do not show log messages for these logging |
| 1000 | modules |
| 1001 | :param list include_module: Only show log messages for these logging |
| 1002 | modules |
| 1003 | :param list include: Only show log messages for these entities |
| 1004 | :param str level: Log level to show, valid options are 'TRACE', |
| 1005 | 'DEBUG', 'INFO', 'WARNING', 'ERROR, |
| 1006 | :param int limit: Return this many of the most recent (possibly |
| 1007 | filtered) lines are shown |
| 1008 | :param int lines: Yield this many of the most recent lines, and keep |
| 1009 | yielding |
| 1010 | :param bool replay: Yield the entire log, and keep yielding |
| 1011 | :param list exclude: Do not show log messages for these entities |
| 1012 | |
| 1013 | """ |
| 1014 | raise NotImplementedError() |
| 1015 | |
| 1016 | def _get_series(self, entity_url, entity): |
| 1017 | # try to get the series from the provided charm URL |
| 1018 | if entity_url.startswith('cs:'): |
| 1019 | parts = entity_url[3:].split('/') |
| 1020 | else: |
| 1021 | parts = entity_url.split('/') |
| 1022 | if parts[0].startswith('~'): |
| 1023 | parts.pop(0) |
| 1024 | if len(parts) > 1: |
| 1025 | # series was specified in the URL |
| 1026 | return parts[0] |
| 1027 | # series was not supplied at all, so use the newest |
| 1028 | # supported series according to the charm store |
| 1029 | ss = entity['Meta']['supported-series'] |
| 1030 | return ss['SupportedSeries'][0] |
| 1031 | |
    async def deploy(
            self, entity_url, application_name=None, bind=None, budget=None,
            channel=None, config=None, constraints=None, force=False,
            num_units=1, plan=None, resources=None, series=None, storage=None,
            to=None):
        """Deploy a new service or bundle.

        :param str entity_url: Charm or bundle url
        :param str application_name: Name to give the service
        :param dict bind: <charm endpoint>:<network space> pairs
        :param dict budget: <budget name>:<limit> pairs
        :param str channel: Charm store channel from which to retrieve
            the charm or bundle, e.g. 'edge'
        :param dict config: Charm configuration dictionary
        :param constraints: Service constraints
        :type constraints: :class:`juju.Constraints`
        :param bool force: Allow charm to be deployed to a machine running
            an unsupported series
        :param int num_units: Number of units to deploy
        :param str plan: Plan under which to deploy charm
        :param dict resources: <resource name>:<file path> pairs
        :param str series: Series on which to deploy
        :param dict storage: Storage constraints TODO how do these look?
        :param to: Placement directive as a string. For example:

            '23' - place on machine 23
            'lxd:7' - place in new lxd container on machine 7
            '24/lxd/3' - place in container 3 on machine 24

            If None, a new machine is provisioned.


        TODO::

            - support local resources

        """
        # Convert plain storage-constraint dicts into API Constraints objects.
        if storage:
            storage = {
                k: client.Constraints(**v)
                for k, v in storage.items()
            }

        # The entity is "local" if it is explicitly marked 'local:' or the
        # url is actually a filesystem path to a charm/bundle directory.
        is_local = (
            entity_url.startswith('local:') or
            os.path.isdir(entity_url)
        )
        if is_local:
            entity_id = entity_url.replace('local:', '')
        else:
            # Resolve the url to a concrete charm store entity id.
            entity = await self.charmstore.entity(entity_url, channel=channel)
            entity_id = entity['Id']

        client_facade = client.ClientFacade.from_connection(self.connection)

        # Local bundles are directories containing a bundle.yaml; charm
        # store bundle ids contain a 'bundle/' segment.
        is_bundle = ((is_local and
                      (Path(entity_id) / 'bundle.yaml').exists()) or
                     (not is_local and 'bundle/' in entity_id))

        if is_bundle:
            # Bundles are deployed by translating the bundle YAML into a
            # plan of changes and executing each change via the API.
            handler = BundleHandler(self)
            await handler.fetch_plan(entity_id)
            await handler.execute_plan()
            extant_apps = {app for app in self.applications}
            pending_apps = set(handler.applications) - extant_apps
            if pending_apps:
                # new apps will usually be in the model by now, but if some
                # haven't made it yet we'll need to wait on them to be added
                await asyncio.gather(*[
                    asyncio.ensure_future(
                        self._wait_for_new('application', app_name),
                        loop=self.loop)
                    for app_name in pending_apps
                ], loop=self.loop)
            # Return the ModelEntity objects for every app in the bundle.
            return [app for name, app in self.applications.items()
                    if name in handler.applications]
        else:
            if not is_local:
                # Fill in defaults from the charm store metadata, then ask
                # the controller to fetch the charm itself.
                if not application_name:
                    application_name = entity['Meta']['charm-metadata']['Name']
                if not series:
                    series = self._get_series(entity_url, entity)
                await client_facade.AddCharm(channel, entity_id)
                # XXX: we're dropping local resources here, but we don't
                # actually support them yet anyway
                resources = await self._add_store_resources(application_name,
                                                            entity_id,
                                                            entity)
            else:
                # We have a local charm dir that needs to be uploaded
                charm_dir = os.path.abspath(
                    os.path.expanduser(entity_id))
                series = series or get_charm_series(charm_dir)
                if not series:
                    raise JujuError(
                        "Couldn't determine series for charm at {}. "
                        "Pass a 'series' kwarg to Model.deploy().".format(
                            charm_dir))
                entity_id = await self.add_local_charm_dir(charm_dir, series)
            return await self._deploy(
                charm_url=entity_id,
                application=application_name,
                series=series,
                config=config or {},
                constraints=constraints,
                endpoint_bindings=bind,
                resources=resources,
                storage=storage,
                channel=channel,
                num_units=num_units,
                placement=parse_placement(to)
            )
| 1144 | |
| 1145 | async def _add_store_resources(self, application, entity_url, entity=None): |
| 1146 | if not entity: |
| 1147 | # avoid extra charm store call if one was already made |
| 1148 | entity = await self.charmstore.entity(entity_url) |
| 1149 | resources = [ |
| 1150 | { |
| 1151 | 'description': resource['Description'], |
| 1152 | 'fingerprint': resource['Fingerprint'], |
| 1153 | 'name': resource['Name'], |
| 1154 | 'path': resource['Path'], |
| 1155 | 'revision': resource['Revision'], |
| 1156 | 'size': resource['Size'], |
| 1157 | 'type_': resource['Type'], |
| 1158 | 'origin': 'store', |
| 1159 | } for resource in entity['Meta']['resources'] |
| 1160 | ] |
| 1161 | |
| 1162 | if not resources: |
| 1163 | return None |
| 1164 | |
| 1165 | resources_facade = client.ResourcesFacade.from_connection( |
| 1166 | self.connection) |
| 1167 | response = await resources_facade.AddPendingResources( |
| 1168 | tag.application(application), |
| 1169 | entity_url, |
| 1170 | [client.CharmResource(**resource) for resource in resources]) |
| 1171 | resource_map = {resource['name']: pid |
| 1172 | for resource, pid |
| 1173 | in zip(resources, response.pending_ids)} |
| 1174 | return resource_map |
| 1175 | |
| 1176 | async def _deploy(self, charm_url, application, series, config, |
| 1177 | constraints, endpoint_bindings, resources, storage, |
| 1178 | channel=None, num_units=None, placement=None): |
| 1179 | """Logic shared between `Model.deploy` and `BundleHandler.deploy`. |
| 1180 | """ |
| 1181 | log.info('Deploying %s', charm_url) |
| 1182 | |
| 1183 | # stringify all config values for API, and convert to YAML |
| 1184 | config = {k: str(v) for k, v in config.items()} |
| 1185 | config = yaml.dump({application: config}, |
| 1186 | default_flow_style=False) |
| 1187 | |
| 1188 | app_facade = client.ApplicationFacade.from_connection( |
| 1189 | self.connection) |
| 1190 | |
| 1191 | app = client.ApplicationDeploy( |
| 1192 | charm_url=charm_url, |
| 1193 | application=application, |
| 1194 | series=series, |
| 1195 | channel=channel, |
| 1196 | config_yaml=config, |
| 1197 | constraints=parse_constraints(constraints), |
| 1198 | endpoint_bindings=endpoint_bindings, |
| 1199 | num_units=num_units, |
| 1200 | resources=resources, |
| 1201 | storage=storage, |
| 1202 | placement=placement |
| 1203 | ) |
| 1204 | |
| 1205 | result = await app_facade.Deploy([app]) |
| 1206 | errors = [r.error.message for r in result.results if r.error] |
| 1207 | if errors: |
| 1208 | raise JujuError('\n'.join(errors)) |
| 1209 | return await self._wait_for_new('application', application) |
| 1210 | |
| 1211 | async def destroy(self): |
| 1212 | """Terminate all machines and resources for this model. |
| 1213 | Is already implemented in controller.py. |
| 1214 | """ |
| 1215 | raise NotImplementedError() |
| 1216 | |
| 1217 | async def destroy_unit(self, *unit_names): |
| 1218 | """Destroy units by name. |
| 1219 | |
| 1220 | """ |
| 1221 | app_facade = client.ApplicationFacade.from_connection(self.connection) |
| 1222 | |
| 1223 | log.debug( |
| 1224 | 'Destroying unit%s %s', |
| 1225 | 's' if len(unit_names) == 1 else '', |
| 1226 | ' '.join(unit_names)) |
| 1227 | |
| 1228 | return await app_facade.DestroyUnits(list(unit_names)) |
| 1229 | destroy_units = destroy_unit |
| 1230 | |
| 1231 | def get_backup(self, archive_id): |
| 1232 | """Download a backup archive file. |
| 1233 | |
| 1234 | :param str archive_id: The id of the archive to download |
| 1235 | :return str: Path to the archive file |
| 1236 | |
| 1237 | """ |
| 1238 | raise NotImplementedError() |
| 1239 | |
| 1240 | def enable_ha( |
| 1241 | self, num_controllers=0, constraints=None, series=None, to=None): |
| 1242 | """Ensure sufficient controllers exist to provide redundancy. |
| 1243 | |
| 1244 | :param int num_controllers: Number of controllers to make available |
| 1245 | :param constraints: Constraints to apply to the controller machines |
| 1246 | :type constraints: :class:`juju.Constraints` |
| 1247 | :param str series: Series of the controller machines |
| 1248 | :param list to: Placement directives for controller machines, e.g.:: |
| 1249 | |
| 1250 | '23' - machine 23 |
| 1251 | 'lxc:7' - new lxc container on machine 7 |
| 1252 | '24/lxc/3' - lxc container 3 or machine 24 |
| 1253 | |
| 1254 | If None, a new machine is provisioned. |
| 1255 | |
| 1256 | """ |
| 1257 | raise NotImplementedError() |
| 1258 | |
| 1259 | async def get_config(self): |
| 1260 | """Return the configuration settings for this model. |
| 1261 | |
| 1262 | :returns: A ``dict`` mapping keys to `ConfigValue` instances, |
| 1263 | which have `source` and `value` attributes. |
| 1264 | """ |
| 1265 | config_facade = client.ModelConfigFacade.from_connection( |
| 1266 | self.connection |
| 1267 | ) |
| 1268 | result = await config_facade.ModelGet() |
| 1269 | config = result.config |
| 1270 | for key, value in config.items(): |
| 1271 | config[key] = ConfigValue.from_json(value) |
| 1272 | return config |
| 1273 | |
| 1274 | def get_constraints(self): |
| 1275 | """Return the machine constraints for this model. |
| 1276 | |
| 1277 | """ |
| 1278 | raise NotImplementedError() |
| 1279 | |
| 1280 | async def grant(self, username, acl='read'): |
| 1281 | """Grant a user access to this model. |
| 1282 | |
| 1283 | :param str username: Username |
| 1284 | :param str acl: Access control ('read' or 'write') |
| 1285 | |
| 1286 | """ |
| 1287 | controller_conn = await self.connection.controller() |
| 1288 | model_facade = client.ModelManagerFacade.from_connection( |
| 1289 | controller_conn) |
| 1290 | user = tag.user(username) |
| 1291 | model = tag.model(self.info.uuid) |
| 1292 | changes = client.ModifyModelAccess(acl, 'grant', model, user) |
| 1293 | await self.revoke(username) |
| 1294 | return await model_facade.ModifyModelAccess([changes]) |
| 1295 | |
| 1296 | def import_ssh_key(self, identity): |
| 1297 | """Add a public SSH key from a trusted indentity source to this model. |
| 1298 | |
| 1299 | :param str identity: User identity in the form <lp|gh>:<username> |
| 1300 | |
| 1301 | """ |
| 1302 | raise NotImplementedError() |
| 1303 | import_ssh_keys = import_ssh_key |
| 1304 | |
| 1305 | async def get_machines(self): |
| 1306 | """Return list of machines in this model. |
| 1307 | |
| 1308 | """ |
| 1309 | return list(self.state.machines.keys()) |
| 1310 | |
| 1311 | def get_shares(self): |
| 1312 | """Return list of all users with access to this model. |
| 1313 | |
| 1314 | """ |
| 1315 | raise NotImplementedError() |
| 1316 | |
| 1317 | def get_spaces(self): |
| 1318 | """Return list of all known spaces, including associated subnets. |
| 1319 | |
| 1320 | """ |
| 1321 | raise NotImplementedError() |
| 1322 | |
| 1323 | async def get_ssh_key(self, raw_ssh=False): |
| 1324 | """Return known SSH keys for this model. |
| 1325 | :param bool raw_ssh: if True, returns the raw ssh key, |
| 1326 | else it's fingerprint |
| 1327 | |
| 1328 | """ |
| 1329 | key_facade = client.KeyManagerFacade.from_connection(self.connection) |
| 1330 | entity = {'tag': tag.model(self.info.uuid)} |
| 1331 | entities = client.Entities([entity]) |
| 1332 | return await key_facade.ListKeys(entities, raw_ssh) |
| 1333 | get_ssh_keys = get_ssh_key |
| 1334 | |
| 1335 | def get_storage(self, filesystem=False, volume=False): |
| 1336 | """Return details of storage instances. |
| 1337 | |
| 1338 | :param bool filesystem: Include filesystem storage |
| 1339 | :param bool volume: Include volume storage |
| 1340 | |
| 1341 | """ |
| 1342 | raise NotImplementedError() |
| 1343 | |
| 1344 | def get_storage_pools(self, names=None, providers=None): |
| 1345 | """Return list of storage pools. |
| 1346 | |
| 1347 | :param list names: Only include pools with these names |
| 1348 | :param list providers: Only include pools for these providers |
| 1349 | |
| 1350 | """ |
| 1351 | raise NotImplementedError() |
| 1352 | |
| 1353 | def get_subnets(self, space=None, zone=None): |
| 1354 | """Return list of known subnets. |
| 1355 | |
| 1356 | :param str space: Only include subnets in this space |
| 1357 | :param str zone: Only include subnets in this zone |
| 1358 | |
| 1359 | """ |
| 1360 | raise NotImplementedError() |
| 1361 | |
| 1362 | def remove_blocks(self): |
| 1363 | """Remove all blocks from this model. |
| 1364 | |
| 1365 | """ |
| 1366 | raise NotImplementedError() |
| 1367 | |
| 1368 | def remove_backup(self, backup_id): |
| 1369 | """Delete a backup. |
| 1370 | |
| 1371 | :param str backup_id: The id of the backup to remove |
| 1372 | |
| 1373 | """ |
| 1374 | raise NotImplementedError() |
| 1375 | |
| 1376 | def remove_cached_images(self, arch=None, kind=None, series=None): |
| 1377 | """Remove cached OS images. |
| 1378 | |
| 1379 | :param str arch: Architecture of the images to remove |
| 1380 | :param str kind: Image kind to remove, e.g. 'lxd' |
| 1381 | :param str series: Image series to remove, e.g. 'xenial' |
| 1382 | |
| 1383 | """ |
| 1384 | raise NotImplementedError() |
| 1385 | |
| 1386 | def remove_machine(self, *machine_ids): |
| 1387 | """Remove a machine from this model. |
| 1388 | |
| 1389 | :param str \*machine_ids: Ids of the machines to remove |
| 1390 | |
| 1391 | """ |
| 1392 | raise NotImplementedError() |
| 1393 | remove_machines = remove_machine |
| 1394 | |
| 1395 | async def remove_ssh_key(self, user, key): |
| 1396 | """Remove a public SSH key(s) from this model. |
| 1397 | |
| 1398 | :param str key: Full ssh key |
| 1399 | :param str user: Juju user to which the key is registered |
| 1400 | |
| 1401 | """ |
| 1402 | key_facade = client.KeyManagerFacade.from_connection(self.connection) |
| 1403 | key = base64.b64decode(bytes(key.strip().split()[1].encode('ascii'))) |
| 1404 | key = hashlib.md5(key).hexdigest() |
| 1405 | key = ':'.join(a+b for a, b in zip(key[::2], key[1::2])) |
| 1406 | await key_facade.DeleteKeys([key], user) |
| 1407 | remove_ssh_keys = remove_ssh_key |
| 1408 | |
| 1409 | def restore_backup( |
| 1410 | self, bootstrap=False, constraints=None, archive=None, |
| 1411 | backup_id=None, upload_tools=False): |
| 1412 | """Restore a backup archive to a new controller. |
| 1413 | |
| 1414 | :param bool bootstrap: Bootstrap a new state machine |
| 1415 | :param constraints: Model constraints |
| 1416 | :type constraints: :class:`juju.Constraints` |
| 1417 | :param str archive: Path to backup archive to restore |
| 1418 | :param str backup_id: Id of backup to restore |
| 1419 | :param bool upload_tools: Upload tools if bootstrapping a new machine |
| 1420 | |
| 1421 | """ |
| 1422 | raise NotImplementedError() |
| 1423 | |
| 1424 | def retry_provisioning(self): |
| 1425 | """Retry provisioning for failed machines. |
| 1426 | |
| 1427 | """ |
| 1428 | raise NotImplementedError() |
| 1429 | |
| 1430 | async def revoke(self, username): |
| 1431 | """Revoke a user's access to this model. |
| 1432 | |
| 1433 | :param str username: Username to revoke |
| 1434 | |
| 1435 | """ |
| 1436 | controller_conn = await self.connection.controller() |
| 1437 | model_facade = client.ModelManagerFacade.from_connection( |
| 1438 | controller_conn) |
| 1439 | user = tag.user(username) |
| 1440 | model = tag.model(self.info.uuid) |
| 1441 | changes = client.ModifyModelAccess('read', 'revoke', model, user) |
| 1442 | return await model_facade.ModifyModelAccess([changes]) |
| 1443 | |
| 1444 | def run(self, command, timeout=None): |
| 1445 | """Run command on all machines in this model. |
| 1446 | |
| 1447 | :param str command: The command to run |
| 1448 | :param int timeout: Time to wait before command is considered failed |
| 1449 | |
| 1450 | """ |
| 1451 | raise NotImplementedError() |
| 1452 | |
| 1453 | async def set_config(self, config): |
| 1454 | """Set configuration keys on this model. |
| 1455 | |
| 1456 | :param dict config: Mapping of config keys to either string values or |
| 1457 | `ConfigValue` instances, as returned by `get_config`. |
| 1458 | """ |
| 1459 | config_facade = client.ModelConfigFacade.from_connection( |
| 1460 | self.connection |
| 1461 | ) |
| 1462 | for key, value in config.items(): |
| 1463 | if isinstance(value, ConfigValue): |
| 1464 | config[key] = value.value |
| 1465 | await config_facade.ModelSet(config) |
| 1466 | |
| 1467 | def set_constraints(self, constraints): |
| 1468 | """Set machine constraints on this model. |
| 1469 | |
| 1470 | :param :class:`juju.Constraints` constraints: Machine constraints |
| 1471 | |
| 1472 | """ |
| 1473 | raise NotImplementedError() |
| 1474 | |
| 1475 | def get_action_output(self, action_uuid, wait=-1): |
| 1476 | """Get the results of an action by ID. |
| 1477 | |
| 1478 | :param str action_uuid: Id of the action |
| 1479 | :param int wait: Time in seconds to wait for action to complete |
| 1480 | |
| 1481 | """ |
| 1482 | raise NotImplementedError() |
| 1483 | |
| 1484 | def get_action_status(self, uuid_or_prefix=None, name=None): |
| 1485 | """Get the status of all actions, filtered by ID, ID prefix, or action name. |
| 1486 | |
| 1487 | :param str uuid_or_prefix: Filter by action uuid or prefix |
| 1488 | :param str name: Filter by action name |
| 1489 | |
| 1490 | """ |
| 1491 | raise NotImplementedError() |
| 1492 | |
| 1493 | def get_budget(self, budget_name): |
| 1494 | """Get budget usage info. |
| 1495 | |
| 1496 | :param str budget_name: Name of budget |
| 1497 | |
| 1498 | """ |
| 1499 | raise NotImplementedError() |
| 1500 | |
| 1501 | async def get_status(self, filters=None, utc=False): |
| 1502 | """Return the status of the model. |
| 1503 | |
| 1504 | :param str filters: Optional list of applications, units, or machines |
| 1505 | to include, which can use wildcards ('*'). |
| 1506 | :param bool utc: Display time as UTC in RFC3339 format |
| 1507 | |
| 1508 | """ |
| 1509 | client_facade = client.ClientFacade.from_connection(self.connection) |
| 1510 | return await client_facade.FullStatus(filters) |
| 1511 | |
| 1512 | def sync_tools( |
| 1513 | self, all_=False, destination=None, dry_run=False, public=False, |
| 1514 | source=None, stream=None, version=None): |
| 1515 | """Copy Juju tools into this model. |
| 1516 | |
| 1517 | :param bool all_: Copy all versions, not just the latest |
| 1518 | :param str destination: Path to local destination directory |
| 1519 | :param bool dry_run: Don't do the actual copy |
| 1520 | :param bool public: Tools are for a public cloud, so generate mirrors |
| 1521 | information |
| 1522 | :param str source: Path to local source directory |
| 1523 | :param str stream: Simplestreams stream for which to sync metadata |
| 1524 | :param str version: Copy a specific major.minor version |
| 1525 | |
| 1526 | """ |
| 1527 | raise NotImplementedError() |
| 1528 | |
| 1529 | def unblock(self, *commands): |
| 1530 | """Unblock an operation that would alter this model. |
| 1531 | |
| 1532 | :param str \*commands: The commands to unblock. Valid values are |
| 1533 | 'all-changes', 'destroy-model', 'remove-object' |
| 1534 | |
| 1535 | """ |
| 1536 | raise NotImplementedError() |
| 1537 | |
| 1538 | def unset_config(self, *keys): |
| 1539 | """Unset configuration on this model. |
| 1540 | |
| 1541 | :param str \*keys: The keys to unset |
| 1542 | |
| 1543 | """ |
| 1544 | raise NotImplementedError() |
| 1545 | |
| 1546 | def upgrade_gui(self): |
| 1547 | """Upgrade the Juju GUI for this model. |
| 1548 | |
| 1549 | """ |
| 1550 | raise NotImplementedError() |
| 1551 | |
| 1552 | def upgrade_juju( |
| 1553 | self, dry_run=False, reset_previous_upgrade=False, |
| 1554 | upload_tools=False, version=None): |
| 1555 | """Upgrade Juju on all machines in a model. |
| 1556 | |
| 1557 | :param bool dry_run: Don't do the actual upgrade |
| 1558 | :param bool reset_previous_upgrade: Clear the previous (incomplete) |
| 1559 | upgrade status |
| 1560 | :param bool upload_tools: Upload local version of tools |
| 1561 | :param str version: Upgrade to a specific version |
| 1562 | |
| 1563 | """ |
| 1564 | raise NotImplementedError() |
| 1565 | |
| 1566 | def upload_backup(self, archive_path): |
| 1567 | """Store a backup archive remotely in Juju. |
| 1568 | |
| 1569 | :param str archive_path: Path to local archive |
| 1570 | |
| 1571 | """ |
| 1572 | raise NotImplementedError() |
| 1573 | |
    @property
    def charmstore(self):
        """The charm store client used by this model for entity lookups."""
        return self._charmstore
| 1577 | |
| 1578 | async def get_metrics(self, *tags): |
| 1579 | """Retrieve metrics. |
| 1580 | |
| 1581 | :param str \*tags: Tags of entities from which to retrieve metrics. |
| 1582 | No tags retrieves the metrics of all units in the model. |
| 1583 | :return: Dictionary of unit_name:metrics |
| 1584 | |
| 1585 | """ |
| 1586 | log.debug("Retrieving metrics for %s", |
| 1587 | ', '.join(tags) if tags else "all units") |
| 1588 | |
| 1589 | metrics_facade = client.MetricsDebugFacade.from_connection( |
| 1590 | self.connection) |
| 1591 | |
| 1592 | entities = [client.Entity(tag) for tag in tags] |
| 1593 | metrics_result = await metrics_facade.GetMetrics(entities) |
| 1594 | |
| 1595 | metrics = collections.defaultdict(list) |
| 1596 | |
| 1597 | for entity_metrics in metrics_result.results: |
| 1598 | error = entity_metrics.error |
| 1599 | if error: |
| 1600 | if "is not a valid tag" in error: |
| 1601 | raise ValueError(error.message) |
| 1602 | else: |
| 1603 | raise Exception(error.message) |
| 1604 | |
| 1605 | for metric in entity_metrics.metrics: |
| 1606 | metrics[metric.unit].append(vars(metric)) |
| 1607 | |
| 1608 | return metrics |
| 1609 | |
| 1610 | |
def get_charm_series(path):
    """Inspects the charm directory at ``path`` and returns a default
    series from its metadata.yaml (the first item in the 'series' list).

    Returns None if no series can be determined.

    """
    md = Path(path) / "metadata.yaml"
    if not md.exists():
        return None
    # Use safe_load: metadata.yaml is plain data and yaml.load() would
    # allow arbitrary Python object construction. Use a context manager
    # so the file handle from md.open() is not leaked.
    with md.open() as f:
        data = yaml.safe_load(f)
    series = data.get('series')
    return series[0] if series else None
| 1624 | |
| 1625 | |
| 1626 | class BundleHandler(object): |
| 1627 | """ |
| 1628 | Handle bundles by using the API to translate bundle YAML into a plan of |
| 1629 | steps and then dispatching each of those using the API. |
| 1630 | """ |
| 1631 | def __init__(self, model): |
| 1632 | self.model = model |
| 1633 | self.charmstore = model.charmstore |
| 1634 | self.plan = [] |
| 1635 | self.references = {} |
| 1636 | self._units_by_app = {} |
| 1637 | for unit_name, unit in model.units.items(): |
| 1638 | app_units = self._units_by_app.setdefault(unit.application, []) |
| 1639 | app_units.append(unit_name) |
| 1640 | self.client_facade = client.ClientFacade.from_connection( |
| 1641 | model.connection) |
| 1642 | self.app_facade = client.ApplicationFacade.from_connection( |
| 1643 | model.connection) |
| 1644 | self.ann_facade = client.AnnotationsFacade.from_connection( |
| 1645 | model.connection) |
| 1646 | |
| 1647 | async def _handle_local_charms(self, bundle): |
| 1648 | """Search for references to local charms (i.e. filesystem paths) |
| 1649 | in the bundle. Upload the local charms to the model, and replace |
| 1650 | the filesystem paths with appropriate 'local:' paths in the bundle. |
| 1651 | |
| 1652 | Return the modified bundle. |
| 1653 | |
| 1654 | :param dict bundle: Bundle dictionary |
| 1655 | :return: Modified bundle dictionary |
| 1656 | |
| 1657 | """ |
| 1658 | apps, args = [], [] |
| 1659 | |
| 1660 | default_series = bundle.get('series') |
| 1661 | for app_name in self.applications: |
| 1662 | app_dict = bundle['services'][app_name] |
| 1663 | charm_dir = os.path.abspath(os.path.expanduser(app_dict['charm'])) |
| 1664 | if not os.path.isdir(charm_dir): |
| 1665 | continue |
| 1666 | series = ( |
| 1667 | app_dict.get('series') or |
| 1668 | default_series or |
| 1669 | get_charm_series(charm_dir) |
| 1670 | ) |
| 1671 | if not series: |
| 1672 | raise JujuError( |
| 1673 | "Couldn't determine series for charm at {}. " |
| 1674 | "Add a 'series' key to the bundle.".format(charm_dir)) |
| 1675 | |
| 1676 | # Keep track of what we need to update. We keep a list of apps |
| 1677 | # that need to be updated, and a corresponding list of args |
| 1678 | # needed to update those apps. |
| 1679 | apps.append(app_name) |
| 1680 | args.append((charm_dir, series)) |
| 1681 | |
| 1682 | if apps: |
| 1683 | # If we have apps to update, spawn all the coroutines concurrently |
| 1684 | # and wait for them to finish. |
| 1685 | charm_urls = await asyncio.gather(*[ |
| 1686 | self.model.add_local_charm_dir(*params) |
| 1687 | for params in args |
| 1688 | ], loop=self.model.loop) |
| 1689 | # Update the 'charm:' entry for each app with the new 'local:' url. |
| 1690 | for app_name, charm_url in zip(apps, charm_urls): |
| 1691 | bundle['services'][app_name]['charm'] = charm_url |
| 1692 | |
| 1693 | return bundle |
| 1694 | |
| 1695 | async def fetch_plan(self, entity_id): |
| 1696 | is_local = not entity_id.startswith('cs:') and os.path.isdir(entity_id) |
| 1697 | if is_local: |
| 1698 | bundle_yaml = (Path(entity_id) / "bundle.yaml").read_text() |
| 1699 | else: |
| 1700 | bundle_yaml = await self.charmstore.files(entity_id, |
| 1701 | filename='bundle.yaml', |
| 1702 | read_file=True) |
| 1703 | self.bundle = yaml.safe_load(bundle_yaml) |
| 1704 | self.bundle = await self._handle_local_charms(self.bundle) |
| 1705 | |
| 1706 | self.plan = await self.client_facade.GetBundleChanges( |
| 1707 | yaml.dump(self.bundle)) |
| 1708 | |
| 1709 | async def execute_plan(self): |
| 1710 | for step in self.plan.changes: |
| 1711 | method = getattr(self, step.method) |
| 1712 | result = await method(*step.args) |
| 1713 | self.references[step.id_] = result |
| 1714 | |
| 1715 | @property |
| 1716 | def applications(self): |
| 1717 | return list(self.bundle['services'].keys()) |
| 1718 | |
| 1719 | def resolve(self, reference): |
| 1720 | if reference and reference.startswith('$'): |
| 1721 | reference = self.references[reference[1:]] |
| 1722 | return reference |
| 1723 | |
| 1724 | async def addCharm(self, charm, series): |
| 1725 | """ |
| 1726 | :param charm string: |
| 1727 | Charm holds the URL of the charm to be added. |
| 1728 | |
| 1729 | :param series string: |
| 1730 | Series holds the series of the charm to be added |
| 1731 | if the charm default is not sufficient. |
| 1732 | """ |
| 1733 | # We don't add local charms because they've already been added |
| 1734 | # by self._handle_local_charms |
| 1735 | if charm.startswith('local:'): |
| 1736 | return charm |
| 1737 | |
| 1738 | entity_id = await self.charmstore.entityId(charm) |
| 1739 | log.debug('Adding %s', entity_id) |
| 1740 | await self.client_facade.AddCharm(None, entity_id) |
| 1741 | return entity_id |
| 1742 | |
| 1743 | async def addMachines(self, params=None): |
| 1744 | """ |
| 1745 | :param params dict: |
| 1746 | Dictionary specifying the machine to add. All keys are optional. |
| 1747 | Keys include: |
| 1748 | |
| 1749 | series: string specifying the machine OS series. |
| 1750 | |
| 1751 | constraints: string holding machine constraints, if any. We'll |
| 1752 | parse this into the json friendly dict that the juju api |
| 1753 | expects. |
| 1754 | |
| 1755 | container_type: string holding the type of the container (for |
| 1756 | instance ""lxd" or kvm"). It is not specified for top level |
| 1757 | machines. |
| 1758 | |
| 1759 | parent_id: string holding a placeholder pointing to another |
| 1760 | machine change or to a unit change. This value is only |
| 1761 | specified in the case this machine is a container, in |
| 1762 | which case also ContainerType is set. |
| 1763 | |
| 1764 | """ |
| 1765 | params = params or {} |
| 1766 | |
| 1767 | # Normalize keys |
| 1768 | params = {normalize_key(k): params[k] for k in params.keys()} |
| 1769 | |
| 1770 | # Fix up values, as necessary. |
| 1771 | if 'parent_id' in params: |
| 1772 | params['parent_id'] = self.resolve(params['parent_id']) |
| 1773 | |
| 1774 | params['constraints'] = parse_constraints( |
| 1775 | params.get('constraints')) |
| 1776 | params['jobs'] = params.get('jobs', ['JobHostUnits']) |
| 1777 | |
| 1778 | if params.get('container_type') == 'lxc': |
| 1779 | log.warning('Juju 2.0 does not support lxc containers. ' |
| 1780 | 'Converting containers to lxd.') |
| 1781 | params['container_type'] = 'lxd' |
| 1782 | |
| 1783 | # Submit the request. |
| 1784 | params = client.AddMachineParams(**params) |
| 1785 | results = await self.client_facade.AddMachines([params]) |
| 1786 | error = results.machines[0].error |
| 1787 | if error: |
| 1788 | raise ValueError("Error adding machine: %s" % error.message) |
| 1789 | machine = results.machines[0].machine |
| 1790 | log.debug('Added new machine %s', machine) |
| 1791 | return machine |
| 1792 | |
| 1793 | async def addRelation(self, endpoint1, endpoint2): |
| 1794 | """ |
| 1795 | :param endpoint1 string: |
| 1796 | :param endpoint2 string: |
| 1797 | Endpoint1 and Endpoint2 hold relation endpoints in the |
| 1798 | "application:interface" form, where the application is always a |
| 1799 | placeholder pointing to an application change, and the interface is |
| 1800 | optional. Examples are "$deploy-42:web" or just "$deploy-42". |
| 1801 | """ |
| 1802 | endpoints = [endpoint1, endpoint2] |
| 1803 | # resolve indirect references |
| 1804 | for i in range(len(endpoints)): |
| 1805 | parts = endpoints[i].split(':') |
| 1806 | parts[0] = self.resolve(parts[0]) |
| 1807 | endpoints[i] = ':'.join(parts) |
| 1808 | |
| 1809 | log.info('Relating %s <-> %s', *endpoints) |
| 1810 | return await self.model.add_relation(*endpoints) |
| 1811 | |
| 1812 | async def deploy(self, charm, series, application, options, constraints, |
| 1813 | storage, endpoint_bindings, resources): |
| 1814 | """ |
| 1815 | :param charm string: |
| 1816 | Charm holds the URL of the charm to be used to deploy this |
| 1817 | application. |
| 1818 | |
| 1819 | :param series string: |
| 1820 | Series holds the series of the application to be deployed |
| 1821 | if the charm default is not sufficient. |
| 1822 | |
| 1823 | :param application string: |
| 1824 | Application holds the application name. |
| 1825 | |
| 1826 | :param options map[string]interface{}: |
| 1827 | Options holds application options. |
| 1828 | |
| 1829 | :param constraints string: |
| 1830 | Constraints holds the optional application constraints. |
| 1831 | |
| 1832 | :param storage map[string]string: |
| 1833 | Storage holds the optional storage constraints. |
| 1834 | |
| 1835 | :param endpoint_bindings map[string]string: |
| 1836 | EndpointBindings holds the optional endpoint bindings |
| 1837 | |
| 1838 | :param resources map[string]int: |
| 1839 | Resources identifies the revision to use for each resource |
| 1840 | of the application's charm. |
| 1841 | """ |
| 1842 | # resolve indirect references |
| 1843 | charm = self.resolve(charm) |
| 1844 | # the bundle plan doesn't actually do anything with resources, even |
| 1845 | # though it ostensibly gives us something (None) for that param |
| 1846 | if not charm.startswith('local:'): |
| 1847 | resources = await self.model._add_store_resources(application, |
| 1848 | charm) |
| 1849 | await self.model._deploy( |
| 1850 | charm_url=charm, |
| 1851 | application=application, |
| 1852 | series=series, |
| 1853 | config=options, |
| 1854 | constraints=constraints, |
| 1855 | endpoint_bindings=endpoint_bindings, |
| 1856 | resources=resources, |
| 1857 | storage=storage, |
| 1858 | ) |
| 1859 | return application |
| 1860 | |
| 1861 | async def addUnit(self, application, to): |
| 1862 | """ |
| 1863 | :param application string: |
| 1864 | Application holds the application placeholder name for which a unit |
| 1865 | is added. |
| 1866 | |
| 1867 | :param to string: |
| 1868 | To holds the optional location where to add the unit, as a |
| 1869 | placeholder pointing to another unit change or to a machine change. |
| 1870 | """ |
| 1871 | application = self.resolve(application) |
| 1872 | placement = self.resolve(to) |
| 1873 | if self._units_by_app.get(application): |
| 1874 | # enough units for this application already exist; |
| 1875 | # claim one, and carry on |
| 1876 | # NB: this should probably honor placement, but the juju client |
| 1877 | # doesn't, so we're not bothering, either |
| 1878 | unit_name = self._units_by_app[application].pop() |
| 1879 | log.debug('Reusing unit %s for %s', unit_name, application) |
| 1880 | return self.model.units[unit_name] |
| 1881 | |
| 1882 | log.debug('Adding new unit for %s%s', application, |
| 1883 | ' to %s' % placement if placement else '') |
| 1884 | return await self.model.applications[application].add_unit( |
| 1885 | count=1, |
| 1886 | to=placement, |
| 1887 | ) |
| 1888 | |
| 1889 | async def expose(self, application): |
| 1890 | """ |
| 1891 | :param application string: |
| 1892 | Application holds the placeholder name of the application that must |
| 1893 | be exposed. |
| 1894 | """ |
| 1895 | application = self.resolve(application) |
| 1896 | log.info('Exposing %s', application) |
| 1897 | return await self.model.applications[application].expose() |
| 1898 | |
| 1899 | async def setAnnotations(self, id_, entity_type, annotations): |
| 1900 | """ |
| 1901 | :param id_ string: |
| 1902 | Id is the placeholder for the application or machine change |
| 1903 | corresponding to the entity to be annotated. |
| 1904 | |
| 1905 | :param entity_type EntityType: |
| 1906 | EntityType holds the type of the entity, "application" or |
| 1907 | "machine". |
| 1908 | |
| 1909 | :param annotations map[string]string: |
| 1910 | Annotations holds the annotations as key/value pairs. |
| 1911 | """ |
| 1912 | entity_id = self.resolve(id_) |
| 1913 | try: |
| 1914 | entity = self.model.state.get_entity(entity_type, entity_id) |
| 1915 | except KeyError: |
| 1916 | entity = await self.model._wait_for_new(entity_type, entity_id) |
| 1917 | return await entity.set_annotations(annotations) |
| 1918 | |
| 1919 | |
class CharmStore(object):
    """
    Async wrapper around theblues.charmstore.CharmStore
    """
    def __init__(self, loop):
        self.loop = loop
        self._cs = theblues.charmstore.CharmStore(timeout=5)

    def __getattr__(self, name):
        """
        Wrap method calls in coroutines that use run_in_executor to make them
        async. Non-callable attributes become zero-arg callables returning
        the current value. The wrapper is cached on the instance so
        subsequent lookups bypass __getattr__ entirely.
        """
        attr = getattr(self._cs, name)
        if callable(attr):
            async def coro(*args, **kwargs):
                method = partial(attr, *args, **kwargs)
                retries = 3
                # Retry transient charm store errors a few times before
                # giving up and re-raising the last one.
                for attempt in range(1, retries + 1):
                    try:
                        return await self.loop.run_in_executor(None, method)
                    except theblues.errors.ServerError:
                        if attempt == retries:
                            raise
                        await asyncio.sleep(1, loop=self.loop)
            wrapper = coro
        else:
            wrapper = partial(getattr, self._cs, name)
        setattr(self, name, wrapper)
        return wrapper
| 1950 | |
| 1951 | |
| 1952 | class CharmArchiveGenerator(object): |
| 1953 | """ |
| 1954 | Create a Zip archive of a local charm directory for upload to a controller. |
| 1955 | |
| 1956 | This is used automatically by |
| 1957 | `Model.add_local_charm_dir <#juju.model.Model.add_local_charm_dir>`_. |
| 1958 | """ |
| 1959 | def __init__(self, path): |
| 1960 | self.path = os.path.abspath(os.path.expanduser(path)) |
| 1961 | |
| 1962 | def make_archive(self, path): |
| 1963 | """Create archive of directory and write to ``path``. |
| 1964 | |
| 1965 | :param path: Path to archive |
| 1966 | |
| 1967 | Ignored:: |
| 1968 | |
| 1969 | * build/\* - This is used for packing the charm itself and any |
| 1970 | similar tasks. |
| 1971 | * \*/.\* - Hidden files are all ignored for now. This will most |
| 1972 | likely be changed into a specific ignore list |
| 1973 | (.bzr, etc) |
| 1974 | |
| 1975 | """ |
| 1976 | zf = zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED) |
| 1977 | for dirpath, dirnames, filenames in os.walk(self.path): |
| 1978 | relative_path = dirpath[len(self.path) + 1:] |
| 1979 | if relative_path and not self._ignore(relative_path): |
| 1980 | zf.write(dirpath, relative_path) |
| 1981 | for name in filenames: |
| 1982 | archive_name = os.path.join(relative_path, name) |
| 1983 | if not self._ignore(archive_name): |
| 1984 | real_path = os.path.join(dirpath, name) |
| 1985 | self._check_type(real_path) |
| 1986 | if os.path.islink(real_path): |
| 1987 | self._check_link(real_path) |
| 1988 | self._write_symlink( |
| 1989 | zf, os.readlink(real_path), archive_name) |
| 1990 | else: |
| 1991 | zf.write(real_path, archive_name) |
| 1992 | zf.close() |
| 1993 | return path |
| 1994 | |
| 1995 | def _check_type(self, path): |
| 1996 | """Check the path |
| 1997 | """ |
| 1998 | s = os.stat(path) |
| 1999 | if stat.S_ISDIR(s.st_mode) or stat.S_ISREG(s.st_mode): |
| 2000 | return path |
| 2001 | raise ValueError("Invalid Charm at % %s" % ( |
| 2002 | path, "Invalid file type for a charm")) |
| 2003 | |
| 2004 | def _check_link(self, path): |
| 2005 | link_path = os.readlink(path) |
| 2006 | if link_path[0] == "/": |
| 2007 | raise ValueError( |
| 2008 | "Invalid Charm at %s: %s" % ( |
| 2009 | path, "Absolute links are invalid")) |
| 2010 | path_dir = os.path.dirname(path) |
| 2011 | link_path = os.path.join(path_dir, link_path) |
| 2012 | if not link_path.startswith(os.path.abspath(self.path)): |
| 2013 | raise ValueError( |
| 2014 | "Invalid charm at %s %s" % ( |
| 2015 | path, "Only internal symlinks are allowed")) |
| 2016 | |
| 2017 | def _write_symlink(self, zf, link_target, link_path): |
| 2018 | """Package symlinks with appropriate zipfile metadata.""" |
| 2019 | info = zipfile.ZipInfo() |
| 2020 | info.filename = link_path |
| 2021 | info.create_system = 3 |
| 2022 | # Magic code for symlinks / py2/3 compat |
| 2023 | # 27166663808 = (stat.S_IFLNK | 0755) << 16 |
| 2024 | info.external_attr = 2716663808 |
| 2025 | zf.writestr(info, link_target) |
| 2026 | |
| 2027 | def _ignore(self, path): |
| 2028 | if path == "build" or path.startswith("build/"): |
| 2029 | return True |
| 2030 | if path.startswith('.'): |
| 2031 | return True |