[WIP] Multi-vdu, multi-charm support
[osm/N2VC.git] / tests / base.py
1 #!/usr/bin/env python3
2 import asyncio
3 import functools
4
5 import logging
6 import n2vc.vnf
7 import pylxd
8 import pytest
9 import os
10 import shlex
11 import shutil
12 import subprocess
13 import tempfile
14 import time
15 import uuid
16 import yaml
17
18 from juju.controller import Controller
19
20 # Disable InsecureRequestWarning w/LXD
21 import urllib3
22 urllib3.disable_warnings()
23 logging.getLogger("urllib3").setLevel(logging.WARNING)
24
25 here = os.path.dirname(os.path.realpath(__file__))
26
27
def is_bootstrapped():
    """Return True when an active Juju controller is selected.

    Shells out to ``juju switch``; a zero exit status plus non-empty
    output means a controller is available.
    """
    proc = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    if proc.returncode != 0:
        return False
    return len(proc.stdout.decode().strip()) > 0
33
34
# pytest marker: skips a test unless a bootstrapped Juju controller is
# reachable. Evaluated once at import time via `juju switch`.
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
38
39
class CleanController():
    """Async context manager around the currently active Juju controller.

    Entering connects to the controller and yields it; exiting
    disconnects. Unlike CleanModel, this will not create a new
    controller for you — an active controller must already be available.
    """
    def __init__(self):
        self._controller = None

    async def __aenter__(self):
        controller = Controller()
        await controller.connect()
        self._controller = controller
        return controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
58
59
def get_charm_path():
    """Return the directory holding the test charms."""
    return os.path.join(here, "charms")
62
63
def get_layer_path():
    """Return the directory holding the charm layers used by the tests."""
    return os.path.join(here, "charms", "layers")
66
67
def parse_metrics(application, results):
    """Flatten Juju metric results into a {key: value} dict.

    Metrics are reported per unit; entries are kept for every unit
    belonging to *application*. In practice the tests deploy a single
    unit, so this is effectively "the first unit's metrics".
    """
    parsed = {}
    for unit in (u for u in results if u.startswith(application)):
        for entry in results[unit]:
            parsed[entry['key']] = entry['value']
    return parsed
80
81
def collect_metrics(application):
    """Invoke Juju's metrics collector for *application*.

    Caveat: this shells out to the `juju collect-metrics` command, rather
    than making an API call. At the time of writing, that API is not
    exposed through the client library.

    :param application: name of the deployed application to collect for
    :raises Exception: when the collect-metrics command exits non-zero
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        # Chain the original error so the subprocess context survives.
        raise Exception("Unable to collect metrics: {}".format(e)) from e
94
95
def has_metrics(charm):
    """Return True when the layer for *charm* defines a metrics.yaml."""
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    # os.path.exists already yields the boolean we need; no branching.
    return os.path.exists(metricsyaml)
105
106
def get_descriptor(descriptor):
    """Parse a YAML NSD/VNFD string and strip its catalog envelope.

    :param descriptor: YAML text of an nsd or vnfd catalog
    :return: the first nsd/vnfd record, or None when the envelope is not
             recognized
    """
    desc = None
    try:
        # safe_load avoids executing arbitrary YAML tags; yaml.load
        # without an explicit Loader is deprecated and unsafe.
        tmp = yaml.safe_load(descriptor)

        # Remove the envelope
        root = list(tmp.keys())[0]
        if root == "nsd:nsd-catalog":
            desc = tmp['nsd:nsd-catalog']['nsd'][0]
        elif root == "vnfd:vnfd-catalog":
            desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    except yaml.YAMLError:
        # PyYAML raises YAMLError subclasses on bad input (the previous
        # ValueError was never raised by the parser); fail the test.
        assert False
    return desc
121
122
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF.

    Connection parameters come from the environment (VCA_HOST, VCA_PORT,
    VCA_USER, VCA_SECRET, VCA_CHARMS) with local defaults.

    :param loop: optional asyncio event loop handed to N2VC
    """
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Running under tox/pytest makes getting env variables harder.

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    # getenv returns a str when the variable is set, but the default
    # 17070 was an int; normalize so the port type is always consistent.
    vca_port = int(os.getenv('VCA_PORT', 17070))
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop
    )
    return client
147
148
def create_lxd_container(public_key=None, name="test_name"):
    """
    Returns a started pylxd container object.

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid — presumably LXD rejects '_' and '.' in
    # instance names (TODO confirm against LXD naming rules).
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    # Unique per-run machine/profile name so parallel runs don't collide.
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_juju_ssh_keys()

    # create profile w/cloud-init and juju ssh key
    if not public_key:
        public_key = ""
        with open(public_key_path, "r") as f:
            public_key = f.readline()

    client.profiles.create(
        test_machine,
        config={
            'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
        devices={
            'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
            'eth0': {
                'nictype': 'bridged',
                'parent': 'lxdbr0',
                'type': 'nic'
            }
        }
    )

    # create lxc machine (xenial cloud image, attached to the profile above)
    config = {
        'name': test_machine,
        'source': {
            'type': 'image',
            'alias': 'xenial',
            'mode': 'pull',
            'protocol': 'simplestreams',
            'server': 'https://cloud-images.ubuntu.com/releases',
        },
        'profiles': [test_machine],
    }
    container = client.containers.create(config, wait=True)
    container.start(wait=True)

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address.

        Polls once per second; returns the first 'inet' address dict, or
        None when the timeout expires without one appearing.
        """
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    # NOTE(review): the return value is ignored, so a network timeout is
    # not detected here — the caller will fail later instead.
    wait_for_network(container)

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    time.sleep(5)
    # Drop our reference to the client; only the container is returned.
    client = None

    return container
227
228
def destroy_lxd_container(container):
    """Stop and delete a LXD container and its matching profile.

    :param container: a pylxd container object, as returned by
        create_lxd_container()
    """
    name = container.name
    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll until the container reports the Stopped status."""
        starttime = time.time()
        while time.time() < starttime + timeout:
            time.sleep(1)
            # `state` is a method (see create_lxd_container's use of
            # state().network); the old `container.state == "Stopped"`
            # compared the bound method to a string, was always False,
            # and made this loop spin for the full timeout.
            if container.state().status == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll until the container no longer exists."""
        starttime = time.time()
        while time.time() < starttime + timeout:
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    container.stop(wait=False)
    wait_for_stop()

    container.delete(wait=False)
    wait_for_delete()

    # Delete the profile created for this container
    profile = client.profiles.get(name)
    if profile:
        profile.delete()
259
260
def find_lxd_config():
    """Locate the LXD client certificate and key.

    Checks the classic (~/.config/lxc) and snap install locations.

    :return: (crt_path, key_path), or (None, None) when no pair exists.
    """
    candidates = (
        "~/.config/lxc",
        "~/snap/lxd/current/.config/lxc",
    )
    for candidate in candidates:
        base = os.path.expanduser(candidate)
        if not os.path.exists(base):
            continue
        crt = os.path.expanduser("{}/client.crt".format(base))
        key = os.path.expanduser("{}/client.key".format(base))
        if os.path.exists(crt) and os.path.exists(key):
            return (crt, key)
    return (None, None)
274
275
def find_juju_ssh_keys():
    """Locate the Juju ssh key pair.

    :return: (private_key_path, public_key_path), or (None, None) when
        the keys cannot be found.
    """
    base = os.path.expanduser("~/.local/share/juju/ssh/")
    if os.path.exists(base):
        private = os.path.expanduser("{}/juju_id_rsa".format(base))
        public = os.path.expanduser("{}/juju_id_rsa.pub".format(base))
        if os.path.exists(private) and os.path.exists(public):
            return (private, public)
    return (None, None)
289
290
def get_juju_private_key():
    """Return the path of the Juju private ssh key (or None)."""
    private, _ = find_juju_ssh_keys()
    return private
294
295
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """Build a pylxd client for the local LXD daemon.

    :return: a pylxd.Client, or None when no client cert/key was found.
    """
    crt, key = find_lxd_config()
    if not (crt and key):
        return None

    return pylxd.Client(
        endpoint="https://{}:{}".format(host, port),
        cert=(crt, key),
        verify=verify,
    )
309
310 # TODO: This is marked serial but can be run in parallel with work, including:
311 # - Fixing an event loop issue; seems that all tests stop when one test stops?
312
313
314 @pytest.mark.serial
315 class TestN2VC(object):
316 """TODO:
317 1. Validator Validation
318
319 Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
320
321 We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
322 """
323
324 @classmethod
325 def setup_class(self):
326 """ setup any state specific to the execution of the given class (which
327 usually contains tests).
328 """
329 # Initialize instance variable(s)
330 # self.container = None
331
332 # Track internal state for each test run
333 self.state = {}
334
335 # Parse the test's descriptors
336 self.nsd = get_descriptor(self.NSD_YAML)
337 self.vnfd = get_descriptor(self.VNFD_YAML)
338
339 self.ns_name = self.nsd['name']
340 self.vnf_name = self.vnfd['name']
341
342 self.charms = {}
343 self.parse_vnf_descriptor()
344 assert self.charms is not {}
345
346 # Track artifacts, like compiled charms, that will need to be removed
347 self.artifacts = {}
348
349 # Build the charm(s) needed for this test
350 for charm in self.get_charm_names():
351 self.get_charm(charm)
352
353 # A bit of a hack, in order to allow the N2VC callback to run parallel
354 # to pytest. Test(s) should wait for this flag to change to False
355 # before returning.
356 self._running = True
357
358 @classmethod
359 def teardown_class(self):
360 """ teardown any state that was previously setup with a call to
361 setup_class.
362 """
363 for application in self.state:
364 logging.warn(
365 "Destroying container for application {}".format(application)
366 )
367 if self.state[application]['container']:
368 destroy_lxd_container(self.state[application]['container'])
369
370 # Clean up any artifacts created during the test
371 logging.debug("Artifacts: {}".format(self.artifacts))
372 for charm in self.artifacts:
373 artifact = self.artifacts[charm]
374 if os.path.exists(artifact['tmpdir']):
375 logging.debug("Removing directory '{}'".format(artifact))
376 shutil.rmtree(artifact['tmpdir'])
377 #
378 # Logout of N2VC
379 if self.n2vc:
380 asyncio.ensure_future(self.n2vc.logout())
381 logging.debug("Tearing down")
382 pass
383
384 @classmethod
385 def all_charms_active(self):
386 """Determine if the all deployed charms are active."""
387 active = 0
388 for application in self.charms:
389 if self.charms[application]['status'] == 'active':
390 active += 1
391
392 if active == len(self.charms):
393 logging.warn("All charms active!")
394 return True
395
396 return False
397
398 @classmethod
399 def running(self, timeout=600):
400 """Returns if the test is still running.
401
402 @param timeout The time, in seconds, to wait for the test to complete.
403 """
404
405 # if start + now > start > timeout:
406 # self.stop_test()
407 return self._running
408
409 @classmethod
410 def get_charm(self, charm):
411 """Build and return the path to the test charm.
412
413 Builds one of the charms in tests/charms/layers and returns the path
414 to the compiled charm. The charm will automatically be removed when
415 when the test is complete.
416
417 Returns: The path to the built charm or None if `charm build` failed.
418 """
419
420 # Make sure the charm snap is installed
421 try:
422 subprocess.check_call(['which', 'charm'])
423 except subprocess.CalledProcessError as e:
424 raise Exception("charm snap not installed.")
425
426 if charm not in self.artifacts:
427 try:
428 # Note: This builds the charm under N2VC/tests/charms/
429 # The snap-installed command only has write access to the users $HOME
430 # so writing to /tmp isn't possible at the moment.
431 builds = tempfile.mkdtemp(dir=get_charm_path())
432
433 cmd = "charm build {}/{} -o {}/".format(
434 get_layer_path(),
435 charm,
436 builds,
437 )
438 logging.debug(cmd)
439
440 subprocess.check_call(shlex.split(cmd))
441
442 self.artifacts[charm] = {
443 'tmpdir': builds,
444 'charm': "{}/builds/{}".format(builds, charm),
445 }
446 except subprocess.CalledProcessError as e:
447 raise Exception("charm build failed: {}.".format(e))
448
449 return self.artifacts[charm]['charm']
450
    @classmethod
    async def deploy(self, vnf_index, charm, params, loop):
        """An inner function to do the deployment of a charm from
        either a vdu or vnf.

        :param vnf_index: vnf member index used to build the application name
        :param charm: name of a charm already built via get_charm()
        :param params: extra parameters forwarded to DeployCharms
        :param loop: the asyncio event loop handed to N2VC
        """

        # Stored on the class so callbacks and teardown can reach it.
        self.n2vc = get_n2vc(loop=loop)

        vnf_name = self.n2vc.FormatApplicationName(
            self.ns_name,
            self.vnf_name,
            str(vnf_index),
        )
        logging.debug("Deploying charm at {}".format(self.artifacts[charm]))

        # ns_name doubles as the model name; n2vc_callback receives all
        # subsequent status updates for this deployment.
        await self.n2vc.DeployCharms(
            self.ns_name,
            vnf_name,
            self.vnfd,
            self.get_charm(charm),
            params,
            {},
            self.n2vc_callback
        )
475
    @classmethod
    def parse_vnf_descriptor(self):
        """Parse the VNF descriptor to make running tests easier.

        Parse the charm information in the descriptor to make it easy to write
        tests to run against it. Populates self.charms, keyed by the
        formatted application name.

        Each charm becomes a dictionary in a list:
        [
            'is-proxy': True,
            'vnf-member-index': 1,
            'vnf-name': '',
            'charm-name': '',
            'initial-config-primitive': {},
            'config-primitive': {}
        ]
        - charm name
        - is this a proxy charm?
        - what are the initial-config-primitives (day 1)?
        - what are the config primitives (day 2)?

        """
        charms = {}

        # You'd think this would be explicit, but it's just an incremental
        # value that should be consistent.
        vnf_member_index = 0

        """Get all vdu and/or vdu config in a descriptor."""
        config = self.get_config()
        for cfg in config:
            if 'juju' in cfg:

                # Get the name to be used for the deployed application
                # NOTE(review): a fresh N2VC object is instantiated on every
                # iteration just to call FormatApplicationName — presumably
                # cheap, but worth confirming/hoisting.
                application_name = n2vc.vnf.N2VC().FormatApplicationName(
                    self.ns_name,
                    self.vnf_name,
                    str(vnf_member_index),
                )

                charm = {
                    'application-name': application_name,
                    'proxy': True,
                    'vnf-member-index': vnf_member_index,
                    'vnf-name': self.vnf_name,
                    'name': None,
                    'initial-config-primitive': {},
                    'config-primitive': {},
                }

                juju = cfg['juju']
                charm['name'] = juju['charm']

                # Charms are treated as proxy charms by default; the
                # descriptor may override this.
                if 'proxy' in juju:
                    charm['proxy'] = juju['proxy']

                if 'initial-config-primitive' in cfg:
                    charm['initial-config-primitive'] = \
                        cfg['initial-config-primitive']

                if 'config-primitive' in cfg:
                    charm['config-primitive'] = cfg['config-primitive']

                charms[application_name] = charm

                # Increment the vnf-member-index
                vnf_member_index += 1

        self.charms = charms
545
546 @classmethod
547 def isproxy(self, application_name):
548
549 assert application_name in self.charms
550 assert 'proxy' in self.charms[application_name]
551 assert type(self.charms[application_name]['proxy']) is bool
552
553 # logging.debug(self.charms[application_name])
554 return self.charms[application_name]['proxy']
555
556 @classmethod
557 def get_config(self):
558 """Return an iterable list of config items (vdu and vnf).
559
560 As far as N2VC is concerned, the config section for vdu and vnf are
561 identical. This joins them together so tests only need to iterate
562 through one list.
563 """
564 configs = []
565
566 """Get all vdu and/or vdu config in a descriptor."""
567 vnf_config = self.vnfd.get("vnf-configuration")
568 if vnf_config:
569 juju = vnf_config['juju']
570 if juju:
571 configs.append(vnf_config)
572
573 for vdu in self.vnfd['vdu']:
574 vdu_config = vdu.get('vdu-configuration')
575 if vdu_config:
576 juju = vdu_config['juju']
577 if juju:
578 configs.append(vdu_config)
579
580 return configs
581
582 @classmethod
583 def get_charm_names(self):
584 """Return a list of charms used by the test descriptor."""
585
586 charms = {}
587
588 # Check if the VDUs in this VNF have a charm
589 for config in self.get_config():
590 juju = config['juju']
591
592 name = juju['charm']
593 if name not in charms:
594 charms[name] = 1
595
596 return charms.keys()
597
    @classmethod
    async def CreateContainer(self, *args):
        """Create a LXD container for use with a proxy charm.

        1. Get the public key from the charm via `get-ssh-public-key` action
        2. Create container with said key injected for the ubuntu user

        :param args: (model, application, status, message) as passed to
            n2vc_callback; only model and application are used here.
        :return: the container object — or True transiently, while another
            invocation is still racing to create it (see HACK below).
        """
        # Create and configure a LXD container for use with a proxy charm.
        (model, application, _, _) = args

        print("trying to create container")
        if self.state[application]['container'] is None:
            logging.debug(
                "Creating container for application {}".format(application)
            )
            # HACK: Set this so the n2vc_callback knows
            # there's a container being created
            self.state[application]['container'] = True

            # Execute 'get-ssh-public-key' primitive and get returned value
            uuid = await self.n2vc.ExecutePrimitive(
                model,
                application,
                "get-ssh-public-key",
                None,
            )
            result = await self.n2vc.GetPrimitiveOutput(model, uuid)
            pubkey = result['pubkey']

            self.state[application]['container'] = create_lxd_container(
                public_key=pubkey,
                name=os.path.basename(__file__)
            )

        return self.state[application]['container']
634
635 @classmethod
636 async def stop():
637 """Stop the test.
638
639 - Remove charms
640 - Stop and delete containers
641 - Logout of N2VC
642 """
643 logging.warning("Stop the test.")
644 assert True
645 for application in self.charms:
646 try:
647 logging.warn("Removing charm")
648 await self.n2vc.RemoveCharms(model, application)
649
650 logging.warn(
651 "Destroying container for application {}".format(application)
652 )
653 if self.state[application]['container']:
654 destroy_lxd_container(self.state[application]['container'])
655 except Exception as e:
656 logging.warn("Error while deleting container: {}".format(e))
657
658 # Clean up any artifacts created during the test
659 logging.debug("Artifacts: {}".format(self.artifacts))
660 for charm in self.artifacts:
661 artifact = self.artifacts[charm]
662 if os.path.exists(artifact['tmpdir']):
663 logging.debug("Removing directory '{}'".format(artifact))
664 shutil.rmtree(artifact['tmpdir'])
665
666 # Logout of N2VC
667 await self.n2vc.logout()
668 self.n2vc = None
669
670 self._running = False
671
672 @classmethod
673 def get_container_ip(self, container):
674 """Return the IPv4 address of container's eth0 interface."""
675 ipaddr = None
676 if container:
677 addresses = container.state().network['eth0']['addresses']
678 # The interface may have more than one address, but we only need
679 # the first one for testing purposes.
680 ipaddr = addresses[0]['address']
681
682 return ipaddr
683
    @classmethod
    def n2vc_callback(self, *args, **kwargs):
        """Monitor and react to changes in the charm state.

        This is where we will monitor the state of the charm:
        - is it active?
        - is it in error?
        - is it waiting on input to continue?

        When the state changes, we respond appropriately:
        - configuring ssh credentials for a proxy charm
        - running a service primitive

        Lastly, when the test has finished we begin the teardown, removing the
        charm and associated LXD container, and notify pytest that this test
        is over.

        Args are expected to contain four values, received from N2VC:
        - str, the name of the model
        - str, the name of the application
        - str, the workload status as reported by Juju
        - str, the workload message as reported by Juju
        """
        (model, application, status, message) = args
        # logging.warn("Callback for {}/{} - {} ({})".format(
        #     model,
        #     application,
        #     status,
        #     message
        # ))

        if application not in self.state:
            # Initialize the state of the application
            self.state[application] = {
                'status': None,
                'container': None,
            }

        # Make sure we're only receiving valid status. This will catch charms
        # that aren't setting their workload state and appear as "unknown"
        # assert status not in ["active", "blocked", "waiting", "maintenance"]

        task = None
        if kwargs and 'task' in kwargs:
            task = kwargs['task']
            # logging.debug("Got task: {}".format(task))

        # if application in self.charms:
        self.state[application]['status'] = status

        # Closures and inner functions, oh my.
        def is_active():
            """Is the charm in an active state?"""
            if status in ["active"]:
                return True
            return False

        def is_blocked():
            """Is the charm waiting for us?"""
            if status in ["blocked"]:
                return True
            return False

        def configure_ssh_proxy(task):
            """Configure the proxy charm to use the lxd container."""
            logging.debug("configure_ssh_proxy({})".format(task))

            mgmtaddr = self.get_container_ip(
                self.state[application]['container'],
            )

            logging.debug(
                "Setting config ssh-hostname={}".format(mgmtaddr)
            )

            # task = asyncio.ensure_future(
            #     stop_test,
            # )
            # return

            task = asyncio.ensure_future(
                self.n2vc.ExecutePrimitive(
                    model,
                    application,
                    "config",
                    None,
                    params={
                        'ssh-hostname': mgmtaddr,
                        'ssh-username': 'ubuntu',
                    }
                )
            )

            # Execute the VNFD's 'initial-config-primitive'
            task.add_done_callback(functools.partial(
                execute_initial_config_primitives,
            ))

        def execute_initial_config_primitives(task=None):
            """Re-run the initial-config-primitive actions for this app."""
            logging.debug("execute_initial_config_primitives({})".format(task))

            init_config = self.charms[application]

            """
            The initial-config-primitive is run during deploy but may fail
            on some steps because proxy charm access isn't configured.

            At this stage, we'll re-run those actions.
            """

            task = asyncio.ensure_future(
                self.n2vc.ExecuteInitialPrimitives(
                    model,
                    application,
                    init_config,
                )
            )

            """
            ExecutePrimitives will return a list of uuids. We need to check the
            status of each. The test continues if all Actions succeed, and
            fails if any of them fail.
            """
            task.add_done_callback(functools.partial(wait_for_uuids))

        def check_metrics():
            """Kick off an async metrics fetch; verify_metrics inspects it."""
            task = asyncio.ensure_future(
                self.n2vc.GetMetrics(
                    model,
                    application,
                )
            )

            task.add_done_callback(
                functools.partial(
                    verify_metrics,
                )
            )

        def verify_metrics(task):
            """End the test if metrics arrived; otherwise retry."""
            logging.debug("Verifying metrics!")
            # Check if task returned metrics
            results = task.result()

            metrics = parse_metrics(application, results)
            logging.debug(metrics)

            if len(metrics):
                logging.warn("[metrics] removing charms")
                task = asyncio.ensure_future(
                    self.n2vc.RemoveCharms(model, application)
                )

                task.add_done_callback(functools.partial(self.stop))

            else:
                # TODO: Ran into a case where it took 9 attempts before metrics
                # were available; the controller is slow sometimes.
                # NOTE(review): time.sleep here blocks the event loop for a
                # full minute while waiting for the controller.
                time.sleep(60)
                check_metrics()

        def wait_for_uuids(task):
            """Poll each action uuid returned by ExecuteInitialPrimitives."""
            logging.debug("wait_for_uuids({})".format(task))
            uuids = task.result()

            waitfor = len(uuids)
            finished = 0

            def get_primitive_result(uuid, task):
                logging.debug("Got result from action")
                # completed, failed, or running
                result = task.result()

                # NOTE(review): `status in result` tests the *workload
                # status string* against the dict keys; this was probably
                # meant to be `'status' in result` — confirm.
                if status in result and result['status'] \
                        in ["completed", "failed"]:

                    # It's over
                    # NOTE(review): `task.result['status']` is missing the
                    # call parentheses and would raise if this branch ran.
                    logging.debug("Action {} is {}".format(
                        uuid,
                        task.result['status'])
                    )
                    pass
                else:
                    logging.debug("action is still running")

            def get_primitive_status(uuid, task):
                # NOTE(review): this helper appears unused — the polling
                # below registers get_primitive_result instead.
                result = task.result()

                if result == "completed":
                    # Make sure all primitives are finished
                    # NOTE(review): `finished` is local to wait_for_uuids,
                    # so this should be `nonlocal`; `global` would raise
                    # NameError on the increment.
                    global finished
                    finished += 1

                    if waitfor == finished:
                        if self.all_charms_active():
                            logging.debug("Action complete; removing charm")
                            # NOTE(review): passes the bound method itself,
                            # not a coroutine; ensure_future expects an
                            # awaitable — confirm this path is exercised.
                            task = asyncio.ensure_future(
                                self.stop,
                            )
                            # task = asyncio.ensure_future(
                            #     self.n2vc.RemoveCharms(model, application)
                            # )
                            # task.add_done_callback(functools.partial(stop_test))
                        else:
                            logging.warn("Not all charms in an active state.")
                    elif result == "failed":
                        logging.debug("Action failed; removing charm")
                        # NOTE(review): same uncalled-method issue as above.
                        task = asyncio.ensure_future(
                            self.stop,
                        )
                        # task = asyncio.ensure_future(
                        #     self.n2vc.RemoveCharms(model, application)
                        # )
                        # task.add_done_callback(functools.partial(stop_test))

                        # assert False
                        # self._running = False
                        # return
                    else:
                        # logging.debug("action is still running: {}".format(result))
                        # logging.debug(result)
                        # pass
                        # The primitive is running; try again.
                        task = asyncio.ensure_future(
                            self.n2vc.GetPrimitiveStatus(model, uuid)
                        )
                        task.add_done_callback(functools.partial(
                            get_primitive_result,
                            uuid,
                        ))

            for actionid in uuids:
                task = asyncio.ensure_future(
                    self.n2vc.GetPrimitiveStatus(model, actionid)
                )
                task.add_done_callback(functools.partial(
                    get_primitive_result,
                    actionid,
                ))

        # def stop_test(task):
        #     """Stop the test.
        #
        #     When the test has either succeeded or reached a failing state,
        #     begin the process of removing the test fixtures.
        #     """
        #     for application in self.charms:
        #         asyncio.ensure_future(
        #             self.n2vc.RemoveCharms(model, application)
        #         )
        #
        #     self._running = False

        if is_blocked():
            # Container only applies to proxy charms.
            if self.isproxy(application):

                if self.state[application]['container'] is None:
                    logging.warn("Creating new container")
                    # Create the new LXD container

                    task = asyncio.ensure_future(self.CreateContainer(*args))

                    # Configure the proxy charm to use the container when ready
                    task.add_done_callback(functools.partial(
                        configure_ssh_proxy,
                    ))

                    # task.add_done_callback(functools.partial(
                    #     stop_test,
                    # ))
                    # create_lxd_container()
                    # self.container = True
                # else:
                #     logging.warn("{} already has container".format(application))
                #
                #     task = asyncio.ensure_future(
                #         self.n2vc.RemoveCharms(model, application)
                #     )
                #     task.add_done_callback(functools.partial(stop_test))

            else:
                # A charm may validly be in a blocked state if it's waiting for
                # relations or some other kind of manual intervention
                # logging.debug("This is not a proxy charm.")
                # TODO: needs testing
                # NOTE(review): execute_initial_config_primitives() returns
                # None, and ensure_future(None) raises TypeError — confirm
                # this branch was intended to pass the function instead.
                task = asyncio.ensure_future(
                    execute_initial_config_primitives()
                )

                # task.add_done_callback(functools.partial(stop_test))

        elif is_active():
            # Does the charm have metrics defined?
            if has_metrics(self.charms[application]['name']):
                # logging.debug("metrics.yaml defined in the layer!")

                # Force a run of the metric collector, so we don't have
                # to wait for it's normal 5 minute interval run.
                # NOTE: this shouldn't be done outside of CI
                collect_metrics(application)

                # get the current metrics
                check_metrics()
            else:
                # When the charm reaches an active state and hasn't been
                # handled (metrics collection, etc)., the test has succeded.
                # logging.debug("Charm is active! Removing charm...")
                if self.all_charms_active():
                    logging.warn("All charms active!")
                    # NOTE(review): verify that `self.stop()` matches
                    # stop()'s declared signature under @classmethod.
                    task = asyncio.ensure_future(
                        self.stop(),
                    )

                    # task = asyncio.ensure_future(
                    #     self.n2vc.RemoveCharms(model, application)
                    # )
                    # task.add_done_callback(functools.partial(stop_test))
                else:
                    logging.warning("Waiting for all charms to be active.")
                    # task = asyncio.ensure_future(
                    #     self.n2vc.RemoveCharms(model, application)
                    # )
                    # task.add_done_callback(functools.partial(stop_test))