# Tox + integration testing helpers for N2VC
# (from osm/N2VC.git — tests/base.py)
1 #!/usr/bin/env python3
2 import asyncio
3 import functools
4
5 import logging
6 import n2vc.vnf
7 import pylxd
8 import pytest
9 import os
10 import shlex
11 import shutil
12 import subprocess
13 import tempfile
14 import time
15 import uuid
16 import yaml
17
18 from juju.controller import Controller
19
# Disable InsecureRequestWarning w/LXD: we talk to the local LXD daemon
# over https with certificate verification off, which urllib3 would
# otherwise warn about on every request.
import urllib3
urllib3.disable_warnings()
logging.getLogger("urllib3").setLevel(logging.WARNING)

# Directory containing this file; used to locate the test charms/layers.
here = os.path.dirname(os.path.realpath(__file__))
26
27
def is_bootstrapped():
    """Return True when an active Juju controller is selected.

    Shells out to `juju switch`; a zero exit status together with
    non-empty output means a controller is currently available.
    """
    proc = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    output = proc.stdout.decode().strip()
    return proc.returncode == 0 and len(output) > 0
33
34
# pytest marker: skip the decorated test unless a bootstrapped Juju
# controller is reachable (evaluated once, at import time).
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
38
39
class CleanController():
    """
    Async context manager around the currently active Juju controller.

    On entry, connects a Controller instance and yields it; on exit,
    disconnects it.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        self._controller = None

    async def __aenter__(self):
        controller = Controller()
        await controller.connect()
        self._controller = controller
        return controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
58
59
def get_charm_path():
    """Return the absolute path of the test charms directory."""
    return f"{here}/charms"
62
63
def get_layer_path():
    """Return the absolute path of the test charm layers directory."""
    return f"{here}/charms/layers"
66
67
def parse_metrics(application, results):
    """Flatten the returned metrics into a single dict.

    Scans every unit in `results` and keeps the key/value pairs of the
    units whose name starts with the application name. Caveat: we're
    grabbing results from the first unit of the application, which is
    enough for testing, since we only deploy a single unit.
    """
    parsed = {}
    matching_units = (u for u in results if u.startswith(application))
    for unit in matching_units:
        for item in results[unit]:
            parsed[item['key']] = item['value']
    return parsed
80
81
def collect_metrics(application):
    """Invoke Juju's metrics collector for an application.

    Caveat: this shells out to the `juju collect-metrics` command, rather
    than making an API call. At the time of writing, that API is not
    exposed through the client library.

    Raises Exception when the collector exits non-zero.
    """
    cmd = ['juju', 'collect-metrics', application]
    try:
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
94
95
def has_metrics(charm):
    """Return True if a charm layer has metrics defined.

    Checks for the presence of a metrics.yaml file in the charm's
    layer directory.
    """
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    # Return the predicate directly instead of branching to True/False.
    return os.path.exists(metricsyaml)
105
106
def get_descriptor(descriptor):
    """Parse a YAML descriptor and strip the outer catalog envelope.

    Returns the first nsd/vnfd record found, or None if the envelope
    key is unrecognized.
    """
    desc = None
    try:
        # safe_load instead of load: load() without an explicit Loader is
        # deprecated and can construct arbitrary Python objects from the
        # YAML stream; descriptors only need plain mappings.
        tmp = yaml.safe_load(descriptor)

        # Remove the envelope
        root = list(tmp.keys())[0]
        if root == "nsd:nsd-catalog":
            desc = tmp['nsd:nsd-catalog']['nsd'][0]
        elif root == "vnfd:vnfd-catalog":
            desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    except ValueError:
        assert False
    return desc
121
122
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF configured from the environment.

    Connection parameters come from VCA_HOST, VCA_PORT, VCA_USER,
    VCA_SECRET and VCA_CHARMS, with local-developer defaults.
    """
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Running under tox/pytest makes getting env variables harder.

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    # Fix: os.getenv returns a str when VCA_PORT is set, while the default
    # was the int 17070 — normalize so callers always see an int port.
    vca_port = int(os.getenv('VCA_PORT', 17070))
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop
    )
    return client
147
148
def create_lxd_container(public_key=None, name="test_name"):
    """Create, start and return a LXD container object.

    If public_key isn't set, we'll use the Juju ssh key.

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's a valid LXD identifier (no underscores or dots)
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    # Random suffix keeps machine/profile names unique across test runs
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_juju_ssh_keys()

    # create profile w/cloud-init and juju ssh key
    if not public_key:
        public_key = ""
        with open(public_key_path, "r") as f:
            public_key = f.readline()

    # cloud-init injects the key for the default user so the proxy charm
    # can later ssh into the container.
    client.profiles.create(
        test_machine,
        config={
            'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
        devices={
            'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
            'eth0': {
                'nictype': 'bridged',
                'parent': 'lxdbr0',
                'type': 'nic'
            }
        }
    )

    # create lxc machine from the xenial simplestreams image, attached to
    # the profile created above
    config = {
        'name': test_machine,
        'source': {
            'type': 'image',
            'alias': 'xenial',
            'mode': 'pull',
            'protocol': 'simplestreams',
            'server': 'https://cloud-images.ubuntu.com/releases',
        },
        'profiles': [test_machine],
    }
    container = client.containers.create(config, wait=True)
    container.start(wait=True)

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    # NOTE(review): the return value is discarded — on timeout the
    # container may still lack an address when returned; confirm callers
    # tolerate that.
    wait_for_network(container)

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    time.sleep(5)
    # Drop the client reference; only the container object is returned.
    client = None

    return container
227
228
def destroy_lxd_container(container):
    """Stop and delete a LXD container, then remove its profile."""
    name = container.name
    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll until the container reports a Stopped state (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            # NOTE(review): pylxd exposes the lifecycle string via
            # `container.status`; `container.state` is a method object, so
            # this comparison may never become True and the loop would run
            # out the full timeout — confirm against the pylxd API.
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll until the container no longer exists (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    # stop/delete are requested asynchronously (wait=False); the local
    # pollers above block until each operation is observed to finish.
    container.stop(wait=False)
    wait_for_stop()

    container.delete(wait=False)
    wait_for_delete()

    # Delete the profile created for this container
    profile = client.profiles.get(name)
    if profile:
        profile.delete()
259
260
def find_lxd_config():
    """Locate the LXD client certificate and key.

    Searches the conventional locations for native and snap installs and
    returns (crt_path, key_path), or (None, None) when neither exists.
    """
    candidates = [
        os.path.expanduser("~/.config/lxc"),
        os.path.expanduser("~/snap/lxd/current/.config/lxc"),
    ]

    for base in candidates:
        if not os.path.exists(base):
            continue
        crt = os.path.expanduser("{}/client.crt".format(base))
        key = os.path.expanduser("{}/client.key".format(base))
        if os.path.exists(crt) and os.path.exists(key):
            return (crt, key)
    return (None, None)
274
275
def find_juju_ssh_keys():
    """Locate the Juju ssh key pair.

    Returns (private_path, public_path), or (None, None) when the keys
    cannot be found in the standard Juju data directory.
    """
    candidates = [
        os.path.expanduser("~/.local/share/juju/ssh/"),
    ]

    for base in candidates:
        if not os.path.exists(base):
            continue
        private = os.path.expanduser("{}/juju_id_rsa".format(base))
        public = os.path.expanduser("{}/juju_id_rsa.pub".format(base))
        if os.path.exists(private) and os.path.exists(public):
            return (private, public)
    return (None, None)
289
290
def get_juju_private_key():
    """Return the path to the Juju private ssh key (or None)."""
    private, _ = find_juju_ssh_keys()
    return private
294
295
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """Return a pylxd client for the local LXD daemon, or None.

    A client is only built when the local client certificate and key can
    be located; TLS verification is disabled by default for test use.
    """
    (crt, key) = find_lxd_config()

    if not (crt and key):
        return None

    return pylxd.Client(
        endpoint="https://{}:{}".format(host, port),
        cert=(crt, key),
        verify=verify,
    )
309
310 # TODO: This is marked serial but can be run in parallel with work, including:
311 # - Fixing an event loop issue; seems that all tests stop when one test stops?
312
313
314 @pytest.mark.serial
315 class TestN2VC(object):
316 """TODO:
317 1. Validator Validation
318
319 Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
320
321 We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
322 """
323
324 @classmethod
325 def setup_class(self):
326 """ setup any state specific to the execution of the given class (which
327 usually contains tests).
328 """
329 # Initialize instance variable(s)
330 self.container = None
331
332 # Parse the test's descriptors
333 self.nsd = get_descriptor(self.NSD_YAML)
334 self.vnfd = get_descriptor(self.VNFD_YAML)
335
336 self.ns_name = self.nsd['name']
337 self.vnf_name = self.vnfd['name']
338
339 self.charms = {}
340 self.parse_vnf_descriptor()
341 assert self.charms is not {}
342
343 # Track artifacts, like compiled charms, that will need to be removed
344 self.artifacts = {}
345
346 # Build the charm(s) needed for this test
347 for charm in self.get_charm_names():
348 self.get_charm(charm)
349
350 # A bit of a hack, in order to allow the N2VC callback to run parallel
351 # to pytest. Test(s) should wait for this flag to change to False
352 # before returning.
353 self._running = True
354
    @classmethod
    def teardown_class(self):
        """Tear down any state that was previously set up with a call to
        setup_class.
        """
        # Destroy the LXD container created for proxy-charm tests, if any
        if self.container:
            destroy_lxd_container(self.container)

        # Clean up any artifacts created during the test
        logging.debug("Artifacts: {}".format(self.artifacts))
        for charm in self.artifacts:
            artifact = self.artifacts[charm]
            if os.path.exists(artifact['tmpdir']):
                logging.debug("Removing directory '{}'".format(artifact))
                shutil.rmtree(artifact['tmpdir'])

        # Logout of N2VC; scheduled on the loop rather than awaited.
        # NOTE(review): assumes an event loop is still running at teardown
        # time — confirm, otherwise the logout never executes.
        asyncio.ensure_future(self.n2vc.logout())
373
374 @classmethod
375 def running(self, timeout=600):
376 """Returns if the test is still running.
377
378 @param timeout The time, in seconds, to wait for the test to complete.
379 """
380
381 # if start + now > start > timeout:
382 # self.stop_test()
383 return self._running
384
385 @classmethod
386 def get_charm(self, charm):
387 """Build and return the path to the test charm.
388
389 Builds one of the charms in tests/charms/layers and returns the path
390 to the compiled charm. The charm will automatically be removed when
391 when the test is complete.
392
393 Returns: The path to the built charm or None if `charm build` failed.
394 """
395
396 # Make sure the charm snap is installed
397 try:
398 subprocess.check_call(['which', 'charm'])
399 except subprocess.CalledProcessError as e:
400 raise Exception("charm snap not installed.")
401
402 if charm not in self.artifacts:
403 try:
404 # Note: This builds the charm under N2VC/tests/charms/
405 # The snap-installed command only has write access to the users $HOME
406 # so writing to /tmp isn't possible at the moment.
407 builds = tempfile.mkdtemp(dir=get_charm_path())
408
409 cmd = "charm build {}/{} -o {}/".format(
410 get_layer_path(),
411 charm,
412 builds,
413 )
414 logging.debug(cmd)
415
416 subprocess.check_call(shlex.split(cmd))
417
418 self.artifacts[charm] = {
419 'tmpdir': builds,
420 'charm': "{}/builds/{}".format(builds, charm),
421 }
422 except subprocess.CalledProcessError as e:
423 raise Exception("charm build failed: {}.".format(e))
424
425 return self.artifacts[charm]['charm']
426
    @classmethod
    async def deploy(self, vnf_index, charm, params, loop):
        """Deploy the charm for a vdu or vnf.

        An inner function to do the deployment of a charm from either a
        vdu or vnf: creates the N2VC client, derives the application name
        from the ns/vnf names and member index, then hands the built charm
        to DeployCharms with n2vc_callback for status updates.
        """
        self.n2vc = get_n2vc(loop=loop)

        vnf_name = self.n2vc.FormatApplicationName(
            self.ns_name,
            self.vnf_name,
            str(vnf_index),
        )
        logging.debug("Deploying charm at {}".format(self.artifacts[charm]))

        await self.n2vc.DeployCharms(
            self.ns_name,
            vnf_name,
            self.vnfd,
            self.get_charm(charm),
            params,
            {},
            self.n2vc_callback
        )
451
    @classmethod
    def parse_vnf_descriptor(self):
        """Parse the VNF descriptor to make running tests easier.

        Parse the charm information in the descriptor to make it easy to
        write tests to run against it.

        Each charm becomes a dictionary in a list:
        [
            'is-proxy': True,
            'vnf-member-index': 1,
            'vnf-name': '',
            'charm-name': '',

            'initial-config-primitive': {},
            'config-primitive': {}
        ]
        - charm name
        - is this a proxy charm?
        - what are the initial-config-primitives (day 1)?
        - what are the config primitives (day 2)?

        """
        charms = {}

        # You'd think this would be explicit, but it's just an incremental
        # value that should be consistent.
        vnf_member_index = 0

        """Get all vdu and/or vdu config in a descriptor."""
        config = self.get_config()
        for cfg in config:
            if 'juju' in cfg:

                # Get the name to be used for the deployed application
                application_name = n2vc.vnf.N2VC().FormatApplicationName(
                    self.ns_name,
                    self.vnf_name,
                    str(vnf_member_index),
                )

                # Skeleton record for this charm; filled in below
                charm = {
                    'application-name': application_name,
                    'proxy': True,
                    'vnf-member-index': vnf_member_index,
                    'vnf-name': self.vnf_name,
                    'name': None,
                    'initial-config-primitive': {},
                    'config-primitive': {},
                }

                juju = cfg['juju']
                charm['name'] = juju['charm']

                # The descriptor may explicitly mark the charm as non-proxy
                if 'proxy' in juju:
                    charm['proxy'] = juju['proxy']

                # Day-1 actions, run right after deploy
                if 'initial-config-primitive' in cfg:
                    charm['initial-config-primitive'] = \
                        cfg['initial-config-primitive']

                # Day-2 actions, runnable on demand
                if 'config-primitive' in cfg:
                    charm['config-primitive'] = cfg['config-primitive']

                charms[application_name] = charm

            # Increment the vnf-member-index
            vnf_member_index += 1

        self.charms = charms
522
523 @classmethod
524 def isproxy(self, application_name):
525
526 assert application_name in self.charms
527 assert 'proxy' in self.charms[application_name]
528 assert type(self.charms[application_name]['proxy']) is bool
529
530 # logging.debug(self.charms[application_name])
531 return self.charms[application_name]['proxy']
532
533 @classmethod
534 def get_config(self):
535 """Return an iterable list of config items (vdu and vnf).
536
537 As far as N2VC is concerned, the config section for vdu and vnf are
538 identical. This joins them together so tests only need to iterate
539 through one list.
540 """
541 configs = []
542
543 """Get all vdu and/or vdu config in a descriptor."""
544 vnf_config = self.vnfd.get("vnf-configuration")
545 if vnf_config:
546 juju = vnf_config['juju']
547 if juju:
548 configs.append(vnf_config)
549
550 for vdu in self.vnfd['vdu']:
551 vdu_config = vdu.get('vdu-configuration')
552 if vdu_config:
553 juju = vdu_config['juju']
554 if juju:
555 configs.append(vdu_config)
556
557 return configs
558
559 @classmethod
560 def get_charm_names(self):
561 """Return a list of charms used by the test descriptor."""
562
563 charms = {}
564
565 # Check if the VDUs in this VNF have a charm
566 for config in self.get_config():
567 juju = config['juju']
568
569 name = juju['charm']
570 if name not in charms:
571 charms[name] = 1
572
573 return charms.keys()
574
    @classmethod
    async def CreateContainer(self, *args):
        """Create a LXD container for use with a proxy charm.

        1. Get the public key from the charm via `get-ssh-public-key` action
        2. Create container with said key injected for the ubuntu user
        """
        if self.container is None:
            # logging.debug("CreateContainer called.")

            # HACK: Set this so the n2vc_callback knows
            # there's a container being created
            self.container = True

            # Create and configure a LXD container for use with a proxy charm.
            (model_name, application_name, _, _) = args

            # Execute 'get-ssh-public-key' primitive and get returned value
            uuid = await self.n2vc.ExecutePrimitive(
                model_name,
                application_name,
                "get-ssh-public-key",
                None,
            )
            # logging.debug("Action UUID: {}".format(uuid))
            result = await self.n2vc.GetPrimitiveOutput(model_name, uuid)
            # logging.debug("Action result: {}".format(result))
            pubkey = result['pubkey']

            # Replace the placeholder True with the real container object
            self.container = create_lxd_container(
                public_key=pubkey,
                name=os.path.basename(__file__)
            )

        return self.container
610
611 @classmethod
612 def get_container_ip(self):
613 """Return the IPv4 address of container's eth0 interface."""
614 ipaddr = None
615 if self.container:
616 addresses = self.container.state().network['eth0']['addresses']
617 # The interface may have more than one address, but we only need
618 # the first one for testing purposes.
619 ipaddr = addresses[0]['address']
620
621 return ipaddr
622
    @classmethod
    def n2vc_callback(self, *args, **kwargs):
        """Monitor and react to changes in the charm state.

        This is where we will monitor the state of the charm:
        - is it active?
        - is it in error?
        - is it waiting on input to continue?

        When the state changes, we respond appropriately:
        - configuring ssh credentials for a proxy charm
        - running a service primitive

        Lastly, when the test has finished we begin the teardown, removing the
        charm and associated LXD container, and notify pytest that this test
        is over.

        Args are expected to contain four values, received from N2VC:
        - str, the name of the model
        - str, the name of the application
        - str, the workload status as reported by Juju
        - str, the workload message as reported by Juju
        """
        (model, application, status, message) = args
        # logging.debug("Callback for {}/{} - {} ({})".format(
        #     model,
        #     application,
        #     status,
        #     message
        # ))

        # Make sure we're only receiving valid status. This will catch charms
        # that aren't setting their workload state and appear as "unknown"
        # assert status not in ["active", "blocked", "waiting", "maintenance"]

        # N2VC may pass the asyncio task that triggered this callback
        task = None
        if kwargs and 'task' in kwargs:
            task = kwargs['task']
            # logging.debug("Got task: {}".format(task))

        # Closures and inner functions, oh my.
        def is_active():
            """Is the charm in an active state?"""
            if status in ["active"]:
                return True
            return False

        def is_blocked():
            """Is the charm waiting for us?"""
            if status in ["blocked"]:
                return True
            return False

        def configure_ssh_proxy(task):
            """Configure the proxy charm to use the lxd container."""
            logging.debug("configure_ssh_proxy({})".format(task))

            mgmtaddr = self.get_container_ip()

            logging.debug(
                "Setting config ssh-hostname={}".format(mgmtaddr)
            )

            # Point the proxy charm's ssh config at the new container
            task = asyncio.ensure_future(
                self.n2vc.ExecutePrimitive(
                    model,
                    application,
                    "config",
                    None,
                    params={
                        'ssh-hostname': mgmtaddr,
                        'ssh-username': 'ubuntu',
                    }
                )
            )

            # Execute the VNFD's 'initial-config-primitive'
            task.add_done_callback(functools.partial(
                execute_initial_config_primitives,
            ))

        def execute_initial_config_primitives(task=None):
            """Re-run the descriptor's initial-config-primitive actions."""
            logging.debug("execute_initial_config_primitives({})".format(task))

            init_config = self.charms[application]

            """
            The initial-config-primitive is run during deploy but may fail
            on some steps because proxy charm access isn't configured.

            At this stage, we'll re-run those actions.
            """

            task = asyncio.ensure_future(
                self.n2vc.ExecuteInitialPrimitives(
                    model,
                    application,
                    init_config,
                )
            )

            """
            ExecutePrimitives will return a list of uuids. We need to check the
            status of each. The test continues if all Actions succeed, and
            fails if any of them fail.
            """
            task.add_done_callback(functools.partial(wait_for_uuids))

        def check_metrics():
            """Kick off an async fetch of the application's metrics."""
            task = asyncio.ensure_future(
                self.n2vc.GetMetrics(
                    model,
                    application,
                )
            )

            task.add_done_callback(
                functools.partial(
                    verify_metrics,
                )
            )

        def verify_metrics(task):
            """Check the fetched metrics; retry until some arrive."""
            logging.debug("Verifying metrics!")
            # Check if task returned metrics
            results = task.result()

            metrics = parse_metrics(application, results)
            logging.debug(metrics)

            if len(metrics):
                # Metrics collected: remove the charm and finish the test
                task = asyncio.ensure_future(
                    self.n2vc.RemoveCharms(model, application)
                )

                task.add_done_callback(functools.partial(stop_test))

            else:
                # TODO: Ran into a case where it took 9 attempts before metrics
                # were available; the controller is slow sometimes.
                # NOTE(review): time.sleep blocks the event loop for a full
                # minute; an asyncio-friendly delay would be preferable.
                time.sleep(60)
                check_metrics()

        def wait_for_uuids(task):
            """Track the status of each Action uuid until all complete."""
            logging.debug("wait_for_uuids({})".format(task))
            uuids = task.result()

            waitfor = len(uuids)
            finished = 0

            def get_primitive_result(uuid, task):
                logging.debug("Got result from action")
                # completed, failed, or running
                result = task.result()

                # NOTE(review): `status in result` tests the *workload status
                # string* as a dict key — this almost certainly should be
                # `'status' in result`. Also `task.result['status']` below is
                # missing call parentheses and would raise if this branch ran.
                if status in result and result['status'] \
                        in ["completed", "failed"]:

                    # It's over
                    logging.debug("Action {} is {}".format(
                        uuid,
                        task.result['status'])
                    )
                    pass
                else:
                    logging.debug("action is still running")

            def get_primitive_status(uuid, task):
                # NOTE(review): this closure appears unused — the callbacks
                # below are wired to get_primitive_result instead.
                result = task.result()

                if result == "completed":
                    # Make sure all primitives are finished
                    # NOTE(review): `global finished` references module scope;
                    # `nonlocal finished` is what the enclosing counter needs.
                    global finished
                    finished += 1

                    if waitfor == finished:
                        # logging.debug("Action complete; removing charm")
                        task = asyncio.ensure_future(
                            self.n2vc.RemoveCharms(model, application)
                        )

                        task.add_done_callback(functools.partial(stop_test))
                elif result == "failed":
                    # logging.debug("Action failed; removing charm")
                    # NOTE(review): the statements after `assert False` are
                    # unreachable; the assert aborts this callback first.
                    assert False
                    self._running = False
                    return
                else:
                    # logging.debug("action is still running: {}".format(result))
                    # logging.debug(result)
                    # pass
                    # The primitive is running; try again.
                    task = asyncio.ensure_future(
                        self.n2vc.GetPrimitiveStatus(model, uuid)
                    )
                    task.add_done_callback(functools.partial(
                        get_primitive_result,
                        uuid,
                    ))

            for actionid in uuids:
                task = asyncio.ensure_future(
                    self.n2vc.GetPrimitiveStatus(model, actionid)
                )
                task.add_done_callback(functools.partial(
                    get_primitive_result,
                    actionid,
                ))

        def stop_test(task):
            """Stop the test.

            When the test has either succeeded or reached a failing state,
            begin the process of removing the test fixtures.
            """
            asyncio.ensure_future(
                self.n2vc.RemoveCharms(model, application)
            )

            self._running = False

        if is_blocked():
            # logging.debug("Charm is in a blocked state!")

            # Container only applies to proxy charms.
            if self.isproxy(application):

                if self.container is None:
                    # logging.debug(
                    #     "Ensuring CreateContainer: status is {}".format(status)
                    # )

                    # Create the new LXD container
                    task = asyncio.ensure_future(self.CreateContainer(*args))

                    # Configure the proxy charm to use the container when ready
                    task.add_done_callback(functools.partial(
                        configure_ssh_proxy,
                    ))

                    # task.add_done_callback(functools.partial(
                    #     stop_test,
                    # ))
                    # create_lxd_container()
                    # self.container = True
            else:
                # A charm may validly be in a blocked state if it's waiting for
                # relations or some other kind of manual intervention
                # logging.debug("This is not a proxy charm.")
                # TODO: needs testing
                # NOTE(review): execute_initial_config_primitives returns None,
                # so ensure_future() here would raise TypeError if this branch
                # ever ran — confirm before relying on it.
                task = asyncio.ensure_future(
                    execute_initial_config_primitives()
                )

                task.add_done_callback(functools.partial(stop_test))

        elif is_active():
            # Does the charm have metrics defined?
            if has_metrics(self.charms[application]['name']):
                # logging.debug("metrics.yaml defined in the layer!")

                # Force a run of the metric collector, so we don't have
                # to wait for it's normal 5 minute interval run.
                # NOTE: this shouldn't be done outside of CI
                collect_metrics(application)

                # get the current metrics
                check_metrics()
            else:
                # When the charm reaches an active state and hasn't been
                # handled (metrics collection, etc)., the test has succeded.
                # logging.debug("Charm is active! Removing charm...")
                task = asyncio.ensure_future(
                    self.n2vc.RemoveCharms(model, application)
                )

                task.add_done_callback(functools.partial(stop_test))