1 #!/usr/bin/env python3
2 # Copyright 2019 Canonical Ltd.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import asyncio
17 import datetime
18 import logging
19 import n2vc.vnf
20 import pylxd
21 import pytest
22 import os
23 import shlex
24 import subprocess
25 import time
26 import uuid
27 import yaml
28
29 from juju.controller import Controller
30
31 # Disable InsecureRequestWarning w/LXD
32 import urllib3
33 urllib3.disable_warnings()
34 logging.getLogger("urllib3").setLevel(logging.WARNING)
35
36 here = os.path.dirname(os.path.realpath(__file__))
37
38
39 class CleanController():
40 """
41 Async context manager that connects to the currently active controller
42 on entry and disconnects from it on exit.
43
44 Note: Unlike CleanModel, this will not create a new controller for you,
45 and an active controller must already be available.
46 """
47 def __init__(self):
48 self._controller = None
49
50 async def __aenter__(self):
51 self._controller = Controller()
52 await self._controller.connect()
53 return self._controller
54
55 async def __aexit__(self, exc_type, exc, tb):
56 await self._controller.disconnect()
57
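# Illustrative usage sketch (not exercised by the tests below). CleanController
# is an async context manager, so a test coroutine can borrow the active
# controller like this; the test name and the list_models() call are
# assumptions based on the python-libjuju Controller API, not on this module:
#
#     async def test_controller_reachable():
#         async with CleanController() as controller:
#             model_names = await controller.list_models()
#             assert isinstance(model_names, list)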
58
59 def debug(msg):
60 """Format debug messages in a consistent way."""
61 now = datetime.datetime.now()
62
63 # TODO: Decide on the best way to log. Output from `logging.debug` shows up
64 # when a test fails, but print() will always show up when running tox with
65 # `-s`, which is really useful for debugging single tests without having to
66 # insert a False assert to see the log.
67 logging.debug(
68 "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
69 )
70 print(
71 "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
72 )
73
74
75 def get_charm_path():
76 return "{}/charms".format(here)
77
78
79 def get_layer_path():
80 return "{}/charms/layers".format(here)
81
82
83 def collect_metrics(application):
84 """Invoke Juju's metrics collector.
85
86 Caveat: this shells out to the `juju collect-metrics` command, rather than
87 making an API call. At the time of writing, that API is not exposed through
88 the client library.
89 """
90
91 try:
92 subprocess.check_call(['juju', 'collect-metrics', application])
93 except subprocess.CalledProcessError as e:
94 raise Exception("Unable to collect metrics: {}".format(e))
95
96
97 def has_metrics(charm):
98 """Check if a charm has metrics defined."""
99 metricsyaml = "{}/{}/metrics.yaml".format(
100 get_layer_path(),
101 charm,
102 )
103 if os.path.exists(metricsyaml):
104 return True
105 return False
106
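# For reference, a charm layer advertises metrics by shipping a metrics.yaml
# at its root, which is what has_metrics() looks for. A minimal, hypothetical
# example (the metric name and description are illustrative, not taken from
# the layers under tests/charms/layers):
#
#     metrics:
#       uptime:
#         type: gauge
#         description: Seconds the workload has been running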
107
108 def get_descriptor(descriptor):
109 desc = None
110 try:
111 tmp = yaml.safe_load(descriptor)
112
113 # Remove the envelope
114 root = list(tmp.keys())[0]
115 if root == "nsd:nsd-catalog":
116 desc = tmp['nsd:nsd-catalog']['nsd'][0]
117 elif root == "vnfd:vnfd-catalog":
118 desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
119 except yaml.YAMLError:
120 assert False, "Unable to parse descriptor YAML"
121 return desc
122
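# Sketch of the envelope handling above: given YAML text such as the following
# (identifiers are hypothetical),
#
#     vnfd:vnfd-catalog:
#         vnfd:
#         -   id: example-vnf
#             name: example-vnf
#
# get_descriptor() strips the "vnfd:vnfd-catalog" envelope and returns the
# first descriptor, e.g. {'id': 'example-vnf', 'name': 'example-vnf'}.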
123
124 def get_n2vc(loop=None):
125 """Return an instance of N2VC.VNF."""
126 log = logging.getLogger()
127 log.level = logging.DEBUG
128
129 # Extract parameters from the environment in order to run our test
130 vca_host = os.getenv('VCA_HOST', '127.0.0.1')
131 vca_port = int(os.getenv('VCA_PORT', 17070))
132 vca_user = os.getenv('VCA_USER', 'admin')
133 vca_charms = os.getenv('VCA_CHARMS', None)
134 vca_secret = os.getenv('VCA_SECRET', None)
135 vca_cacert = os.getenv('VCA_CACERT', None)
136
137 # Get the Juju Public key
138 juju_public_key = get_juju_public_key()
139 if juju_public_key:
140 debug("Reading Juju public key @ {}".format(juju_public_key))
141 with open(juju_public_key, 'r') as f:
142 juju_public_key = f.read()
143 debug("Found public key: {}".format(juju_public_key))
144 else:
145 raise Exception("No Juju Public Key found")
146
147 # Get the ca-cert
148 # os.path.expanduser("~/.config/lxc")
149 # with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
150 # try:
151 # y = yaml.safe_load(f)
152 # self.cacert = y['cacert']
153 # except yaml.YAMLError as exc:
154 # log("Unable to find Juju ca-cert.")
155 # raise exc
156
157 client = n2vc.vnf.N2VC(
158 log=log,
159 server=vca_host,
160 port=vca_port,
161 user=vca_user,
162 secret=vca_secret,
163 artifacts=vca_charms,
164 loop=loop,
165 juju_public_key=juju_public_key,
166 ca_cert=vca_cacert,
167 )
168 return client
169
170
171 def create_lxd_container(public_key=None, name="test_name"):
172 """
173 Returns a container object
174
175 If public_key isn't set, we'll use the Juju ssh key
176
177 :param public_key: The public key to inject into the container
178 :param name: The name of the test being run
179 """
180 container = None
181
182 # Format name so it's valid
183 name = name.replace("_", "-").replace(".", "")
184
185 client = get_lxd_client()
186 if not client:
187 raise Exception("Unable to connect to LXD")
188
189 test_machine = "test-{}-{}".format(
190 uuid.uuid4().hex[-4:],
191 name,
192 )
193
194 private_key_path, public_key_path = find_n2vc_ssh_keys()
195
196 try:
197 # create profile w/cloud-init and juju ssh key
198 if not public_key:
199 public_key = ""
200 with open(public_key_path, "r") as f:
201 public_key = f.readline()
202
203 client.profiles.create(
204 test_machine,
205 config={
206 'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
207 devices={
208 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
209 'eth0': {
210 'nictype': 'bridged',
211 'parent': 'lxdbr0',
212 'type': 'nic'
213 }
214 }
215 )
216 except Exception as ex:
217 debug("Error creating lxd profile {}: {}".format(test_machine, ex))
218 raise ex
219
220 try:
221 # create lxc machine
222 config = {
223 'name': test_machine,
224 'source': {
225 'type': 'image',
226 'alias': 'xenial',
227 'mode': 'pull',
228 'protocol': 'simplestreams',
229 'server': 'https://cloud-images.ubuntu.com/releases',
230 },
231 'profiles': [test_machine],
232 }
233 container = client.containers.create(config, wait=True)
234 container.start(wait=True)
235 except Exception as ex:
236 debug("Error creating lxd container {}: {}".format(test_machine, ex))
237 # This is a test-ending failure.
238 raise ex
239
240 def wait_for_network(container, timeout=30):
241 """Wait for eth0 to have an ipv4 address."""
242 starttime = time.time()
243 while(time.time() < starttime + timeout):
244 time.sleep(1)
245 if 'eth0' in container.state().network:
246 addresses = container.state().network['eth0']['addresses']
247 if len(addresses) > 0:
248 if addresses[0]['family'] == 'inet':
249 return addresses[0]
250 return None
251
252 try:
253 wait_for_network(container)
254 except Exception as ex:
255 debug(
256 "Error waiting for container {} network: {}".format(
257 test_machine,
258 ex,
259 )
260 )
261
262 try:
263 waitcount = 0
264 while waitcount <= 5:
265 if is_sshd_running(container):
266 break
267 waitcount += 1
268 time.sleep(1)
269 else:  # the while loop finished without detecting sshd
270 debug("couldn't detect sshd running")
271 raise Exception("Unable to verify container sshd")
272
273 except Exception as ex:
274 debug(
275 "Error checking sshd status on {}: {}".format(
276 test_machine,
277 ex,
278 )
279 )
280
281 # HACK: We need to give sshd a chance to bind to the interface,
282 # and pylxd's container.execute seems to be broken and fails and/or
283 # hangs trying to properly check if the service is up.
284 (exit_code, stdout, stderr) = container.execute([
285 'ping',
286 '-c', '5', # Wait for 5 ECHO_REPLY
287 '8.8.8.8', # Ping Google's public DNS
288 '-W', '15', # Set a 15 second deadline
289 ])
290 if exit_code > 0:
291 # The network failed
292 raise Exception("Unable to verify container network")
293
294 return container
295
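# Minimal usage sketch, assuming a reachable LXD daemon and an N2VC/Juju ssh
# key on disk (key discovery happens inside create_lxd_container). The test
# name and the address handling are illustrative only:
#
#     container = create_lxd_container(name="smoke-test")
#     try:
#         addresses = container.state().network['eth0']['addresses']
#         debug("Container address: {}".format(addresses[0]['address']))
#     finally:
#         destroy_lxd_container(container)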
296
297 def is_sshd_running(container):
298 """Check if sshd is running in the container.
299
300 Check to see if the sshd process is running and listening on port 22.
301
302 :param container: The container to check
303 :return boolean: True if sshd is running.
304 """
305 debug("Container: {}".format(container))
306 try:
307 (rc, stdout, stderr) = container.execute(
308 ["service", "ssh", "status"]
309 )
310 # If the status is a) found and b) running, the exit code will be 0
311 if rc == 0:
312 return True
313 except Exception as ex:
314 debug("Failed to check sshd service status: {}".format(ex))
315
316 return False
317
318
319 def destroy_lxd_container(container):
320 """Stop and delete a LXD container.
321
322 Sometimes we see errors talking to LXD -- ephemeral issues like
323 load or a bug that's killed the API. We'll do our best to clean
324 up here, and we should run a cleanup after all tests are finished
325 to remove any extra containers and profiles belonging to us.
326 """
327
328 if type(container) is bool:
329 return
330
331 name = container.name
332 debug("Destroying container {}".format(name))
333
334 client = get_lxd_client()
335
336 def wait_for_stop(timeout=30):
337 """Wait for eth0 to have an ipv4 address."""
338 starttime = time.time()
339 while(time.time() < starttime + timeout):
340 time.sleep(1)
341 if container.state().status == "Stopped":
342 return
343
344 def wait_for_delete(timeout=30):
345 starttime = time.time()
346 while(time.time() < starttime + timeout):
347 time.sleep(1)
348 if client.containers.exists(name) is False:
349 return
350
351 try:
352 container.stop(wait=False)
353 wait_for_stop()
354 except Exception as ex:
355 debug(
356 "Error stopping container {}: {}".format(
357 name,
358 ex,
359 )
360 )
361
362 try:
363 container.delete(wait=False)
364 wait_for_delete()
365 except Exception as ex:
366 debug(
367 "Error deleting container {}: {}".format(
368 name,
369 ex,
370 )
371 )
372
373 try:
374 # Delete the profile created for this container
375 profile = client.profiles.get(name)
376 if profile:
377 profile.delete()
378 except Exception as ex:
379 debug(
380 "Error deleting profile {}: {}".format(
381 name,
382 ex,
383 )
384 )
385
386
387 def find_lxd_config():
388 """Find the LXD configuration directory."""
389 paths = []
390 paths.append(os.path.expanduser("~/.config/lxc"))
391 paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))
392
393 for path in paths:
394 if os.path.exists(path):
395 crt = os.path.expanduser("{}/client.crt".format(path))
396 key = os.path.expanduser("{}/client.key".format(path))
397 if os.path.exists(crt) and os.path.exists(key):
398 return (crt, key)
399 return (None, None)
400
401
402 def find_n2vc_ssh_keys():
403 """Find the N2VC ssh keys."""
404
405 paths = []
406 paths.append(os.path.expanduser("~/.ssh/"))
407
408 for path in paths:
409 if os.path.exists(path):
410 private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
411 public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
412 if os.path.exists(private) and os.path.exists(public):
413 return (private, public)
414 return (None, None)
415
416
417 def find_juju_ssh_keys():
418 """Find the Juju ssh keys."""
419
420 paths = []
421 paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
422
423 for path in paths:
424 if os.path.exists(path):
425 private = os.path.expanduser("{}/juju_id_rsa".format(path))
426 public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
427 if os.path.exists(private) and os.path.exists(public):
428 return (private, public)
429 return (None, None)
430
431
432 def get_juju_private_key():
433 keys = find_juju_ssh_keys()
434 return keys[0]
435
436
437 def get_juju_public_key():
438 """Find the Juju public key."""
439 paths = []
440
441 if 'VCA_PATH' in os.environ:
442 paths.append("{}/ssh".format(os.environ["VCA_PATH"]))
443
444 paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
445 paths.append("/root/.local/share/juju/ssh")
446
447 for path in paths:
448 if os.path.exists(path):
449 public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
450 if os.path.exists(public):
451 return public
452 return None
453
454
455 def get_lxd_client(host=None, port="8443", verify=False):
456 """ Get the LXD client."""
457
458 if host is None:
459 if 'LXD_HOST' in os.environ:
460 host = os.environ['LXD_HOST']
461 else:
462 host = '127.0.0.1'
463
464 passwd = None
465 if 'LXD_SECRET' in os.environ:
466 passwd = os.environ['LXD_SECRET']
467
468 # debug("Connecting to LXD remote {} w/authentication ({})".format(
469 # host,
470 # passwd
471 # ))
472 client = None
473 (crt, key) = find_lxd_config()
474
475 if crt and key:
476 client = pylxd.Client(
477 endpoint="https://{}:{}".format(host, port),
478 cert=(crt, key),
479 verify=verify,
480 )
481
482 # If the LXD server has a password set, authenticate with it.
483 if not client.trusted and passwd:
484 try:
485 client.authenticate(passwd)
486 if not client.trusted:
487 raise Exception("Unable to authenticate with LXD remote")
488 except pylxd.exceptions.LXDAPIException as ex:
489 if 'Certificate already in trust store' not in str(ex):
490 raise
491
492 return client
493
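# Usage sketch: connect to the LXD remote selected by LXD_HOST/LXD_SECRET (or
# the local defaults) and enumerate existing containers. containers.all() is
# the stock pylxd call; the loop body is illustrative:
#
#     client = get_lxd_client()
#     if client:
#         for existing in client.containers.all():
#             debug("Found container: {}".format(existing.name))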
494
495 # TODO: This is marked serial but could run in parallel after some work, including:
496 # - Fixing an event loop issue; seems that all tests stop when one test stops?
497
498
499 @pytest.mark.serial
500 class TestN2VC(object):
501 """TODO:
502 1. Validator Validation
503
504 Automatically validate the descriptors we're using here, unless the test
505 author explicitly wants to skip them. Useful to make sure tests aren't
506 being run against invalid descriptors, validating functionality that may
507 fail against a properly written descriptor.
508
509 We need to have a flag (instance variable) that controls this behavior. It
510 may be necessary to skip validation and run against a descriptor
511 implementing features that have not yet been released in the Information
512 Model.
513 """
514
515 """
516 The six phases of integration testing, applied to the test itself and to each charm:
517
518 setup/teardown_class:
519 1. Prepare - Verify the environment and create a new model
520 2. Deploy - Mark the test as ready to execute
521 3. Configure - Configuration to reach Active state
522 4. Test - Execute primitive(s) to verify success
523 5. Collect - Collect any useful artifacts for debugging (charm, logs)
524 6. Destroy - Destroy the model
525
526
527 1. Prepare - Building of charm
528 2. Deploy - Deploying charm
529 3. Configure - Configuration to reach Active state
530 4. Test - Execute primitive(s) to verify success
531 5. Collect - Collect any useful artifacts for debugging (charm, logs)
532 6. Destroy - Destroy the charm
533
534 """
535 @classmethod
536 def setup_class(self):
537 """ setup any state specific to the execution of the given class (which
538 usually contains tests).
539 """
540 # Initialize instance variable(s)
541 self.n2vc = None
542
543 # Track internal state for each test run
544 self.state = {}
545
546 # Parse the test's descriptors
547 self.nsd = get_descriptor(self.NSD_YAML)
548 self.vnfd = get_descriptor(self.VNFD_YAML)
549
550 self.ns_name = self.nsd['name']
551 self.vnf_name = self.vnfd['name']
552
553 self.charms = {}
554 self.parse_vnf_descriptor()
555 assert self.charms != {}
556
557 # Track artifacts, like compiled charms, that will need to be removed
558 self.artifacts = {}
559
560 # Build the charm(s) needed for this test
561 for charm in self.get_charm_names():
562 # debug("Building charm {}".format(charm))
563 self.get_charm(charm)
564
565 # A bit of a hack, in order to allow the N2VC callback to run parallel
566 # to pytest. Test(s) should wait for this flag to change to False
567 # before returning.
568 self._running = True
569 self._stopping = False
570
571 @classmethod
572 def teardown_class(self):
573 """ teardown any state that was previously setup with a call to
574 setup_class.
575 """
576 debug("Running teardown_class...")
577 try:
578
579 debug("Destroying LXD containers...")
580 for application in self.state:
581 if self.state[application]['container']:
582 destroy_lxd_container(self.state[application]['container'])
583 debug("Destroying LXD containers...done.")
584
585 # Logout of N2VC
586 if self.n2vc:
587 debug("teardown_class(): Logging out of N2VC...")
588 asyncio.get_event_loop().run_until_complete(self.n2vc.logout())
589 debug("teardown_class(): Logging out of N2VC...done.")
590
591 debug("Running teardown_class...done.")
592 except Exception as ex:
593 debug("Exception in teardown_class: {}".format(ex))
594
595 @classmethod
596 def all_charms_active(self):
597 """Determine if the all deployed charms are active."""
598 active = 0
599
600 for application in self.state:
601 if 'status' in self.state[application]:
602 debug("status of {} is '{}'".format(
603 application,
604 self.state[application]['status'],
605 ))
606 if self.state[application]['status'] == 'active':
607 active += 1
608
609 debug("Active charms: {}/{}".format(
610 active,
611 len(self.charms),
612 ))
613
614 if active == len(self.charms):
615 return True
616
617 return False
618
619 @classmethod
620 def are_tests_finished(self):
621 appcount = len(self.state)
622
623 # If we don't have state yet, keep running.
624 if appcount == 0:
625 debug("No applications")
626 return False
627
628 if self._stopping:
629 debug("_stopping is True")
630 return True
631
632 appdone = 0
633 for application in self.state:
634 if self.state[application]['done']:
635 appdone += 1
636
637 debug("{}/{} charms tested".format(appdone, appcount))
638
639 if appcount == appdone:
640 return True
641
642 return False
643
644 @classmethod
645 async def running(self, timeout=600):
646 """Returns if the test is still running.
647
648 @param timeout The time, in seconds, to wait for the test to complete.
649 """
650 if self.are_tests_finished():
651 await self.stop()
652 return False
653
654 await asyncio.sleep(30)
655
656 return self._running
657
658 @classmethod
659 def get_charm(self, charm):
660 """Build and return the path to the test charm.
661
662 Builds one of the charms in tests/charms/layers and returns the path
663 to the compiled charm. The charm will automatically be removed
664 when the test is complete.
665
666 Returns: The path to the built charm or None if `charm build` failed.
667 """
668 # Make sure the charm snap is installed
669 charm_cmd = None
670 try:
671 subprocess.check_call(['which', 'charm'])
672 charm_cmd = "charm build"
673 except subprocess.CalledProcessError:
674 # charm_cmd = "charm-build"
675 # debug("Using legacy charm-build")
676 raise Exception("charm snap not installed.")
677
678 if charm not in self.artifacts:
679 try:
680 # Note: This builds the charm under N2VC/tests/charms/builds/
681 # Currently, the snap-installed command only has write access
682 # to the $HOME (changing in an upcoming release) so writing to
683 # /tmp isn't possible at the moment.
684
685 builds = get_charm_path()
686 if not os.path.exists("{}/builds/{}".format(builds, charm)):
687 cmd = "{} --no-local-layers {}/{} -o {}/".format(
688 charm_cmd,
689 get_layer_path(),
690 charm,
691 builds,
692 )
693 # debug(cmd)
694
695 env = os.environ.copy()
696 env["CHARM_BUILD_DIR"] = builds
697
698 subprocess.check_call(shlex.split(cmd), env=env)
699
700 except subprocess.CalledProcessError as e:
701 # charm build will return error code 100 if the charm fails
702 # the auto-run of charm proof, which we can safely ignore for
703 # our CI charms.
704 if e.returncode != 100:
705 raise Exception("charm build failed: {}.".format(e))
706
707 self.artifacts[charm] = {
708 'tmpdir': builds,
709 'charm': "{}/builds/{}".format(builds, charm),
710 }
711
712 return self.artifacts[charm]['charm']
713
714 @classmethod
715 async def deploy(self, vnf_index, charm, params, loop):
716 """An inner function to do the deployment of a charm from
717 either a vdu or vnf.
718 """
719
720 if not self.n2vc:
721 self.n2vc = get_n2vc(loop=loop)
722
723 debug("Creating model for Network Service {}".format(self.ns_name))
724 await self.n2vc.CreateNetworkService(self.ns_name)
725
726 application = self.n2vc.FormatApplicationName(
727 self.ns_name,
728 self.vnf_name,
729 str(vnf_index),
730 )
731
732 # Initialize the state of the application
733 self.state[application] = {
734 'status': None, # Juju status
735 'container': None, # lxd container, for proxy charms
736 'actions': {}, # Actions we've executed
737 'done': False, # Are we done testing this charm?
738 'phase': "deploy", # What phase is this application in?
739 }
740
741 debug("Deploying charm at {}".format(self.artifacts[charm]))
742
743 # If this is a native charm, we need to provision the underlying
744 # machine ala an LXC container.
745 machine_spec = {}
746
747 if not self.isproxy(application):
748 debug("Creating container for native charm")
749 # args = ("default", application, None, None)
750 self.state[application]['container'] = create_lxd_container(
751 name=os.path.basename(__file__)
752 )
753
754 hostname = self.get_container_ip(
755 self.state[application]['container'],
756 )
757
758 machine_spec = {
759 'hostname': hostname,
760 'username': 'ubuntu',
761 }
762
763 await self.n2vc.DeployCharms(
764 self.ns_name,
765 application,
766 self.vnfd,
767 self.get_charm(charm),
768 params,
769 machine_spec,
770 self.n2vc_callback,
771 )
772
773 @classmethod
774 def parse_vnf_descriptor(self):
775 """Parse the VNF descriptor to make running tests easier.
776
777 Parse the charm information in the descriptor to make it easy to write
778 tests to run against it.
779
780 Each charm becomes an entry in a dict keyed by application name:
781 {'<application-name>': {
782 'proxy': True,
783 'vnf-member-index': 1,
784 'vnf-name': '',
785 'name': '',
786 'initial-config-primitive': {},
787 'config-primitive': {}
788 }}
789 - charm name
790 - is this a proxy charm?
791 - what are the initial-config-primitives (day 1)?
792 - what are the config primitives (day 2)?
793
794 """
795 charms = {}
796
797 # You'd think this would be explicit, but it's just an incremental
798 # value that should be consistent.
799 vnf_member_index = 0
800
801 """Get all vdu and/or vdu config in a descriptor."""
802 config = self.get_config()
803 for cfg in config:
804 if 'juju' in cfg:
805
806 # Get the name to be used for the deployed application
807 application_name = n2vc.vnf.N2VC().FormatApplicationName(
808 self.ns_name,
809 self.vnf_name,
810 str(vnf_member_index),
811 )
812
813 charm = {
814 'application-name': application_name,
815 'proxy': True,
816 'vnf-member-index': vnf_member_index,
817 'vnf-name': self.vnf_name,
818 'name': None,
819 'initial-config-primitive': {},
820 'config-primitive': {},
821 }
822
823 juju = cfg['juju']
824 charm['name'] = juju['charm']
825
826 if 'proxy' in juju:
827 charm['proxy'] = juju['proxy']
828
829 if 'initial-config-primitive' in cfg:
830 charm['initial-config-primitive'] = \
831 cfg['initial-config-primitive']
832
833 if 'config-primitive' in cfg:
834 charm['config-primitive'] = cfg['config-primitive']
835
836 charms[application_name] = charm
837
838 # Increment the vnf-member-index
839 vnf_member_index += 1
840
841 self.charms = charms
842
843 @classmethod
844 def isproxy(self, application_name):
845
846 assert application_name in self.charms
847 assert 'proxy' in self.charms[application_name]
848 assert type(self.charms[application_name]['proxy']) is bool
849
850 # debug(self.charms[application_name])
851 return self.charms[application_name]['proxy']
852
853 @classmethod
854 def get_config(self):
855 """Return an iterable list of config items (vdu and vnf).
856
857 As far as N2VC is concerned, the config section for vdu and vnf are
858 identical. This joins them together so tests only need to iterate
859 through one list.
860 """
861 configs = []
862
863 """Get all vdu and/or vdu config in a descriptor."""
864 vnf_config = self.vnfd.get("vnf-configuration")
865 if vnf_config:
866 juju = vnf_config.get('juju')
867 if juju:
868 configs.append(vnf_config)
869
870 for vdu in self.vnfd['vdu']:
871 vdu_config = vdu.get('vdu-configuration')
872 if vdu_config:
873 juju = vdu_config.get('juju')
874 if juju:
875 configs.append(vdu_config)
876
877 return configs
878
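# For context, get_config() above flattens both of the descriptor fragments
# shown below into a single list. The layout follows the OSM information
# model used by these tests; the identifiers are hypothetical:
#
#     vnf-configuration:
#         juju:
#             charm: simple
#     vdu:
#     -   id: example-vdu
#         vdu-configuration:
#             juju:
#                 charm: simple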
879 @classmethod
880 def get_charm_names(self):
881 """Return a list of charms used by the test descriptor."""
882
883 charms = {}
884
885 # Check if the VDUs in this VNF have a charm
886 for config in self.get_config():
887 juju = config['juju']
888
889 name = juju['charm']
890 if name not in charms:
891 charms[name] = 1
892
893 return charms.keys()
894
895 @classmethod
896 def get_phase(self, application):
897 return self.state[application]['phase']
898
899 @classmethod
900 def set_phase(self, application, phase):
901 self.state[application]['phase'] = phase
902
903 @classmethod
904 async def configure_proxy_charm(self, *args):
905 """Configure a container for use via ssh."""
906 (model, application, _, _) = args
907
908 try:
909 if self.get_phase(application) == "deploy":
910 self.set_phase(application, "configure")
911
912 debug("Start CreateContainer for {}".format(application))
913 self.state[application]['container'] = \
914 await self.CreateContainer(*args)
915 debug("Done CreateContainer for {}".format(application))
916
917 if self.state[application]['container']:
918 debug("Configure {} for container".format(application))
919 if await self.configure_ssh_proxy(application):
920 await asyncio.sleep(0.1)
921 return True
922 else:
923 debug("Failed to configure container for {}".format(application))
924 else:
925 debug("skipping CreateContainer for {}: {}".format(
926 application,
927 self.get_phase(application),
928 ))
929
930 except Exception as ex:
931 debug("configure_proxy_charm exception: {}".format(ex))
932 finally:
933 await asyncio.sleep(0.1)
934
935 return False
936
937 @classmethod
938 async def execute_charm_tests(self, *args):
939 (model, application, _, _) = args
940
941 debug("Executing charm test(s) for {}".format(application))
942
943 if self.state[application]['done']:
944 debug("Trying to execute tests against finished charm...aborting")
945 return False
946
947 try:
948 phase = self.get_phase(application)
949 # We enter the test phase after deploy (for native charms) or after
950 # configure (for proxy charms).
951 if phase in ["deploy", "configure"]:
952 self.set_phase(application, "test")
953 if self.are_tests_finished():
954 raise Exception("Trying to execute init-config on finished test")
955
956 if await self.execute_initial_config_primitives(application):
957 # check for metrics
958 await self.check_metrics(application)
959
960 debug("Done testing {}".format(application))
961 self.state[application]['done'] = True
962
963 except Exception as ex:
964 debug("Exception in execute_charm_tests: {}".format(ex))
965 finally:
966 await asyncio.sleep(0.1)
967
968 return True
969
970 @classmethod
971 async def CreateContainer(self, *args):
972 """Create a LXD container for use with a proxy charm.abs
973
974 1. Get the public key from the charm via `get-ssh-public-key` action
975 2. Create container with said key injected for the ubuntu user
976
977 Returns a Container object
978 """
979 # Create and configure a LXD container for use with a proxy charm.
980 (model, application, _, _) = args
981
982 debug("[CreateContainer] {}".format(args))
983 container = None
984
985 try:
986 # Execute 'get-ssh-public-key' primitive and get returned value
987 uuid = await self.n2vc.ExecutePrimitive(
988 model,
989 application,
990 "get-ssh-public-key",
991 None,
992 )
993
994 result = await self.n2vc.GetPrimitiveOutput(model, uuid)
995 pubkey = result['pubkey']
996
997 container = create_lxd_container(
998 public_key=pubkey,
999 name=os.path.basename(__file__)
1000 )
1001
1002 return container
1003 except Exception as ex:
1004 debug("Error creating container: {}".format(ex))
1005 pass
1006
1007 return None
1008
1009 @classmethod
1010 async def stop(self):
1011 """Stop the test.
1012
1013 - Remove charms
1014 - Stop and delete containers
1015 - Logout of N2VC
1016
1017 TODO: Clean up duplicate code between teardown_class() and stop()
1018 """
1019 debug("stop() called")
1020
1021 if self.n2vc and self._running and not self._stopping:
1022 self._running = False
1023 self._stopping = True
1024
1025 # Destroy the network service
1026 try:
1027 await self.n2vc.DestroyNetworkService(self.ns_name)
1028 except Exception as e:
1029 debug(
1030 "Error Destroying Network Service \"{}\": {}".format(
1031 self.ns_name,
1032 e,
1033 )
1034 )
1035
1036 # Wait for the applications to be removed and delete the containers
1037 for application in self.charms:
1038 try:
1039
1040 while True:
1041 # Wait for the application to be removed
1042 await asyncio.sleep(10)
1043 if not await self.n2vc.HasApplication(
1044 self.ns_name,
1045 application,
1046 ):
1047 break
1048
1049 # Need to wait for the charm to finish first, because native charms run in the container we are about to delete.
1050 if self.state[application]['container']:
1051 debug("Deleting LXD container...")
1052 destroy_lxd_container(
1053 self.state[application]['container']
1054 )
1055 self.state[application]['container'] = None
1056 debug("Deleting LXD container...done.")
1057 else:
1058 debug("No container found for {}".format(application))
1059 except Exception as e:
1060 debug("Error while deleting container: {}".format(e))
1061
1062 # Logout of N2VC
1063 try:
1064 debug("stop(): Logging out of N2VC...")
1065 await self.n2vc.logout()
1066 self.n2vc = None
1067 debug("stop(): Logging out of N2VC...Done.")
1068 except Exception as ex:
1069 debug(ex)
1070
1071 # Let the test know we're finished.
1072 debug("Marking test as finished.")
1073 # self._running = False
1074 else:
1075 debug("Skipping stop()")
1076
1077 @classmethod
1078 def get_container_ip(self, container):
1079 """Return the IPv4 address of container's eth0 interface."""
1080 ipaddr = None
1081 if container:
1082 addresses = container.state().network['eth0']['addresses']
1083 # The interface may have more than one address, but we only need
1084 # the first one for testing purposes.
1085 ipaddr = addresses[0]['address']
1086
1087 return ipaddr
1088
1089 @classmethod
1090 async def configure_ssh_proxy(self, application, task=None):
1091 """Configure the proxy charm to use the lxd container.
1092
1093 Configure the charm to use a LXD container as its VNF.
1094 """
1095 debug("Configuring ssh proxy for {}".format(application))
1096
1097 mgmtaddr = self.get_container_ip(
1098 self.state[application]['container'],
1099 )
1100
1101 debug(
1102 "Setting ssh-hostname for {} to {}".format(
1103 application,
1104 mgmtaddr,
1105 )
1106 )
1107
1108 await self.n2vc.ExecutePrimitive(
1109 self.ns_name,
1110 application,
1111 "config",
1112 None,
1113 params={
1114 'ssh-hostname': mgmtaddr,
1115 'ssh-username': 'ubuntu',
1116 }
1117 )
1118
1119 return True
1120
1121 @classmethod
1122 async def execute_initial_config_primitives(self, application, task=None):
1123 debug("Executing initial_config_primitives for {}".format(application))
1124 try:
1125 init_config = self.charms[application]
1126
1127 """
1128 The initial-config-primitive is run during deploy but may fail
1129 on some steps because proxy charm access isn't configured.
1130
1131 Re-run those actions so we can inspect the status.
1132 """
1133 uuids = await self.n2vc.ExecuteInitialPrimitives(
1134 self.ns_name,
1135 application,
1136 init_config,
1137 )
1138
1139 """
1140 ExecutePrimitives will return a list of uuids. We need to check the
1141 status of each. The test continues if all Actions succeed, and
1142 fails if any of them fail.
1143 """
1144 await self.wait_for_uuids(application, uuids)
1145 debug("Primitives for {} finished.".format(application))
1146
1147 return True
1148 except Exception as ex:
1149 debug("execute_initial_config_primitives exception: {}".format(ex))
1150 raise ex
1151
1152 return False
1153
1154 @classmethod
1155 async def check_metrics(self, application, task=None):
1156 """Check and run metrics, if present.
1157
1158 Checks to see if metrics are specified by the charm. If so, collects
1159 the metrics.
1160
1161 If the charm defines no metrics, there is nothing to collect or verify.
1162 """
1163 if has_metrics(self.charms[application]['name']):
1164 debug("Collecting metrics for {}".format(application))
1165
1166 metrics = await self.n2vc.GetMetrics(
1167 self.ns_name,
1168 application,
1169 )
1170
1171 return await self.verify_metrics(application, metrics)
1172
1173 @classmethod
1174 async def verify_metrics(self, application, metrics):
1175 """Verify the charm's metrics.
1176
1177 Verify that the charm has sent metrics successfully.
1178
1179 Stops the test when finished.
1180 """
1181 debug("Verifying metrics for {}: {}".format(application, metrics))
1182
1183 if len(metrics):
1184 return True
1185
1186 else:
1187 # TODO: Ran into a case where it took 9 attempts before metrics
1188 # were available; the controller is slow sometimes.
1189 await asyncio.sleep(30)
1190 return await self.check_metrics(application)
1191
1192 @classmethod
1193 async def wait_for_uuids(self, application, uuids):
1194 """Wait for primitives to execute.
1195
1196 The task will provide a list of uuids representing primitives that are
1197 queued to run.
1198 """
1199 debug("Waiting for uuids for {}: {}".format(application, uuids))
1200 waitfor = len(uuids)
1201 finished = 0
1202
1203 while waitfor > finished:
1204 for uid in uuids:
1205 await asyncio.sleep(10)
1206
1207 if uid not in self.state[application]['actions']:
1208 self.state[application]['actions'][uid] = "pending"
1209
1210 status = self.state[application]['actions'][uid]
1211
1212 # Have we already marked this as done?
1213 if status in ["pending", "running"]:
1214
1215 debug("Getting status of {} ({})...".format(uid, status))
1216 status = await self.n2vc.GetPrimitiveStatus(
1217 self.ns_name,
1218 uid,
1219 )
1220 debug("...state of {} is {}".format(uid, status))
1221 self.state[application]['actions'][uid] = status
1222
1223 if status in ['completed', 'failed']:
1224 finished += 1
1225
1226 debug("{}/{} actions complete".format(finished, waitfor))
1227
1228 # Wait for the primitive to finish and try again
1229 if waitfor > finished:
1230 debug("Waiting 10s for action to finish...")
1231 await asyncio.sleep(10)
1232
1233 @classmethod
1234 def n2vc_callback(self, *args, **kwargs):
1235 (model, application, status, message) = args
1236 # debug("callback: {}".format(args))
1237
1238 if application not in self.state:
1239 # Initialize the state of the application
1240 self.state[application] = {
1241 'status': None, # Juju status
1242 'container': None, # lxd container, for proxy charms
1243 'actions': {}, # Actions we've executed
1244 'done': False, # Are we done testing this charm?
1245 'phase': "deploy", # What phase is this application in?
1246 }
1247
1248 self.state[application]['status'] = status
1249
1250 if status in ['waiting', 'maintenance', 'unknown']:
1251 # Nothing to do for these
1252 return
1253
1254 debug("callback: {}".format(args))
1255
1256 if self.state[application]['done']:
1257 debug("{} is done".format(application))
1258 return
1259
1260 if status in ['error']:
1261 # To test broken charms, if a charm enters an error state we should
1262 # end the test
1263 debug("{} is in an error state, stop the test.".format(application))
1264 # asyncio.ensure_future(self.stop())
1265 self.state[application]['done'] = True
1266 assert False
1267
1268 if status in ["blocked"] and self.isproxy(application):
1269 if self.state[application]['phase'] == "deploy":
1270 debug("Configuring proxy charm for {}".format(application))
1271 asyncio.ensure_future(self.configure_proxy_charm(*args))
1272
1273 elif status in ["active"]:
1274 """When a charm is active, we can assume that it has been properly
1275 configured (not blocked), regardless of whether it is a proxy charm or not.
1276
1277 All primitives should be complete by init_config_primitive
1278 """
1279 asyncio.ensure_future(self.execute_charm_tests(*args))