15 from juju
.controller
import Controller
17 # Disable InsecureRequestWarning w/LXD
19 urllib3
.disable_warnings()
20 logging
.getLogger("urllib3").setLevel(logging
.WARNING
)
22 here
= os
.path
.dirname(os
.path
.realpath(__file__
))
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # Created lazily in __aenter__; holds the connected Controller.
        self._controller = None

    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
def debug(msg):
    """Format debug messages in a consistent way."""
    now = datetime.datetime.now()

    # TODO: Decide on the best way to log. Output from `logging.debug` shows up
    # when a test fails, but print() will always show up when running tox with
    # `-s`, which is really useful for debugging single tests without having to
    # insert a False assert to see the log.
    logging.debug(
        "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
    )
    print(
        "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
    )
def get_charm_path():
    """Return the path to the directory holding the test charms."""
    return "{}/charms".format(here)
def get_layer_path():
    """Return the path to the charm layers used to build the test charms."""
    return "{}/charms/layers".format(here)
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    the client library.

    :param application: name of the Juju application to collect metrics for.
    :raises Exception: if the `juju collect-metrics` command exits non-zero.
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
    """Check if a charm has metrics defined."""
    # Charms advertise metrics via a metrics.yaml in their layer directory.
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    if os.path.exists(metricsyaml):
        return True
    return False
def get_descriptor(descriptor):
    """Parse a YAML descriptor and return the first NSD or VNFD it contains.

    Handles the namespaced catalog roots ("nsd:nsd-catalog" and
    "vnfd:vnfd-catalog"); returns None for an unrecognized root.

    :param descriptor: YAML text (or stream) of the descriptor document.
    """
    desc = None
    # NOTE(review): changed from bare yaml.load(), which is deprecated and
    # unsafe on untrusted input, to yaml.safe_load().
    tmp = yaml.safe_load(descriptor)

    # The catalog root tells us which kind of descriptor this is.
    root = list(tmp.keys())[0]
    if root == "nsd:nsd-catalog":
        desc = tmp['nsd:nsd-catalog']['nsd'][0]
    elif root == "vnfd:vnfd-catalog":
        desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF."""
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # Get the Juju Public key
    juju_public_key = get_juju_public_key()
    if juju_public_key:
        debug("Reading Juju public key @ {}".format(juju_public_key))
        with open(juju_public_key, 'r') as f:
            juju_public_key = f.read()
        debug("Found public key: {}".format(juju_public_key))
    else:
        raise Exception("No Juju Public Key found")

    # NOTE(review): loading the LXD CA cert was disabled upstream; kept here
    # as a reference.
    # os.path.expanduser("~/.config/lxc")
    # with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
    #     try:
    #         y = yaml.safe_load(f)
    #         self.cacert = y['cacert']
    #     except yaml.YAMLError as exc:
    #         log("Unable to find Juju ca-cert.")

    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
        juju_public_key=juju_public_key,
    )
    return client
def create_lxd_container(public_key=None, name="test_name"):
    """Create and start a LXD container for testing charms against.

    Returns a container object

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    if not client:
        raise Exception("Unable to connect to LXD")

    # Unique machine name so parallel tests don't collide.
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_n2vc_ssh_keys()

    try:
        # create profile w/cloud-init and juju ssh key
        if not public_key:
            public_key = ""
            with open(public_key_path, "r") as f:
                public_key = f.readline()

        client.profiles.create(
            test_machine,
            config={
                'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
            devices={
                'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
                'eth0': {
                    'nictype': 'bridged',
                    'parent': 'lxdbr0',
                    'type': 'nic',
                },
            },
        )
    except Exception as ex:
        debug("Error creating lxd profile {}: {}".format(test_machine, ex))
        raise ex

    try:
        # Create and start the container itself.
        config = {
            'name': test_machine,
            'source': {
                'type': 'image',
                'alias': 'xenial',
                'mode': 'pull',
                'protocol': 'simplestreams',
                'server': 'https://cloud-images.ubuntu.com/releases',
            },
            'profiles': [test_machine],
        }
        container = client.containers.create(config, wait=True)
        container.start(wait=True)
    except Exception as ex:
        debug("Error creating lxd container {}: {}".format(test_machine, ex))
        # This is a test-ending failure.
        raise ex

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    try:
        wait_for_network(container)
    except Exception as ex:
        debug(
            "Error waiting for container {} network: {}".format(
                test_machine, ex))

    try:
        # Give sshd a few tries to come up inside the container.
        waitcount = 0
        while waitcount <= 5:
            if is_sshd_running(container):
                break
            waitcount += 1
            time.sleep(5)
        if waitcount > 5:
            debug("couldn't detect sshd running")
            raise Exception("Unable to verify container sshd")
    except Exception as ex:
        debug(
            "Error checking sshd status on {}: {}".format(
                test_machine, ex))

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    (exit_code, stdout, stderr) = container.execute([
        'ping',
        '-c', '5',   # Wait for 5 ECHO_REPLY
        '8.8.8.8',   # Ping Google's public DNS
        '-W', '15',  # Set a 15 second deadline
    ])
    if exit_code:
        # The network failed to come up inside the container.
        raise Exception("Unable to verify container network")

    return container
def is_sshd_running(container):
    """Check if sshd is running in the container.

    Check to see if the sshd process is running and listening on port 22.

    :param container: The container to check
    :return boolean: True if sshd is running.
    """
    debug("Container: {}".format(container))
    try:
        (rc, stdout, stderr) = container.execute(
            ["service", "ssh", "status"]
        )
        # If the status is a) found and b) running, the exit code will be 0
        if rc == 0:
            return True
    except Exception as ex:
        debug("Failed to check sshd service status: {}".format(ex))

    return False
def destroy_lxd_container(container):
    """Stop and delete a LXD container.

    Sometimes we see errors talking to LXD -- ephemerial issues like
    load or a bug that's killed the API. We'll do our best to clean
    up here, and we should run a cleanup after all tests are finished
    to remove any extra containers and profiles belonging to us.
    """

    # Callers sometimes stash a bool in place of a container; nothing to do.
    if type(container) is bool:
        return

    name = container.name
    debug("Destroying container {}".format(name))

    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Wait for the container to reach the Stopped state."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Wait for the container to disappear from the LXD inventory."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    try:
        container.stop(wait=False)
        wait_for_stop()
    except Exception as ex:
        debug(
            "Error stopping container {}: {}".format(
                name, ex))

    try:
        container.delete(wait=False)
        wait_for_delete()
    except Exception as ex:
        debug(
            "Error deleting container {}: {}".format(
                name, ex))

    try:
        # Delete the profile created for this container
        profile = client.profiles.get(name)
        if profile:
            profile.delete()
    except Exception as ex:
        debug(
            "Error deleting profile {}: {}".format(
                name, ex))
def find_lxd_config():
    """Find the LXD configuration directory.

    Checks the classic and snap-installed LXD client config locations and
    returns (client.crt, client.key) paths, or (None, None) when not found.
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)
    return (None, None)
def find_n2vc_ssh_keys():
    """Find the N2VC ssh keys.

    Returns (private_key_path, public_key_path), or (None, None) when the
    id_n2vc_rsa keypair cannot be located.
    """
    paths = []
    paths.append(os.path.expanduser("~/.ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
            public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    Returns (private_key_path, public_key_path), or (None, None) when the
    juju_id_rsa keypair cannot be located.
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def get_juju_private_key():
    """Return the path to the Juju private ssh key (or None if not found)."""
    keys = find_juju_ssh_keys()
    return keys[0]
def get_juju_public_key():
    """Find the Juju public key.

    Searches VCA_PATH (if set) and the usual Juju ssh key locations; returns
    the path to juju_id_rsa.pub, or None when not found.
    """
    paths = []

    if 'VCA_PATH' in os.environ:
        paths.append("{}/ssh".format(os.environ["VCA_PATH"]))

    paths.append(os.path.expanduser("~/.local/share/juju/ssh"))
    paths.append("/root/.local/share/juju/ssh")

    for path in paths:
        if os.path.exists(path):
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(public):
                return public
    return None
def get_lxd_client(host=None, port="8443", verify=False):
    """ Get the LXD client.

    :param host: LXD host; falls back to $LXD_HOST, then localhost.
    :param port: LXD API port (default "8443").
    :param verify: whether to verify the server's TLS certificate.
    :return: a pylxd.Client, or None when no client certificate is available.
    """
    client = None

    if host is None:
        if 'LXD_HOST' in os.environ:
            host = os.environ['LXD_HOST']
        else:
            host = '127.0.0.1'

    passwd = None
    if 'LXD_SECRET' in os.environ:
        passwd = os.environ['LXD_SECRET']

    # debug("Connecting to LXD remote {} w/authentication ({})".format(
    #     host,
    #     passwd,
    # ))
    (crt, key) = find_lxd_config()

    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )

        # If the LXD server has a pasword set, authenticate with it.
        if not client.trusted and passwd:
            try:
                client.authenticate(passwd)
                if not client.trusted:
                    raise Exception("Unable to authenticate with LXD remote")
            except pylxd.exceptions.LXDAPIException as ex:
                # Re-adding an already-trusted cert is harmless; ignore it.
                if 'Certificate already in trust store' in ex:
                    pass

    return client
479 # TODO: This is marked serial but can be run in parallel with work, including:
480 # - Fixing an event loop issue; seems that all tests stop when one test stops?
484 class TestN2VC(object):
486 1. Validator Validation
488 Automatically validate the descriptors we're using here, unless the test
489 author explicitly wants to skip them. Useful to make sure tests aren't
490 being run against invalid descriptors, validating functionality that may
491 fail against a properly written descriptor.
493 We need to have a flag (instance variable) that controls this behavior. It
494 may be necessary to skip validation and run against a descriptor
495 implementing features that have not yet been released in the Information
500 The six phases of integration testing, for the test itself and each charm?:
502 setup/teardown_class:
503 1. Prepare - Verify the environment and create a new model
504 2. Deploy - Mark the test as ready to execute
505 3. Configure - Configuration to reach Active state
506 4. Test - Execute primitive(s) to verify success
507 5. Collect - Collect any useful artifacts for debugging (charm, logs)
508 6. Destroy - Destroy the model
511 1. Prepare - Building of charm
512 2. Deploy - Deploying charm
513 3. Configure - Configuration to reach Active state
514 4. Test - Execute primitive(s) to verify success
515 5. Collect - Collect any useful artifacts for debugging (charm, logs)
516 6. Destroy - Destroy the charm
520 def setup_class(self
):
521 """ setup any state specific to the execution of the given class (which
522 usually contains tests).
524 # Initialize instance variable(s)
527 # Track internal state for each test run
530 # Parse the test's descriptors
531 self
.nsd
= get_descriptor(self
.NSD_YAML
)
532 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
534 self
.ns_name
= self
.nsd
['name']
535 self
.vnf_name
= self
.vnfd
['name']
538 self
.parse_vnf_descriptor()
539 assert self
.charms
is not {}
541 # Track artifacts, like compiled charms, that will need to be removed
544 # Build the charm(s) needed for this test
545 for charm
in self
.get_charm_names():
546 # debug("Building charm {}".format(charm))
547 self
.get_charm(charm
)
549 # A bit of a hack, in order to allow the N2VC callback to run parallel
550 # to pytest. Test(s) should wait for this flag to change to False
553 self
._stopping
= False
556 def teardown_class(self
):
557 """ teardown any state that was previously setup with a call to
560 debug("Running teardown_class...")
563 debug("Destroying LXD containers...")
564 for application
in self
.state
:
565 if self
.state
[application
]['container']:
566 destroy_lxd_container(self
.state
[application
]['container'])
567 debug("Destroying LXD containers...done.")
571 debug("teardown_class(): Logging out of N2VC...")
572 yield from self
.n2vc
.logout()
573 debug("teardown_class(): Logging out of N2VC...done.")
575 debug("Running teardown_class...done.")
576 except Exception as ex
:
577 debug("Exception in teardown_class: {}".format(ex
))
580 def all_charms_active(self
):
581 """Determine if the all deployed charms are active."""
584 for application
in self
.state
:
585 if 'status' in self
.state
[application
]:
586 debug("status of {} is '{}'".format(
588 self
.state
[application
]['status'],
590 if self
.state
[application
]['status'] == 'active':
593 debug("Active charms: {}/{}".format(
598 if active
== len(self
.charms
):
604 def are_tests_finished(self
):
605 appcount
= len(self
.state
)
607 # If we don't have state yet, keep running.
609 debug("No applications")
613 debug("_stopping is True")
617 for application
in self
.state
:
618 if self
.state
[application
]['done']:
621 debug("{}/{} charms tested".format(appdone
, appcount
))
623 if appcount
== appdone
:
629 async def running(self
, timeout
=600):
630 """Returns if the test is still running.
632 @param timeout The time, in seconds, to wait for the test to complete.
634 if self
.are_tests_finished():
638 await asyncio
.sleep(30)
643 def get_charm(self
, charm
):
644 """Build and return the path to the test charm.
646 Builds one of the charms in tests/charms/layers and returns the path
647 to the compiled charm. The charm will automatically be removed when
648 when the test is complete.
650 Returns: The path to the built charm or None if `charm build` failed.
652 # Make sure the charm snap is installed
655 subprocess
.check_call(['which', 'charm'])
656 charm_cmd
= "charm build"
657 except subprocess
.CalledProcessError
:
658 # charm_cmd = "charm-build"
659 # debug("Using legacy charm-build")
660 raise Exception("charm snap not installed.")
662 if charm
not in self
.artifacts
:
664 # Note: This builds the charm under N2VC/tests/charms/builds/
665 # Currently, the snap-installed command only has write access
666 # to the $HOME (changing in an upcoming release) so writing to
667 # /tmp isn't possible at the moment.
669 builds
= get_charm_path()
670 if not os
.path
.exists("{}/builds/{}".format(builds
, charm
)):
671 cmd
= "{} --no-local-layers {}/{} -o {}/".format(
679 env
= os
.environ
.copy()
680 env
["CHARM_BUILD_DIR"] = builds
682 subprocess
.check_call(shlex
.split(cmd
), env
=env
)
684 except subprocess
.CalledProcessError
as e
:
685 # charm build will return error code 100 if the charm fails
686 # the auto-run of charm proof, which we can safely ignore for
688 if e
.returncode
!= 100:
689 raise Exception("charm build failed: {}.".format(e
))
691 self
.artifacts
[charm
] = {
693 'charm': "{}/builds/{}".format(builds
, charm
),
696 return self
.artifacts
[charm
]['charm']
699 async def deploy(self
, vnf_index
, charm
, params
, loop
):
700 """An inner function to do the deployment of a charm from
705 self
.n2vc
= get_n2vc(loop
=loop
)
707 debug("Creating model for Network Service {}".format(self
.ns_name
))
708 await self
.n2vc
.CreateNetworkService(self
.ns_name
)
710 application
= self
.n2vc
.FormatApplicationName(
716 # Initialize the state of the application
717 self
.state
[application
] = {
718 'status': None, # Juju status
719 'container': None, # lxd container, for proxy charms
720 'actions': {}, # Actions we've executed
721 'done': False, # Are we done testing this charm?
722 'phase': "deploy", # What phase is this application in?
725 debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
727 # If this is a native charm, we need to provision the underlying
728 # machine ala an LXC container.
731 if not self
.isproxy(application
):
732 debug("Creating container for native charm")
733 # args = ("default", application, None, None)
734 self
.state
[application
]['container'] = create_lxd_container(
735 name
=os
.path
.basename(__file__
)
738 hostname
= self
.get_container_ip(
739 self
.state
[application
]['container'],
743 'hostname': hostname
,
744 'username': 'ubuntu',
747 await self
.n2vc
.DeployCharms(
751 self
.get_charm(charm
),
758 def parse_vnf_descriptor(self
):
759 """Parse the VNF descriptor to make running tests easier.
761 Parse the charm information in the descriptor to make it easy to write
762 tests to run again it.
764 Each charm becomes a dictionary in a list:
767 'vnf-member-index': 1,
770 'initial-config-primitive': {},
771 'config-primitive': {}
774 - is this a proxy charm?
775 - what are the initial-config-primitives (day 1)?
776 - what are the config primitives (day 2)?
781 # You'd think this would be explicit, but it's just an incremental
782 # value that should be consistent.
785 """Get all vdu and/or vdu config in a descriptor."""
786 config
= self
.get_config()
790 # Get the name to be used for the deployed application
791 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
794 str(vnf_member_index
),
798 'application-name': application_name
,
800 'vnf-member-index': vnf_member_index
,
801 'vnf-name': self
.vnf_name
,
803 'initial-config-primitive': {},
804 'config-primitive': {},
808 charm
['name'] = juju
['charm']
811 charm
['proxy'] = juju
['proxy']
813 if 'initial-config-primitive' in cfg
:
814 charm
['initial-config-primitive'] = \
815 cfg
['initial-config-primitive']
817 if 'config-primitive' in cfg
:
818 charm
['config-primitive'] = cfg
['config-primitive']
820 charms
[application_name
] = charm
822 # Increment the vnf-member-index
823 vnf_member_index
+= 1
828 def isproxy(self
, application_name
):
830 assert application_name
in self
.charms
831 assert 'proxy' in self
.charms
[application_name
]
832 assert type(self
.charms
[application_name
]['proxy']) is bool
834 # debug(self.charms[application_name])
835 return self
.charms
[application_name
]['proxy']
838 def get_config(self
):
839 """Return an iterable list of config items (vdu and vnf).
841 As far as N2VC is concerned, the config section for vdu and vnf are
842 identical. This joins them together so tests only need to iterate
847 """Get all vdu and/or vdu config in a descriptor."""
848 vnf_config
= self
.vnfd
.get("vnf-configuration")
850 juju
= vnf_config
['juju']
852 configs
.append(vnf_config
)
854 for vdu
in self
.vnfd
['vdu']:
855 vdu_config
= vdu
.get('vdu-configuration')
857 juju
= vdu_config
['juju']
859 configs
.append(vdu_config
)
864 def get_charm_names(self
):
865 """Return a list of charms used by the test descriptor."""
869 # Check if the VDUs in this VNF have a charm
870 for config
in self
.get_config():
871 juju
= config
['juju']
874 if name
not in charms
:
880 def get_phase(self
, application
):
881 return self
.state
[application
]['phase']
884 def set_phase(self
, application
, phase
):
885 self
.state
[application
]['phase'] = phase
888 async def configure_proxy_charm(self
, *args
):
889 """Configure a container for use via ssh."""
890 (model
, application
, _
, _
) = args
893 if self
.get_phase(application
) == "deploy":
894 self
.set_phase(application
, "configure")
896 debug("Start CreateContainer for {}".format(application
))
897 self
.state
[application
]['container'] = \
898 await self
.CreateContainer(*args
)
899 debug("Done CreateContainer for {}".format(application
))
901 if self
.state
[application
]['container']:
902 debug("Configure {} for container".format(application
))
903 if await self
.configure_ssh_proxy(application
):
904 await asyncio
.sleep(0.1)
907 debug("Failed to configure container for {}".format(application
))
909 debug("skipping CreateContainer for {}: {}".format(
911 self
.get_phase(application
),
914 except Exception as ex
:
915 debug("configure_proxy_charm exception: {}".format(ex
))
917 await asyncio
.sleep(0.1)
922 async def execute_charm_tests(self
, *args
):
923 (model
, application
, _
, _
) = args
925 debug("Executing charm test(s) for {}".format(application
))
927 if self
.state
[application
]['done']:
928 debug("Trying to execute tests against finished charm...aborting")
932 phase
= self
.get_phase(application
)
933 # We enter the test phase when after deploy (for native charms) or
934 # configure, for proxy charms.
935 if phase
in ["deploy", "configure"]:
936 self
.set_phase(application
, "test")
937 if self
.are_tests_finished():
938 raise Exception("Trying to execute init-config on finished test")
940 if await self
.execute_initial_config_primitives(application
):
942 await self
.check_metrics(application
)
944 debug("Done testing {}".format(application
))
945 self
.state
[application
]['done'] = True
947 except Exception as ex
:
948 debug("Exception in execute_charm_tests: {}".format(ex
))
950 await asyncio
.sleep(0.1)
955 async def CreateContainer(self
, *args
):
956 """Create a LXD container for use with a proxy charm.abs
958 1. Get the public key from the charm via `get-ssh-public-key` action
959 2. Create container with said key injected for the ubuntu user
961 Returns a Container object
963 # Create and configure a LXD container for use with a proxy charm.
964 (model
, application
, _
, _
) = args
966 debug("[CreateContainer] {}".format(args
))
970 # Execute 'get-ssh-public-key' primitive and get returned value
971 uuid
= await self
.n2vc
.ExecutePrimitive(
974 "get-ssh-public-key",
978 result
= await self
.n2vc
.GetPrimitiveOutput(model
, uuid
)
979 pubkey
= result
['pubkey']
981 container
= create_lxd_container(
983 name
=os
.path
.basename(__file__
)
987 except Exception as ex
:
988 debug("Error creating container: {}".format(ex
))
994 async def stop(self
):
998 - Stop and delete containers
1001 TODO: Clean up duplicate code between teardown_class() and stop()
1003 debug("stop() called")
1005 if self
.n2vc
and self
._running
and not self
._stopping
:
1006 self
._running
= False
1007 self
._stopping
= True
1009 # Destroy the network service
1011 await self
.n2vc
.DestroyNetworkService(self
.ns_name
)
1012 except Exception as e
:
1014 "Error Destroying Network Service \"{}\": {}".format(
1020 # Wait for the applications to be removed and delete the containers
1021 for application
in self
.charms
:
1025 # Wait for the application to be removed
1026 await asyncio
.sleep(10)
1027 if not await self
.n2vc
.HasApplication(
1033 # Need to wait for the charm to finish, because native charms
1034 if self
.state
[application
]['container']:
1035 debug("Deleting LXD container...")
1036 destroy_lxd_container(
1037 self
.state
[application
]['container']
1039 self
.state
[application
]['container'] = None
1040 debug("Deleting LXD container...done.")
1042 debug("No container found for {}".format(application
))
1043 except Exception as e
:
1044 debug("Error while deleting container: {}".format(e
))
1048 debug("stop(): Logging out of N2VC...")
1049 await self
.n2vc
.logout()
1051 debug("stop(): Logging out of N2VC...Done.")
1052 except Exception as ex
:
1055 # Let the test know we're finished.
1056 debug("Marking test as finished.")
1057 # self._running = False
1059 debug("Skipping stop()")
1062 def get_container_ip(self
, container
):
1063 """Return the IPv4 address of container's eth0 interface."""
1066 addresses
= container
.state().network
['eth0']['addresses']
1067 # The interface may have more than one address, but we only need
1068 # the first one for testing purposes.
1069 ipaddr
= addresses
[0]['address']
1074 async def configure_ssh_proxy(self
, application
, task
=None):
1075 """Configure the proxy charm to use the lxd container.
1077 Configure the charm to use a LXD container as it's VNF.
1079 debug("Configuring ssh proxy for {}".format(application
))
1081 mgmtaddr
= self
.get_container_ip(
1082 self
.state
[application
]['container'],
1086 "Setting ssh-hostname for {} to {}".format(
1092 await self
.n2vc
.ExecutePrimitive(
1098 'ssh-hostname': mgmtaddr
,
1099 'ssh-username': 'ubuntu',
1106 async def execute_initial_config_primitives(self
, application
, task
=None):
1107 debug("Executing initial_config_primitives for {}".format(application
))
1109 init_config
= self
.charms
[application
]
1112 The initial-config-primitive is run during deploy but may fail
1113 on some steps because proxy charm access isn't configured.
1115 Re-run those actions so we can inspect the status.
1117 uuids
= await self
.n2vc
.ExecuteInitialPrimitives(
1124 ExecutePrimitives will return a list of uuids. We need to check the
1125 status of each. The test continues if all Actions succeed, and
1126 fails if any of them fail.
1128 await self
.wait_for_uuids(application
, uuids
)
1129 debug("Primitives for {} finished.".format(application
))
1132 except Exception as ex
:
1133 debug("execute_initial_config_primitives exception: {}".format(ex
))
1139 async def check_metrics(self
, application
, task
=None):
1140 """Check and run metrics, if present.
1142 Checks to see if metrics are specified by the charm. If so, collects
1145 If no metrics, then mark the test as finished.
1147 if has_metrics(self
.charms
[application
]['name']):
1148 debug("Collecting metrics for {}".format(application
))
1150 metrics
= await self
.n2vc
.GetMetrics(
1155 return await self
.verify_metrics(application
, metrics
)
1158 async def verify_metrics(self
, application
, metrics
):
1159 """Verify the charm's metrics.
1161 Verify that the charm has sent metrics successfully.
1163 Stops the test when finished.
1165 debug("Verifying metrics for {}: {}".format(application
, metrics
))
1171 # TODO: Ran into a case where it took 9 attempts before metrics
1172 # were available; the controller is slow sometimes.
1173 await asyncio
.sleep(30)
1174 return await self
.check_metrics(application
)
1177 async def wait_for_uuids(self
, application
, uuids
):
1178 """Wait for primitives to execute.
1180 The task will provide a list of uuids representing primitives that are
1183 debug("Waiting for uuids for {}: {}".format(application
, uuids
))
1184 waitfor
= len(uuids
)
1187 while waitfor
> finished
:
1189 await asyncio
.sleep(10)
1191 if uuid
not in self
.state
[application
]['actions']:
1192 self
.state
[application
]['actions'][uid
] = "pending"
1194 status
= self
.state
[application
]['actions'][uid
]
1196 # Have we already marked this as done?
1197 if status
in ["pending", "running"]:
1199 debug("Getting status of {} ({})...".format(uid
, status
))
1200 status
= await self
.n2vc
.GetPrimitiveStatus(
1204 debug("...state of {} is {}".format(uid
, status
))
1205 self
.state
[application
]['actions'][uid
] = status
1207 if status
in ['completed', 'failed']:
1210 debug("{}/{} actions complete".format(finished
, waitfor
))
1212 # Wait for the primitive to finish and try again
1213 if waitfor
> finished
:
1214 debug("Waiting 10s for action to finish...")
1215 await asyncio
.sleep(10)
1218 def n2vc_callback(self
, *args
, **kwargs
):
1219 (model
, application
, status
, message
) = args
1220 # debug("callback: {}".format(args))
1222 if application
not in self
.state
:
1223 # Initialize the state of the application
1224 self
.state
[application
] = {
1225 'status': None, # Juju status
1226 'container': None, # lxd container, for proxy charms
1227 'actions': {}, # Actions we've executed
1228 'done': False, # Are we done testing this charm?
1229 'phase': "deploy", # What phase is this application in?
1232 self
.state
[application
]['status'] = status
1234 if status
in ['waiting', 'maintenance', 'unknown']:
1235 # Nothing to do for these
1238 debug("callback: {}".format(args
))
1240 if self
.state
[application
]['done']:
1241 debug("{} is done".format(application
))
1244 if status
in ['error']:
1245 # To test broken charms, if a charm enters an error state we should
1247 debug("{} is in an error state, stop the test.".format(application
))
1248 # asyncio.ensure_future(self.stop())
1249 self
.state
[application
]['done'] = True
1252 if status
in ["blocked"] and self
.isproxy(application
):
1253 if self
.state
[application
]['phase'] == "deploy":
1254 debug("Configuring proxy charm for {}".format(application
))
1255 asyncio
.ensure_future(self
.configure_proxy_charm(*args
))
1257 elif status
in ["active"]:
1258 """When a charm is active, we can assume that it has been properly
1259 configured (not blocked), regardless of if it's a proxy or not.
1261 All primitives should be complete by init_config_primitive
1263 asyncio
.ensure_future(self
.execute_charm_tests(*args
))