18 from juju
.controller
import Controller
# Disable InsecureRequestWarning w/LXD
urllib3.disable_warnings()
logging.getLogger("urllib3").setLevel(logging.WARNING)

# Absolute path of the directory containing this file; used to locate the
# bundled test charms and layers.
here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
    """Return True if a Juju controller is currently bootstrapped.

    Shells out to `juju switch`; a zero exit status plus non-empty output
    means an active controller is selected.
    """
    result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    return (
        result.returncode == 0 and
        len(result.stdout.decode().strip()) > 0)
# pytest marker: skip a test unless a bootstrapped Juju controller exists.
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # The connected Controller instance; set on __aenter__.
        self._controller = None

    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller

    async def __aexit__(self, exc_type, exc, tb):
        # Always disconnect, even if the managed block raised.
        await self._controller.disconnect()
def get_charm_path():
    """Return the path to the bundled test charms (tests/charms)."""
    return "{}/charms".format(here)
def get_layer_path():
    """Return the path to the test charm layers (tests/charms/layers)."""
    return "{}/charms/layers".format(here)
def parse_metrics(application, results):
    """Parse the returned metrics into a dict.

    :param application: the application name whose unit metrics we want
    :param results: mapping of unit name -> list of {'key', 'value'} dicts
    :return: dict mapping metric key -> metric value
    """
    retval = {}

    # We'll receive the results for all units, to look for the one we want
    # Caveat: we're grabbing results from the first unit of the application,
    # which is enough for testing, since we're only deploying a single unit.
    for unit in results:
        if unit.startswith(application):
            for result in results[unit]:
                retval[result['key']] = result['value']
    return retval
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    the client library.

    :param application: the application to collect metrics for
    :raises Exception: if the juju command exits non-zero
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
    """Check if a charm has metrics defined."""
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    if os.path.exists(metricsyaml):
        return True
    return False
def get_descriptor(descriptor):
    """Parse a YAML NSD/VNFD and strip its catalog envelope.

    :param descriptor: YAML text of the descriptor
    :return: the inner nsd/vnfd dict, or None if parsing failed
    """
    desc = None
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; consider yaml.safe_load (behavior kept as-is).
        tmp = yaml.load(descriptor)

        # Remove the envelope
        root = list(tmp.keys())[0]
        if root == "nsd:nsd-catalog":
            desc = tmp['nsd:nsd-catalog']['nsd'][0]
        elif root == "vnfd:vnfd-catalog":
            desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    except ValueError:
        assert False
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF."""
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Running under tox/pytest makes getting env variables harder.

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # NOTE(review): keyword names reconstructed from N2VC's constructor;
    # confirm against n2vc.vnf.N2VC.__init__.
    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
    )
    return client
def create_lxd_container(public_key=None, name="test_name"):
    """
    Returns a container object

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_juju_ssh_keys()

    # create profile w/cloud-init and juju ssh key
    if not public_key:
        with open(public_key_path, "r") as f:
            public_key = f.readline()

    client.profiles.create(
        test_machine,
        config={
            'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
        devices={
            'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
            'eth0': {
                'nictype': 'bridged',
                # NOTE(review): parent bridge name reconstructed — confirm
                'parent': 'lxdbr0',
                'type': 'nic',
            },
        },
    )

    # Create and start the container, pulling a cloud image via simplestreams.
    config = {
        'name': test_machine,
        'source': {
            'type': 'image',
            'alias': 'xenial',
            'mode': 'pull',
            'protocol': 'simplestreams',
            'server': 'https://cloud-images.ubuntu.com/releases',
        },
        'profiles': [test_machine],
    }
    container = client.containers.create(config, wait=True)
    container.start(wait=True)

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    wait_for_network(container)

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    time.sleep(5)

    return container
def destroy_lxd_container(container):
    """Stop and delete a LXD container."""
    name = container.name
    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll until the container reports a Stopped state (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll until the container no longer exists (or timeout)."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    container.stop(wait=False)
    wait_for_stop()

    container.delete(wait=False)
    wait_for_delete()

    # Delete the profile created for this container
    profile = client.profiles.get(name)
    if profile:
        profile.delete()
def find_lxd_config():
    """Find the LXD configuration directory.

    Checks the classic and snap config locations for a client certificate
    and key pair.

    :return: (crt_path, key_path) if found, otherwise (None, None)
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)
    # Explicit fallback so callers can always unpack a 2-tuple.
    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    :return: (private_key_path, public_key_path) if found,
             otherwise (None, None)
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)
    # Explicit fallback so callers can always unpack a 2-tuple.
    return (None, None)
def get_juju_private_key():
    """Return the path to the Juju private ssh key (or None if not found)."""
    keys = find_juju_ssh_keys()
    return keys[0]
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """ Get the LXD client.

    :param host: LXD API host
    :param port: LXD API port
    :param verify: whether to verify the TLS certificate
    :return: a pylxd.Client, or None if no client cert/key was found
    """
    client = None
    (crt, key) = find_lxd_config()

    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )
    return client
310 # TODO: This is marked serial but can be run in parallel with work, including:
311 # - Fixing an event loop issue; seems that all tests stop when one test stops?
# TODO: This is marked serial but can be run in parallel with work, including:
# - Fixing an event loop issue; seems that all tests stop when one test stops?
class TestN2VC(object):
    """TODO:
    1. Validator Validation

    Automatically validate the descriptors we're using here, unless the test
    author explicitly wants to skip them. Useful to make sure tests aren't
    being run against invalid descriptors, validating functionality that may
    fail against a properly written descriptor.

    We need to have a flag (instance variable) that controls this behavior.
    It may be necessary to skip validation and run against a descriptor
    implementing features that have not yet been released in the
    Information Model.
    """
325 def setup_class(self
):
326 """ setup any state specific to the execution of the given class (which
327 usually contains tests).
329 # Initialize instance variable(s)
330 # self.container = None
332 # Track internal state for each test run
335 # Parse the test's descriptors
336 self
.nsd
= get_descriptor(self
.NSD_YAML
)
337 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
339 self
.ns_name
= self
.nsd
['name']
340 self
.vnf_name
= self
.vnfd
['name']
343 self
.parse_vnf_descriptor()
344 assert self
.charms
is not {}
346 # Track artifacts, like compiled charms, that will need to be removed
349 # Build the charm(s) needed for this test
350 for charm
in self
.get_charm_names():
351 self
.get_charm(charm
)
353 # A bit of a hack, in order to allow the N2VC callback to run parallel
354 # to pytest. Test(s) should wait for this flag to change to False
359 def teardown_class(self
):
360 """ teardown any state that was previously setup with a call to
363 for application
in self
.state
:
365 "Destroying container for application {}".format(application
)
367 if self
.state
[application
]['container']:
368 destroy_lxd_container(self
.state
[application
]['container'])
370 # Clean up any artifacts created during the test
371 logging
.debug("Artifacts: {}".format(self
.artifacts
))
372 for charm
in self
.artifacts
:
373 artifact
= self
.artifacts
[charm
]
374 if os
.path
.exists(artifact
['tmpdir']):
375 logging
.debug("Removing directory '{}'".format(artifact
))
376 shutil
.rmtree(artifact
['tmpdir'])
380 asyncio
.ensure_future(self
.n2vc
.logout())
381 logging
.debug("Tearing down")
385 def all_charms_active(self
):
386 """Determine if the all deployed charms are active."""
388 for application
in self
.charms
:
389 if self
.charms
[application
]['status'] == 'active':
392 if active
== len(self
.charms
):
393 logging
.warn("All charms active!")
399 def running(self
, timeout
=600):
400 """Returns if the test is still running.
402 @param timeout The time, in seconds, to wait for the test to complete.
405 # if start + now > start > timeout:
410 def get_charm(self
, charm
):
411 """Build and return the path to the test charm.
413 Builds one of the charms in tests/charms/layers and returns the path
414 to the compiled charm. The charm will automatically be removed when
415 when the test is complete.
417 Returns: The path to the built charm or None if `charm build` failed.
420 # Make sure the charm snap is installed
422 subprocess
.check_call(['which', 'charm'])
423 except subprocess
.CalledProcessError
as e
:
424 raise Exception("charm snap not installed.")
426 if charm
not in self
.artifacts
:
428 # Note: This builds the charm under N2VC/tests/charms/
429 # The snap-installed command only has write access to the users $HOME
430 # so writing to /tmp isn't possible at the moment.
431 builds
= tempfile
.mkdtemp(dir=get_charm_path())
433 cmd
= "charm build {}/{} -o {}/".format(
440 subprocess
.check_call(shlex
.split(cmd
))
442 self
.artifacts
[charm
] = {
444 'charm': "{}/builds/{}".format(builds
, charm
),
446 except subprocess
.CalledProcessError
as e
:
447 raise Exception("charm build failed: {}.".format(e
))
449 return self
.artifacts
[charm
]['charm']
452 async def deploy(self
, vnf_index
, charm
, params
, loop
):
453 """An inner function to do the deployment of a charm from
457 self
.n2vc
= get_n2vc(loop
=loop
)
459 vnf_name
= self
.n2vc
.FormatApplicationName(
464 logging
.debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
466 await self
.n2vc
.DeployCharms(
470 self
.get_charm(charm
),
477 def parse_vnf_descriptor(self
):
478 """Parse the VNF descriptor to make running tests easier.
480 Parse the charm information in the descriptor to make it easy to write
481 tests to run again it.
483 Each charm becomes a dictionary in a list:
486 'vnf-member-index': 1,
489 'initial-config-primitive': {},
490 'config-primitive': {}
493 - is this a proxy charm?
494 - what are the initial-config-primitives (day 1)?
495 - what are the config primitives (day 2)?
500 # You'd think this would be explicit, but it's just an incremental
501 # value that should be consistent.
504 """Get all vdu and/or vdu config in a descriptor."""
505 config
= self
.get_config()
509 # Get the name to be used for the deployed application
510 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
513 str(vnf_member_index
),
517 'application-name': application_name
,
519 'vnf-member-index': vnf_member_index
,
520 'vnf-name': self
.vnf_name
,
522 'initial-config-primitive': {},
523 'config-primitive': {},
527 charm
['name'] = juju
['charm']
530 charm
['proxy'] = juju
['proxy']
532 if 'initial-config-primitive' in cfg
:
533 charm
['initial-config-primitive'] = \
534 cfg
['initial-config-primitive']
536 if 'config-primitive' in cfg
:
537 charm
['config-primitive'] = cfg
['config-primitive']
539 charms
[application_name
] = charm
541 # Increment the vnf-member-index
542 vnf_member_index
+= 1
547 def isproxy(self
, application_name
):
549 assert application_name
in self
.charms
550 assert 'proxy' in self
.charms
[application_name
]
551 assert type(self
.charms
[application_name
]['proxy']) is bool
553 # logging.debug(self.charms[application_name])
554 return self
.charms
[application_name
]['proxy']
557 def get_config(self
):
558 """Return an iterable list of config items (vdu and vnf).
560 As far as N2VC is concerned, the config section for vdu and vnf are
561 identical. This joins them together so tests only need to iterate
566 """Get all vdu and/or vdu config in a descriptor."""
567 vnf_config
= self
.vnfd
.get("vnf-configuration")
569 juju
= vnf_config
['juju']
571 configs
.append(vnf_config
)
573 for vdu
in self
.vnfd
['vdu']:
574 vdu_config
= vdu
.get('vdu-configuration')
576 juju
= vdu_config
['juju']
578 configs
.append(vdu_config
)
583 def get_charm_names(self
):
584 """Return a list of charms used by the test descriptor."""
588 # Check if the VDUs in this VNF have a charm
589 for config
in self
.get_config():
590 juju
= config
['juju']
593 if name
not in charms
:
599 async def CreateContainer(self
, *args
):
600 """Create a LXD container for use with a proxy charm.abs
602 1. Get the public key from the charm via `get-ssh-public-key` action
603 2. Create container with said key injected for the ubuntu user
605 # Create and configure a LXD container for use with a proxy charm.
606 (model
, application
, _
, _
) = args
607 # self.state[application_name]
609 print("trying to create container")
610 if self
.state
[application
]['container'] is None:
612 "Creating container for application {}".format(application
)
614 # HACK: Set this so the n2vc_callback knows
615 # there's a container being created
616 self
.state
[application
]['container'] = True
618 # Execute 'get-ssh-public-key' primitive and get returned value
619 uuid
= await self
.n2vc
.ExecutePrimitive(
622 "get-ssh-public-key",
625 result
= await self
.n2vc
.GetPrimitiveOutput(model
, uuid
)
626 pubkey
= result
['pubkey']
628 self
.state
[application
]['container'] = create_lxd_container(
630 name
=os
.path
.basename(__file__
)
633 return self
.state
[application
]['container']
640 - Stop and delete containers
643 logging
.warning("Stop the test.")
645 for application
in self
.charms
:
647 logging
.warn("Removing charm")
648 await self
.n2vc
.RemoveCharms(model
, application
)
651 "Destroying container for application {}".format(application
)
653 if self
.state
[application
]['container']:
654 destroy_lxd_container(self
.state
[application
]['container'])
655 except Exception as e
:
656 logging
.warn("Error while deleting container: {}".format(e
))
658 # Clean up any artifacts created during the test
659 logging
.debug("Artifacts: {}".format(self
.artifacts
))
660 for charm
in self
.artifacts
:
661 artifact
= self
.artifacts
[charm
]
662 if os
.path
.exists(artifact
['tmpdir']):
663 logging
.debug("Removing directory '{}'".format(artifact
))
664 shutil
.rmtree(artifact
['tmpdir'])
667 await self
.n2vc
.logout()
670 self
._running
= False
673 def get_container_ip(self
, container
):
674 """Return the IPv4 address of container's eth0 interface."""
677 addresses
= container
.state().network
['eth0']['addresses']
678 # The interface may have more than one address, but we only need
679 # the first one for testing purposes.
680 ipaddr
= addresses
[0]['address']
685 def n2vc_callback(self
, *args
, **kwargs
):
686 """Monitor and react to changes in the charm state.
688 This is where we will monitor the state of the charm:
691 - is it waiting on input to continue?
693 When the state changes, we respond appropriately:
694 - configuring ssh credentials for a proxy charm
695 - running a service primitive
697 Lastly, when the test has finished we begin the teardown, removing the
698 charm and associated LXD container, and notify pytest that this test
701 Args are expected to contain four values, received from N2VC:
702 - str, the name of the model
703 - str, the name of the application
704 - str, the workload status as reported by Juju
705 - str, the workload message as reported by Juju
707 (model
, application
, status
, message
) = args
708 # logging.warn("Callback for {}/{} - {} ({})".format(
715 if application
not in self
.state
:
716 # Initialize the state of the application
717 self
.state
[application
] = {
722 # Make sure we're only receiving valid status. This will catch charms
723 # that aren't setting their workload state and appear as "unknown"
724 # assert status not in ["active", "blocked", "waiting", "maintenance"]
727 if kwargs
and 'task' in kwargs
:
728 task
= kwargs
['task']
729 # logging.debug("Got task: {}".format(task))
731 # if application in self.charms:
732 self
.state
[application
]['status'] = status
734 # Closures and inner functions, oh my.
736 """Is the charm in an active state?"""
737 if status
in ["active"]:
742 """Is the charm waiting for us?"""
743 if status
in ["blocked"]:
747 def configure_ssh_proxy(task
):
748 """Configure the proxy charm to use the lxd container."""
749 logging
.debug("configure_ssh_proxy({})".format(task
))
751 mgmtaddr
= self
.get_container_ip(
752 self
.state
[application
]['container'],
756 "Setting config ssh-hostname={}".format(mgmtaddr
)
759 # task = asyncio.ensure_future(
764 task
= asyncio
.ensure_future(
765 self
.n2vc
.ExecutePrimitive(
771 'ssh-hostname': mgmtaddr
,
772 'ssh-username': 'ubuntu',
777 # Execute the VNFD's 'initial-config-primitive'
778 task
.add_done_callback(functools
.partial(
779 execute_initial_config_primitives
,
782 def execute_initial_config_primitives(task
=None):
783 logging
.debug("execute_initial_config_primitives({})".format(task
))
785 init_config
= self
.charms
[application
]
788 The initial-config-primitive is run during deploy but may fail
789 on some steps because proxy charm access isn't configured.
791 At this stage, we'll re-run those actions.
794 task
= asyncio
.ensure_future(
795 self
.n2vc
.ExecuteInitialPrimitives(
803 ExecutePrimitives will return a list of uuids. We need to check the
804 status of each. The test continues if all Actions succeed, and
805 fails if any of them fail.
807 task
.add_done_callback(functools
.partial(wait_for_uuids
))
810 task
= asyncio
.ensure_future(
811 self
.n2vc
.GetMetrics(
817 task
.add_done_callback(
823 def verify_metrics(task
):
824 logging
.debug("Verifying metrics!")
825 # Check if task returned metrics
826 results
= task
.result()
828 metrics
= parse_metrics(application
, results
)
829 logging
.debug(metrics
)
832 logging
.warn("[metrics] removing charms")
833 task
= asyncio
.ensure_future(
834 self
.n2vc
.RemoveCharms(model
, application
)
837 task
.add_done_callback(functools
.partial(self
.stop
))
840 # TODO: Ran into a case where it took 9 attempts before metrics
841 # were available; the controller is slow sometimes.
845 def wait_for_uuids(task
):
846 logging
.debug("wait_for_uuids({})".format(task
))
847 uuids
= task
.result()
852 def get_primitive_result(uuid
, task
):
853 logging
.debug("Got result from action")
854 # completed, failed, or running
855 result
= task
.result()
857 if status
in result
and result
['status'] \
858 in ["completed", "failed"]:
861 logging
.debug("Action {} is {}".format(
863 task
.result
['status'])
867 logging
.debug("action is still running")
869 def get_primitive_status(uuid
, task
):
870 result
= task
.result()
872 if result
== "completed":
873 # Make sure all primitives are finished
877 if waitfor
== finished
:
878 if self
.all_charms_active():
879 logging
.debug("Action complete; removing charm")
880 task
= asyncio
.ensure_future(
883 # task = asyncio.ensure_future(
884 # self.n2vc.RemoveCharms(model, application)
886 # task.add_done_callback(functools.partial(stop_test))
888 logging
.warn("Not all charms in an active state.")
889 elif result
== "failed":
890 logging
.debug("Action failed; removing charm")
891 task
= asyncio
.ensure_future(
894 # task = asyncio.ensure_future(
895 # self.n2vc.RemoveCharms(model, application)
897 # task.add_done_callback(functools.partial(stop_test))
900 # self._running = False
903 # logging.debug("action is still running: {}".format(result))
904 # logging.debug(result)
906 # The primitive is running; try again.
907 task
= asyncio
.ensure_future(
908 self
.n2vc
.GetPrimitiveStatus(model
, uuid
)
910 task
.add_done_callback(functools
.partial(
911 get_primitive_result
,
915 for actionid
in uuids
:
916 task
= asyncio
.ensure_future(
917 self
.n2vc
.GetPrimitiveStatus(model
, actionid
)
919 task
.add_done_callback(functools
.partial(
920 get_primitive_result
,
924 # def stop_test(task):
927 # When the test has either succeeded or reached a failing state,
928 # begin the process of removing the test fixtures.
930 # for application in self.charms:
931 # asyncio.ensure_future(
932 # self.n2vc.RemoveCharms(model, application)
935 # self._running = False
938 # Container only applies to proxy charms.
939 if self
.isproxy(application
):
941 if self
.state
[application
]['container'] is None:
942 logging
.warn("Creating new container")
943 # Create the new LXD container
945 task
= asyncio
.ensure_future(self
.CreateContainer(*args
))
947 # Configure the proxy charm to use the container when ready
948 task
.add_done_callback(functools
.partial(
952 # task.add_done_callback(functools.partial(
955 # create_lxd_container()
956 # self.container = True
958 # logging.warn("{} already has container".format(application))
960 # task = asyncio.ensure_future(
961 # self.n2vc.RemoveCharms(model, application)
963 # task.add_done_callback(functools.partial(stop_test))
966 # A charm may validly be in a blocked state if it's waiting for
967 # relations or some other kind of manual intervention
968 # logging.debug("This is not a proxy charm.")
969 # TODO: needs testing
970 task
= asyncio
.ensure_future(
971 execute_initial_config_primitives()
974 # task.add_done_callback(functools.partial(stop_test))
977 # Does the charm have metrics defined?
978 if has_metrics(self
.charms
[application
]['name']):
979 # logging.debug("metrics.yaml defined in the layer!")
981 # Force a run of the metric collector, so we don't have
982 # to wait for it's normal 5 minute interval run.
983 # NOTE: this shouldn't be done outside of CI
984 collect_metrics(application
)
986 # get the current metrics
989 # When the charm reaches an active state and hasn't been
990 # handled (metrics collection, etc)., the test has succeded.
991 # logging.debug("Charm is active! Removing charm...")
992 if self
.all_charms_active():
993 logging
.warn("All charms active!")
994 task
= asyncio
.ensure_future(
998 # task = asyncio.ensure_future(
999 # self.n2vc.RemoveCharms(model, application)
1001 # task.add_done_callback(functools.partial(stop_test))
1003 logging
.warning("Waiting for all charms to be active.")
1004 # task = asyncio.ensure_future(
1005 # self.n2vc.RemoveCharms(model, application)
1007 # task.add_done_callback(functools.partial(stop_test))