18 from juju
.controller
import Controller
# Disable InsecureRequestWarning w/LXD: the test LXD endpoint uses a
# self-signed certificate, so urllib3 would otherwise spam warnings.
urllib3.disable_warnings()
logging.getLogger("urllib3").setLevel(logging.WARNING)
# Absolute directory of this file; used to locate the bundled test charms.
here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
    """Return True if a Juju controller is currently bootstrapped.

    Shells out to `juju switch`; a zero exit status together with
    non-empty output means an active controller/model is selected.
    """
    result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    # The `return (` wrapper was lost in the mangled source; it is the only
    # construct that matches the dangling expression and closing paren.
    return (
        result.returncode == 0 and
        len(result.stdout.decode().strip()) > 0)
# pytest marker: skip the decorated test unless a Juju controller is
# already bootstrapped on this machine.
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # No connection is made until __aenter__ runs.
        self._controller = None

    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
def get_charm_path():
    """Return the path to the test charms directory (<here>/charms)."""
    # NOTE(review): the `def` line was lost in the mangled source and has
    # been reconstructed from the function's call sites.
    return "{}/charms".format(here)
def get_layer_path():
    """Return the path to the charm layers directory (<here>/charms/layers)."""
    # NOTE(review): the `def` line was lost in the mangled source and has
    # been reconstructed from the function's call sites.
    return "{}/charms/layers".format(here)
def parse_metrics(application, results):
    """Parse the returned metrics into a dict.

    :param application: application name prefix used to select the unit.
    :param results: mapping of unit name -> list of {'key': .., 'value': ..}
        metric entries, as returned by the Juju metrics API.
    :return: dict mapping metric key -> metric value for the first matching
        unit's entries.
    """
    retval = {}
    # We'll receive the results for all units, to look for the one we want
    # Caveat: we're grabbing results from the first unit of the application,
    # which is enough for testing, since we're only deploying a single unit.
    for unit in results:
        if unit.startswith(application):
            for result in results[unit]:
                retval[result['key']] = result['value']
    return retval
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    the client library.

    :param application: the name of the deployed application to collect for.
    :raises Exception: if the `juju collect-metrics` command fails.
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
    """Check if a charm has metrics defined.

    :param charm: the layer name under the layers directory.
    :return: True if <layers>/<charm>/metrics.yaml exists, else False.
    """
    # NOTE(review): the format() arguments were lost in the mangled source;
    # reconstructed as (layer path, charm name) from the path template.
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    if os.path.exists(metricsyaml):
        return True
    return False
def get_descriptor(descriptor):
    """Parse a YAML NSD/VNFD descriptor and strip its catalog envelope.

    :param descriptor: YAML text of an nsd:nsd-catalog or vnfd:vnfd-catalog
        document.
    :return: the first nsd/vnfd dict inside the envelope, or None when the
        root key is not a recognized catalog.
    """
    desc = None
    try:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input — acceptable here only because descriptors are
        # test fixtures; consider yaml.safe_load.
        tmp = yaml.load(descriptor)

        # Remove the envelope
        root = list(tmp.keys())[0]
        if root == "nsd:nsd-catalog":
            desc = tmp['nsd:nsd-catalog']['nsd'][0]
        elif root == "vnfd:vnfd-catalog":
            desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    except ValueError:
        # NOTE(review): reconstructed failure path — `assert False` is
        # stripped under -O; an explicit raise would be more robust.
        assert False
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF.

    :param loop: optional asyncio event loop to hand to the N2VC client.
    """
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Running under tox/pytest makes getting env variables harder.

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # NOTE(review): all keyword arguments except artifacts= were lost in the
    # mangled source and reconstructed from the VCA_* variables read above —
    # confirm against n2vc.vnf.N2VC's signature.
    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
    )
    return client
def create_lxd_container(public_key=None, name="test_name"):
    """
    Returns a container object

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid (replace characters LXD rejects)
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    # Unique machine name: short random suffix plus the sanitized test name.
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_juju_ssh_keys()

    # create profile w/cloud-init and juju ssh key
    if not public_key:
        with open(public_key_path, "r") as f:
            public_key = f.readline()

    # NOTE(review): parts of the profile/container dictionaries were lost in
    # the mangled source; the keys below are reconstructed from the surviving
    # fragments ('root' disk, bridged nic, simplestreams source, profile
    # name) — confirm against the original file / pylxd docs.
    client.profiles.create(
        test_machine,
        config={
            'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
        devices={
            'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
            'eth0': {
                'nictype': 'bridged',
                'parent': 'lxdbr0',
                'type': 'nic',
            },
        },
    )

    config = {
        'name': test_machine,
        'source': {
            'type': 'image',
            'mode': 'pull',
            'protocol': 'simplestreams',
            'server': 'https://cloud-images.ubuntu.com/releases',
            'alias': 'xenial',
        },
        'profiles': [test_machine],
    }

    container = client.containers.create(config, wait=True)
    container.start(wait=True)

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    wait_for_network(container)

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    time.sleep(5)

    return container
def destroy_lxd_container(container):
    """Stop and delete a LXD container.

    :param container: a pylxd container object previously returned by
        create_lxd_container().
    """
    name = container.name
    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll (up to `timeout` seconds) until the container reports Stopped."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            # NOTE(review): comparing the `state` attribute to a string looks
            # suspect (pylxd exposes state() as a method) — confirm against
            # the pylxd API; preserved as reconstructed from the source.
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll (up to `timeout` seconds) until the container no longer exists."""
        starttime = time.time()
        while(time.time() < starttime + timeout):
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    # Stop and remove asynchronously, then poll for completion ourselves.
    container.stop(wait=False)
    wait_for_stop()

    container.delete(wait=False)
    wait_for_delete()

    # Delete the profile created for this container
    profile = client.profiles.get(name)
    profile.delete()
def find_lxd_config():
    """Find the LXD configuration directory.

    Checks the classic (~/.config/lxc) and snap
    (~/snap/lxd/current/.config/lxc) locations.

    :return: (client.crt path, client.key path) when both files exist,
        otherwise (None, None).
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)
    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    :return: (private key path, public key path) when both juju_id_rsa files
        exist under ~/.local/share/juju/ssh/, otherwise (None, None).
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)
    return (None, None)
def get_juju_private_key():
    """Return the path to the Juju private ssh key (or None if not found)."""
    keys = find_juju_ssh_keys()
    # NOTE(review): the return statement was lost in the mangled source;
    # returning the private-key element of the (private, public) pair.
    return keys[0]
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """ Get the LXD client.

    :param host: LXD API host.
    :param port: LXD API port.
    :param verify: whether to verify the server's TLS certificate.
    :return: a pylxd.Client, or None when no client cert/key pair is found.
    """
    client = None
    (crt, key) = find_lxd_config()

    # NOTE(review): the cert/verify keyword arguments were lost in the
    # mangled source and reconstructed from find_lxd_config()'s return and
    # this function's parameters — confirm against the pylxd.Client API.
    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )
    return client
310 # TODO: This is marked serial but can be run in parallel with work, including:
311 # - Fixing an event loop issue; seems that all tests stop when one test stops?
315 class TestN2VC(object):
317 1. Validator Validation
319 Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
321 We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
325 def setup_class(self
):
326 """ setup any state specific to the execution of the given class (which
327 usually contains tests).
329 # Initialize instance variable(s)
330 self
.container
= None
332 # Parse the test's descriptors
333 self
.nsd
= get_descriptor(self
.NSD_YAML
)
334 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
336 self
.ns_name
= self
.nsd
['name']
337 self
.vnf_name
= self
.vnfd
['name']
340 self
.parse_vnf_descriptor()
341 assert self
.charms
is not {}
343 # Track artifacts, like compiled charms, that will need to be removed
346 # Build the charm(s) needed for this test
347 for charm
in self
.get_charm_names():
348 self
.get_charm(charm
)
350 # A bit of a hack, in order to allow the N2VC callback to run parallel
351 # to pytest. Test(s) should wait for this flag to change to False
356 def teardown_class(self
):
357 """ teardown any state that was previously setup with a call to
361 destroy_lxd_container(self
.container
)
363 # Clean up any artifacts created during the test
364 logging
.debug("Artifacts: {}".format(self
.artifacts
))
365 for charm
in self
.artifacts
:
366 artifact
= self
.artifacts
[charm
]
367 if os
.path
.exists(artifact
['tmpdir']):
368 logging
.debug("Removing directory '{}'".format(artifact
))
369 shutil
.rmtree(artifact
['tmpdir'])
372 asyncio
.ensure_future(self
.n2vc
.logout())
375 def running(self
, timeout
=600):
376 """Returns if the test is still running.
378 @param timeout The time, in seconds, to wait for the test to complete.
381 # if start + now > start > timeout:
386 def get_charm(self
, charm
):
387 """Build and return the path to the test charm.
389 Builds one of the charms in tests/charms/layers and returns the path
390 to the compiled charm. The charm will automatically be removed when
391 when the test is complete.
393 Returns: The path to the built charm or None if `charm build` failed.
396 # Make sure the charm snap is installed
398 subprocess
.check_call(['which', 'charm'])
399 except subprocess
.CalledProcessError
as e
:
400 raise Exception("charm snap not installed.")
402 if charm
not in self
.artifacts
:
404 # Note: This builds the charm under N2VC/tests/charms/
405 # The snap-installed command only has write access to the users $HOME
406 # so writing to /tmp isn't possible at the moment.
407 builds
= tempfile
.mkdtemp(dir=get_charm_path())
409 cmd
= "charm build {}/{} -o {}/".format(
416 subprocess
.check_call(shlex
.split(cmd
))
418 self
.artifacts
[charm
] = {
420 'charm': "{}/builds/{}".format(builds
, charm
),
422 except subprocess
.CalledProcessError
as e
:
423 raise Exception("charm build failed: {}.".format(e
))
425 return self
.artifacts
[charm
]['charm']
428 async def deploy(self
, vnf_index
, charm
, params
, loop
):
429 """An inner function to do the deployment of a charm from
433 self
.n2vc
= get_n2vc(loop
=loop
)
435 vnf_name
= self
.n2vc
.FormatApplicationName(
440 logging
.debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
442 await self
.n2vc
.DeployCharms(
446 self
.get_charm(charm
),
453 def parse_vnf_descriptor(self
):
454 """Parse the VNF descriptor to make running tests easier.
456 Parse the charm information in the descriptor to make it easy to write
457 tests to run again it.
459 Each charm becomes a dictionary in a list:
462 'vnf-member-index': 1,
466 'initial-config-primitive': {},
467 'config-primitive': {}
470 - is this a proxy charm?
471 - what are the initial-config-primitives (day 1)?
472 - what are the config primitives (day 2)?
477 # You'd think this would be explicit, but it's just an incremental
478 # value that should be consistent.
481 """Get all vdu and/or vdu config in a descriptor."""
482 config
= self
.get_config()
486 # Get the name to be used for the deployed application
487 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
490 str(vnf_member_index
),
494 'application-name': application_name
,
496 'vnf-member-index': vnf_member_index
,
497 'vnf-name': self
.vnf_name
,
499 'initial-config-primitive': {},
500 'config-primitive': {},
504 charm
['name'] = juju
['charm']
507 charm
['proxy'] = juju
['proxy']
509 if 'initial-config-primitive' in cfg
:
510 charm
['initial-config-primitive'] = \
511 cfg
['initial-config-primitive']
513 if 'config-primitive' in cfg
:
514 charm
['config-primitive'] = cfg
['config-primitive']
516 charms
[application_name
] = charm
518 # Increment the vnf-member-index
519 vnf_member_index
+= 1
524 def isproxy(self
, application_name
):
526 assert application_name
in self
.charms
527 assert 'proxy' in self
.charms
[application_name
]
528 assert type(self
.charms
[application_name
]['proxy']) is bool
530 # logging.debug(self.charms[application_name])
531 return self
.charms
[application_name
]['proxy']
534 def get_config(self
):
535 """Return an iterable list of config items (vdu and vnf).
537 As far as N2VC is concerned, the config section for vdu and vnf are
538 identical. This joins them together so tests only need to iterate
543 """Get all vdu and/or vdu config in a descriptor."""
544 vnf_config
= self
.vnfd
.get("vnf-configuration")
546 juju
= vnf_config
['juju']
548 configs
.append(vnf_config
)
550 for vdu
in self
.vnfd
['vdu']:
551 vdu_config
= vdu
.get('vdu-configuration')
553 juju
= vdu_config
['juju']
555 configs
.append(vdu_config
)
560 def get_charm_names(self
):
561 """Return a list of charms used by the test descriptor."""
565 # Check if the VDUs in this VNF have a charm
566 for config
in self
.get_config():
567 juju
= config
['juju']
570 if name
not in charms
:
576 async def CreateContainer(self
, *args
):
577 """Create a LXD container for use with a proxy charm.abs
579 1. Get the public key from the charm via `get-ssh-public-key` action
580 2. Create container with said key injected for the ubuntu user
582 if self
.container
is None:
583 # logging.debug("CreateContainer called.")
585 # HACK: Set this so the n2vc_callback knows
586 # there's a container being created
587 self
.container
= True
589 # Create and configure a LXD container for use with a proxy charm.
590 (model_name
, application_name
, _
, _
) = args
592 # Execute 'get-ssh-public-key' primitive and get returned value
593 uuid
= await self
.n2vc
.ExecutePrimitive(
596 "get-ssh-public-key",
599 # logging.debug("Action UUID: {}".format(uuid))
600 result
= await self
.n2vc
.GetPrimitiveOutput(model_name
, uuid
)
601 # logging.debug("Action result: {}".format(result))
602 pubkey
= result
['pubkey']
604 self
.container
= create_lxd_container(
606 name
=os
.path
.basename(__file__
)
609 return self
.container
612 def get_container_ip(self
):
613 """Return the IPv4 address of container's eth0 interface."""
616 addresses
= self
.container
.state().network
['eth0']['addresses']
617 # The interface may have more than one address, but we only need
618 # the first one for testing purposes.
619 ipaddr
= addresses
[0]['address']
624 def n2vc_callback(self
, *args
, **kwargs
):
625 """Monitor and react to changes in the charm state.
627 This is where we will monitor the state of the charm:
630 - is it waiting on input to continue?
632 When the state changes, we respond appropriately:
633 - configuring ssh credentials for a proxy charm
634 - running a service primitive
636 Lastly, when the test has finished we begin the teardown, removing the
637 charm and associated LXD container, and notify pytest that this test
640 Args are expected to contain four values, received from N2VC:
641 - str, the name of the model
642 - str, the name of the application
643 - str, the workload status as reported by Juju
644 - str, the workload message as reported by Juju
646 (model
, application
, status
, message
) = args
647 # logging.debug("Callback for {}/{} - {} ({})".format(
654 # Make sure we're only receiving valid status. This will catch charms
655 # that aren't setting their workload state and appear as "unknown"
656 # assert status not in ["active", "blocked", "waiting", "maintenance"]
659 if kwargs
and 'task' in kwargs
:
660 task
= kwargs
['task']
661 # logging.debug("Got task: {}".format(task))
663 # Closures and inner functions, oh my.
665 """Is the charm in an active state?"""
666 if status
in ["active"]:
671 """Is the charm waiting for us?"""
672 if status
in ["blocked"]:
676 def configure_ssh_proxy(task
):
677 """Configure the proxy charm to use the lxd container."""
678 logging
.debug("configure_ssh_proxy({})".format(task
))
680 mgmtaddr
= self
.get_container_ip()
683 "Setting config ssh-hostname={}".format(mgmtaddr
)
686 task
= asyncio
.ensure_future(
687 self
.n2vc
.ExecutePrimitive(
693 'ssh-hostname': mgmtaddr
,
694 'ssh-username': 'ubuntu',
699 # Execute the VNFD's 'initial-config-primitive'
700 task
.add_done_callback(functools
.partial(
701 execute_initial_config_primitives
,
704 def execute_initial_config_primitives(task
=None):
705 logging
.debug("execute_initial_config_primitives({})".format(task
))
707 init_config
= self
.charms
[application
]
710 The initial-config-primitive is run during deploy but may fail
711 on some steps because proxy charm access isn't configured.
713 At this stage, we'll re-run those actions.
716 task
= asyncio
.ensure_future(
717 self
.n2vc
.ExecuteInitialPrimitives(
725 ExecutePrimitives will return a list of uuids. We need to check the
726 status of each. The test continues if all Actions succeed, and
727 fails if any of them fail.
729 task
.add_done_callback(functools
.partial(wait_for_uuids
))
732 task
= asyncio
.ensure_future(
733 self
.n2vc
.GetMetrics(
739 task
.add_done_callback(
745 def verify_metrics(task
):
746 logging
.debug("Verifying metrics!")
747 # Check if task returned metrics
748 results
= task
.result()
750 metrics
= parse_metrics(application
, results
)
751 logging
.debug(metrics
)
754 task
= asyncio
.ensure_future(
755 self
.n2vc
.RemoveCharms(model
, application
)
758 task
.add_done_callback(functools
.partial(stop_test
))
761 # TODO: Ran into a case where it took 9 attempts before metrics
762 # were available; the controller is slow sometimes.
766 def wait_for_uuids(task
):
767 logging
.debug("wait_for_uuids({})".format(task
))
768 uuids
= task
.result()
773 def get_primitive_result(uuid
, task
):
774 logging
.debug("Got result from action")
775 # completed, failed, or running
776 result
= task
.result()
778 if status
in result
and result
['status'] \
779 in ["completed", "failed"]:
782 logging
.debug("Action {} is {}".format(
784 task
.result
['status'])
788 logging
.debug("action is still running")
790 def get_primitive_status(uuid
, task
):
791 result
= task
.result()
793 if result
== "completed":
794 # Make sure all primitives are finished
798 if waitfor
== finished
:
799 # logging.debug("Action complete; removing charm")
800 task
= asyncio
.ensure_future(
801 self
.n2vc
.RemoveCharms(model
, application
)
804 task
.add_done_callback(functools
.partial(stop_test
))
805 elif result
== "failed":
806 # logging.debug("Action failed; removing charm")
808 self
._running
= False
811 # logging.debug("action is still running: {}".format(result))
812 # logging.debug(result)
814 # The primitive is running; try again.
815 task
= asyncio
.ensure_future(
816 self
.n2vc
.GetPrimitiveStatus(model
, uuid
)
818 task
.add_done_callback(functools
.partial(
819 get_primitive_result
,
823 for actionid
in uuids
:
824 task
= asyncio
.ensure_future(
825 self
.n2vc
.GetPrimitiveStatus(model
, actionid
)
827 task
.add_done_callback(functools
.partial(
828 get_primitive_result
,
835 When the test has either succeeded or reached a failing state,
836 begin the process of removing the test fixtures.
838 asyncio
.ensure_future(
839 self
.n2vc
.RemoveCharms(model
, application
)
842 self
._running
= False
845 # logging.debug("Charm is in a blocked state!")
847 # Container only applies to proxy charms.
848 if self
.isproxy(application
):
850 if self
.container
is None:
852 # "Ensuring CreateContainer: status is {}".format(status)
855 # Create the new LXD container
856 task
= asyncio
.ensure_future(self
.CreateContainer(*args
))
858 # Configure the proxy charm to use the container when ready
859 task
.add_done_callback(functools
.partial(
863 # task.add_done_callback(functools.partial(
866 # create_lxd_container()
867 # self.container = True
869 # A charm may validly be in a blocked state if it's waiting for
870 # relations or some other kind of manual intervention
871 # logging.debug("This is not a proxy charm.")
872 # TODO: needs testing
873 task
= asyncio
.ensure_future(
874 execute_initial_config_primitives()
877 task
.add_done_callback(functools
.partial(stop_test
))
880 # Does the charm have metrics defined?
881 if has_metrics(self
.charms
[application
]['name']):
882 # logging.debug("metrics.yaml defined in the layer!")
884 # Force a run of the metric collector, so we don't have
885 # to wait for it's normal 5 minute interval run.
886 # NOTE: this shouldn't be done outside of CI
887 collect_metrics(application
)
889 # get the current metrics
892 # When the charm reaches an active state and hasn't been
893 # handled (metrics collection, etc)., the test has succeded.
894 # logging.debug("Charm is active! Removing charm...")
895 task
= asyncio
.ensure_future(
896 self
.n2vc
.RemoveCharms(model
, application
)
899 task
.add_done_callback(functools
.partial(stop_test
))