15 from juju
.controller
import Controller
# Disable InsecureRequestWarning w/LXD
urllib3.disable_warnings()
logging.getLogger("urllib3").setLevel(logging.WARNING)

# Absolute path of the directory containing this file; used to locate
# the test charms/layers shipped alongside the test suite.
here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
    """Return True if a Juju controller is currently bootstrapped.

    Shells out to ``juju switch``: a zero exit status plus non-empty
    output means an active controller is selected.
    """
    result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
    return (
        result.returncode == 0 and
        len(result.stdout.decode().strip()) > 0)
# pytest marker: skip a test unless a bootstrapped Juju environment is
# available (evaluated once, at import time).
bootstrapped = pytest.mark.skipif(
    not is_bootstrapped(),
    reason='bootstrapped Juju environment required')
class CleanController():
    """
    Context manager that automatically connects and disconnects from
    the currently active controller.

    Note: Unlike CleanModel, this will not create a new controller for you,
    and an active controller must already be available.
    """
    def __init__(self):
        # Populated with a connected Controller on __aenter__.
        self._controller = None

    async def __aenter__(self):
        self._controller = Controller()
        await self._controller.connect()
        return self._controller

    async def __aexit__(self, exc_type, exc, tb):
        await self._controller.disconnect()
57 """Format debug messages in a consistent way."""
58 now
= datetime
.datetime
.now()
60 # TODO: Decide on the best way to log. Output from `logging.debug` shows up
61 # when a test fails, but print() will always show up when running tox with
62 # `-s`, which is really useful for debugging single tests without having to
63 # insert a False assert to see the log.
65 "[{}] {}".format(now
.strftime('%Y-%m-%dT%H:%M:%S'), msg
)
68 "[{}] {}".format(now
.strftime('%Y-%m-%dT%H:%M:%S'), msg
)
73 return "{}/charms".format(here
)
77 return "{}/charms/layers".format(here
)
def collect_metrics(application):
    """Invoke Juju's metrics collector.

    Caveat: this shells out to the `juju collect-metrics` command, rather than
    making an API call. At the time of writing, that API is not exposed through
    libjuju.

    :param application: name of the deployed application to collect for.
    :raises Exception: when the external command exits non-zero.
    """
    try:
        subprocess.check_call(['juju', 'collect-metrics', application])
    except subprocess.CalledProcessError as e:
        raise Exception("Unable to collect metrics: {}".format(e))
def has_metrics(charm):
    """Check if a charm has metrics defined."""
    # A charm layer declares metrics via a metrics.yaml in its layer dir.
    metricsyaml = "{}/{}/metrics.yaml".format(
        get_layer_path(),
        charm,
    )
    if os.path.exists(metricsyaml):
        return True
    return False
def get_descriptor(descriptor):
    """Parse a YAML descriptor and strip its catalog envelope.

    Returns the first nsd/vnfd record found, or None when the YAML does
    not carry a recognized envelope.
    """
    desc = None
    try:
        # safe_load: yaml.load() without an explicit Loader can execute
        # arbitrary YAML tags and is deprecated; descriptors are plain data.
        tmp = yaml.safe_load(descriptor)

        # Remove the envelope
        root = list(tmp.keys())[0]
        if root == "nsd:nsd-catalog":
            desc = tmp['nsd:nsd-catalog']['nsd'][0]
        elif root == "vnfd:vnfd-catalog":
            desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
    except ValueError:
        assert False
    return desc
def get_n2vc(loop=None):
    """Return an instance of N2VC.VNF."""
    log = logging.getLogger()
    log.level = logging.DEBUG

    # Extract parameters from the environment in order to run our test
    vca_host = os.getenv('VCA_HOST', '127.0.0.1')
    vca_port = os.getenv('VCA_PORT', 17070)
    vca_user = os.getenv('VCA_USER', 'admin')
    vca_charms = os.getenv('VCA_CHARMS', None)
    vca_secret = os.getenv('VCA_SECRET', None)

    # NOTE(review): only the `artifacts` kwarg survives in the corrupted
    # source; the remaining kwargs are reconstructed — verify against the
    # N2VC.VNF constructor signature.
    client = n2vc.vnf.N2VC(
        log=log,
        server=vca_host,
        port=vca_port,
        user=vca_user,
        secret=vca_secret,
        artifacts=vca_charms,
        loop=loop,
    )

    return client
def create_lxd_container(public_key=None, name="test_name"):
    """Create and start a LXD container for testing.

    Returns a container object

    If public_key isn't set, we'll use the Juju ssh key

    :param public_key: The public key to inject into the container
    :param name: The name of the test being run
    """
    container = None

    # Format name so it's valid
    name = name.replace("_", "-").replace(".", "")

    client = get_lxd_client()
    test_machine = "test-{}-{}".format(
        uuid.uuid4().hex[-4:],
        name,
    )

    private_key_path, public_key_path = find_n2vc_ssh_keys()

    try:
        # create profile w/cloud-init and juju ssh key
        if not public_key:
            public_key = ""
            with open(public_key_path, "r") as f:
                public_key = f.readline()

        client.profiles.create(
            test_machine,
            config={
                'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
            devices={
                'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
                'eth0': {
                    'nictype': 'bridged',
                    'parent': 'lxdbr0',
                    'type': 'nic',
                },
            },
        )
    except Exception as ex:
        debug("Error creating lxd profile {}: {}".format(test_machine, ex))
        raise ex

    try:
        # NOTE(review): image source reconstructed around the surviving
        # 'simplestreams'/'cloud-images' fragments — confirm alias/release.
        config = {
            'name': test_machine,
            'source': {
                'type': 'image',
                'mode': 'pull',
                'protocol': 'simplestreams',
                'server': 'https://cloud-images.ubuntu.com/releases',
                'alias': 'xenial',
            },
            'ephemeral': False,
            'profiles': [test_machine],
        }

        container = client.containers.create(config, wait=True)
        container.start(wait=True)
    except Exception as ex:
        debug("Error creating lxd container {}: {}".format(test_machine, ex))
        # This is a test-ending failure.
        raise ex

    def wait_for_network(container, timeout=30):
        """Wait for eth0 to have an ipv4 address."""
        starttime = time.time()
        while time.time() < starttime + timeout:
            time.sleep(1)
            if 'eth0' in container.state().network:
                addresses = container.state().network['eth0']['addresses']
                if len(addresses) > 0:
                    if addresses[0]['family'] == 'inet':
                        return addresses[0]
        return None

    try:
        # Wait for the container to get an IP address on eth0
        wait_for_network(container)
    except Exception as ex:
        debug(
            "Error waiting for container {} network: {}".format(
                test_machine,
                ex,
            )
        )

    # HACK: We need to give sshd a chance to bind to the interface,
    # and pylxd's container.execute seems to be broken and fails and/or
    # hangs trying to properly check if the service is up.
    (exit_code, stdout, stderr) = container.execute([
        'ping',
        '-c', '5',   # Wait for 5 ECHO_REPLY
        '8.8.8.8',   # Ping Google's public DNS
        '-W', '15',  # Set a 15 second deadline
    ])
    if exit_code > 0:
        # The network failed to come up; the container isn't usable.
        raise Exception("Unable to verify container network")

    return container
def destroy_lxd_container(container):
    """Stop and delete a LXD container.

    Sometimes we see errors talking to LXD -- ephemerial issues like
    load or a bug that's killed the API. We'll do our best to clean
    up here, and we should run a cleanup after all tests are finished
    to remove any extra containers and profiles belonging to us.
    """
    # Guard: callers sometimes store a bool (False) where a container
    # object would be; nothing to clean up in that case.
    if type(container) is bool:
        return

    name = container.name
    debug("Destroying container {}".format(name))

    client = get_lxd_client()

    def wait_for_stop(timeout=30):
        """Poll until the container reports the Stopped state."""
        starttime = time.time()
        while time.time() < starttime + timeout:
            time.sleep(1)
            if container.state == "Stopped":
                return

    def wait_for_delete(timeout=30):
        """Poll until the container no longer exists."""
        starttime = time.time()
        while time.time() < starttime + timeout:
            time.sleep(1)
            if client.containers.exists(name) is False:
                return

    try:
        container.stop(wait=False)
        wait_for_stop()
    except Exception as ex:
        debug(
            "Error stopping container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        container.delete(wait=False)
        wait_for_delete()
    except Exception as ex:
        debug(
            "Error deleting container {}: {}".format(
                name,
                ex,
            )
        )

    try:
        # Delete the profile created for this container
        profile = client.profiles.get(name)
        if profile:
            profile.delete()
    except Exception as ex:
        debug(
            "Error deleting profile {}: {}".format(
                name,
                ex,
            )
        )
def find_lxd_config():
    """Find the LXD configuration directory.

    Checks the classic and snap config locations; returns the
    (client.crt, client.key) paths, or (None, None) when not found.
    """
    paths = []
    paths.append(os.path.expanduser("~/.config/lxc"))
    paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))

    for path in paths:
        if os.path.exists(path):
            crt = os.path.expanduser("{}/client.crt".format(path))
            key = os.path.expanduser("{}/client.key".format(path))
            if os.path.exists(crt) and os.path.exists(key):
                return (crt, key)

    return (None, None)
def find_n2vc_ssh_keys():
    """Find the N2VC ssh keys.

    Returns the (private, public) key paths, or (None, None) when the
    id_n2vc_rsa pair is not present.
    """
    paths = []
    paths.append(os.path.expanduser("~/.ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
            public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def find_juju_ssh_keys():
    """Find the Juju ssh keys.

    Returns the (private, public) key paths, or (None, None) when the
    juju_id_rsa pair is not present.
    """
    paths = []
    paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))

    for path in paths:
        if os.path.exists(path):
            private = os.path.expanduser("{}/juju_id_rsa".format(path))
            public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
            if os.path.exists(private) and os.path.exists(public):
                return (private, public)

    return (None, None)
def get_juju_private_key():
    """Return the path to the Juju private ssh key (or None)."""
    keys = find_juju_ssh_keys()
    return keys[0]
def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
    """ Get the LXD client.

    Returns a pylxd.Client connected over HTTPS with the local client
    certificate, or None when no LXD client certificate is found.
    """
    client = None
    (crt, key) = find_lxd_config()

    if crt and key:
        client = pylxd.Client(
            endpoint="https://{}:{}".format(host, port),
            cert=(crt, key),
            verify=verify,
        )

    return client
382 # TODO: This is marked serial but can be run in parallel with work, including:
383 # - Fixing an event loop issue; seems that all tests stop when one test stops?
# TODO: This is marked serial but can be run in parallel with work, including:
# - Fixing an event loop issue; seems that all tests stop when one test stops?
class TestN2VC(object):
    """TODO:
    1. Validator Validation

    Automatically validate the descriptors we're using here, unless the test
    author explicitly wants to skip them. Useful to make sure tests aren't
    being run against invalid descriptors, validating functionality that may
    fail against a properly written descriptor.

    We need to have a flag (instance variable) that controls this behavior.
    It may be necessary to skip validation and run against a descriptor
    implementing features that have not yet been released in the
    Information Model.

    The six phases of integration testing, for the test itself and each
    charm?:

    setup/teardown_class:
    1. Prepare   - Verify the environment and create a new model
    2. Deploy    - Mark the test as ready to execute
    3. Configure - Configuration to reach Active state
    4. Test      - Execute primitive(s) to verify success
    5. Collect   - Collect any useful artifacts for debugging (charm, logs)
    6. Destroy   - Destroy the model

    per-charm:
    1. Prepare   - Building of charm
    2. Deploy    - Deploying charm
    3. Configure - Configuration to reach Active state
    4. Test      - Execute primitive(s) to verify success
    5. Collect   - Collect any useful artifacts for debugging (charm, logs)
    6. Destroy   - Destroy the charm
    """
417 def setup_class(self
):
418 """ setup any state specific to the execution of the given class (which
419 usually contains tests).
421 # Initialize instance variable(s)
424 # Track internal state for each test run
427 # Parse the test's descriptors
428 self
.nsd
= get_descriptor(self
.NSD_YAML
)
429 self
.vnfd
= get_descriptor(self
.VNFD_YAML
)
431 self
.ns_name
= self
.nsd
['name']
432 self
.vnf_name
= self
.vnfd
['name']
435 self
.parse_vnf_descriptor()
436 assert self
.charms
is not {}
438 # Track artifacts, like compiled charms, that will need to be removed
441 # Build the charm(s) needed for this test
442 for charm
in self
.get_charm_names():
443 self
.get_charm(charm
)
445 # A bit of a hack, in order to allow the N2VC callback to run parallel
446 # to pytest. Test(s) should wait for this flag to change to False
449 self
._stopping
= False
452 def teardown_class(self
):
453 """ teardown any state that was previously setup with a call to
456 debug("Running teardown_class...")
459 debug("Destroying LXD containers...")
460 for application
in self
.state
:
461 if self
.state
[application
]['container']:
462 destroy_lxd_container(self
.state
[application
]['container'])
463 debug("Destroying LXD containers...done.")
467 debug("teardown_class(): Logging out of N2VC...")
468 yield from self
.n2vc
.logout()
469 debug("teardown_class(): Logging out of N2VC...done.")
471 debug("Running teardown_class...done.")
472 except Exception as ex
:
473 debug("Exception in teardown_class: {}".format(ex
))
476 def all_charms_active(self
):
477 """Determine if the all deployed charms are active."""
480 for application
in self
.state
:
481 if 'status' in self
.state
[application
]:
482 debug("status of {} is '{}'".format(
484 self
.state
[application
]['status'],
486 if self
.state
[application
]['status'] == 'active':
489 debug("Active charms: {}/{}".format(
494 if active
== len(self
.charms
):
500 def are_tests_finished(self
):
501 appcount
= len(self
.state
)
503 # If we don't have state yet, keep running.
505 debug("No applications")
509 debug("_stopping is True")
513 for application
in self
.state
:
514 if self
.state
[application
]['done']:
517 debug("{}/{} charms tested".format(appdone
, appcount
))
519 if appcount
== appdone
:
525 async def running(self
, timeout
=600):
526 """Returns if the test is still running.
528 @param timeout The time, in seconds, to wait for the test to complete.
530 if self
.are_tests_finished():
534 await asyncio
.sleep(30)
539 def get_charm(self
, charm
):
540 """Build and return the path to the test charm.
542 Builds one of the charms in tests/charms/layers and returns the path
543 to the compiled charm. The charm will automatically be removed when
544 when the test is complete.
546 Returns: The path to the built charm or None if `charm build` failed.
549 # Make sure the charm snap is installed
551 subprocess
.check_call(['which', 'charm'])
552 except subprocess
.CalledProcessError
:
553 raise Exception("charm snap not installed.")
555 if charm
not in self
.artifacts
:
557 # Note: This builds the charm under N2VC/tests/charms/builds/
558 # Currently, the snap-installed command only has write access
559 # to the $HOME (changing in an upcoming release) so writing to
560 # /tmp isn't possible at the moment.
561 builds
= get_charm_path()
563 if not os
.path
.exists("{}/builds/{}".format(builds
, charm
)):
564 cmd
= "charm build --no-local-layers {}/{} -o {}/".format(
569 subprocess
.check_call(shlex
.split(cmd
))
571 except subprocess
.CalledProcessError
as e
:
572 # charm build will return error code 100 if the charm fails
573 # the auto-run of charm proof, which we can safely ignore for
575 if e
.returncode
!= 100:
576 raise Exception("charm build failed: {}.".format(e
))
578 self
.artifacts
[charm
] = {
580 'charm': "{}/builds/{}".format(builds
, charm
),
583 return self
.artifacts
[charm
]['charm']
586 async def deploy(self
, vnf_index
, charm
, params
, loop
):
587 """An inner function to do the deployment of a charm from
592 self
.n2vc
= get_n2vc(loop
=loop
)
594 application
= self
.n2vc
.FormatApplicationName(
600 # Initialize the state of the application
601 self
.state
[application
] = {
602 'status': None, # Juju status
603 'container': None, # lxd container, for proxy charms
604 'actions': {}, # Actions we've executed
605 'done': False, # Are we done testing this charm?
606 'phase': "deploy", # What phase is this application in?
609 debug("Deploying charm at {}".format(self
.artifacts
[charm
]))
611 # If this is a native charm, we need to provision the underlying
612 # machine ala an LXC container.
615 if not self
.isproxy(application
):
616 debug("Creating container for native charm")
617 # args = ("default", application, None, None)
618 self
.state
[application
]['container'] = create_lxd_container(
619 name
=os
.path
.basename(__file__
)
622 hostname
= self
.get_container_ip(
623 self
.state
[application
]['container'],
631 await self
.n2vc
.DeployCharms(
635 self
.get_charm(charm
),
642 def parse_vnf_descriptor(self
):
643 """Parse the VNF descriptor to make running tests easier.
645 Parse the charm information in the descriptor to make it easy to write
646 tests to run again it.
648 Each charm becomes a dictionary in a list:
651 'vnf-member-index': 1,
654 'initial-config-primitive': {},
655 'config-primitive': {}
658 - is this a proxy charm?
659 - what are the initial-config-primitives (day 1)?
660 - what are the config primitives (day 2)?
665 # You'd think this would be explicit, but it's just an incremental
666 # value that should be consistent.
669 """Get all vdu and/or vdu config in a descriptor."""
670 config
= self
.get_config()
674 # Get the name to be used for the deployed application
675 application_name
= n2vc
.vnf
.N2VC().FormatApplicationName(
678 str(vnf_member_index
),
682 'application-name': application_name
,
684 'vnf-member-index': vnf_member_index
,
685 'vnf-name': self
.vnf_name
,
687 'initial-config-primitive': {},
688 'config-primitive': {},
692 charm
['name'] = juju
['charm']
695 charm
['proxy'] = juju
['proxy']
697 if 'initial-config-primitive' in cfg
:
698 charm
['initial-config-primitive'] = \
699 cfg
['initial-config-primitive']
701 if 'config-primitive' in cfg
:
702 charm
['config-primitive'] = cfg
['config-primitive']
704 charms
[application_name
] = charm
706 # Increment the vnf-member-index
707 vnf_member_index
+= 1
712 def isproxy(self
, application_name
):
714 assert application_name
in self
.charms
715 assert 'proxy' in self
.charms
[application_name
]
716 assert type(self
.charms
[application_name
]['proxy']) is bool
718 # debug(self.charms[application_name])
719 return self
.charms
[application_name
]['proxy']
722 def get_config(self
):
723 """Return an iterable list of config items (vdu and vnf).
725 As far as N2VC is concerned, the config section for vdu and vnf are
726 identical. This joins them together so tests only need to iterate
731 """Get all vdu and/or vdu config in a descriptor."""
732 vnf_config
= self
.vnfd
.get("vnf-configuration")
734 juju
= vnf_config
['juju']
736 configs
.append(vnf_config
)
738 for vdu
in self
.vnfd
['vdu']:
739 vdu_config
= vdu
.get('vdu-configuration')
741 juju
= vdu_config
['juju']
743 configs
.append(vdu_config
)
748 def get_charm_names(self
):
749 """Return a list of charms used by the test descriptor."""
753 # Check if the VDUs in this VNF have a charm
754 for config
in self
.get_config():
755 juju
= config
['juju']
758 if name
not in charms
:
764 def get_phase(self
, application
):
765 return self
.state
[application
]['phase']
768 def set_phase(self
, application
, phase
):
769 self
.state
[application
]['phase'] = phase
772 async def configure_proxy_charm(self
, *args
):
773 """Configure a container for use via ssh."""
774 (model
, application
, _
, _
) = args
777 if self
.get_phase(application
) == "deploy":
778 self
.set_phase(application
, "configure")
780 debug("Start CreateContainer for {}".format(application
))
781 self
.state
[application
]['container'] = \
782 await self
.CreateContainer(*args
)
783 debug("Done CreateContainer for {}".format(application
))
785 if self
.state
[application
]['container']:
786 debug("Configure {} for container".format(application
))
787 if await self
.configure_ssh_proxy(application
):
788 await asyncio
.sleep(0.1)
791 debug("Failed to configure container for {}".format(application
))
793 debug("skipping CreateContainer for {}: {}".format(
795 self
.get_phase(application
),
798 except Exception as ex
:
799 debug("configure_proxy_charm exception: {}".format(ex
))
801 await asyncio
.sleep(0.1)
806 async def execute_charm_tests(self
, *args
):
807 (model
, application
, _
, _
) = args
809 debug("Executing charm test(s) for {}".format(application
))
811 if self
.state
[application
]['done']:
812 debug("Trying to execute tests against finished charm...aborting")
816 phase
= self
.get_phase(application
)
817 # We enter the test phase when after deploy (for native charms) or
818 # configure, for proxy charms.
819 if phase
in ["deploy", "configure"]:
820 self
.set_phase(application
, "test")
821 if self
.are_tests_finished():
822 raise Exception("Trying to execute init-config on finished test")
824 if await self
.execute_initial_config_primitives(application
):
826 await self
.check_metrics(application
)
828 debug("Done testing {}".format(application
))
829 self
.state
[application
]['done'] = True
831 except Exception as ex
:
832 debug("Exception in execute_charm_tests: {}".format(ex
))
834 await asyncio
.sleep(0.1)
839 async def CreateContainer(self
, *args
):
840 """Create a LXD container for use with a proxy charm.abs
842 1. Get the public key from the charm via `get-ssh-public-key` action
843 2. Create container with said key injected for the ubuntu user
845 Returns a Container object
847 # Create and configure a LXD container for use with a proxy charm.
848 (model
, application
, _
, _
) = args
850 debug("[CreateContainer] {}".format(args
))
854 # Execute 'get-ssh-public-key' primitive and get returned value
855 uuid
= await self
.n2vc
.ExecutePrimitive(
858 "get-ssh-public-key",
862 result
= await self
.n2vc
.GetPrimitiveOutput(model
, uuid
)
863 pubkey
= result
['pubkey']
865 container
= create_lxd_container(
867 name
=os
.path
.basename(__file__
)
871 except Exception as ex
:
872 debug("Error creating container: {}".format(ex
))
878 async def stop(self
):
882 - Stop and delete containers
885 TODO: Clean up duplicate code between teardown_class() and stop()
887 debug("stop() called")
889 if self
.n2vc
and self
._running
and not self
._stopping
:
890 self
._running
= False
891 self
._stopping
= True
893 for application
in self
.charms
:
895 await self
.n2vc
.RemoveCharms(self
.ns_name
, application
)
898 # Wait for the application to be removed
899 await asyncio
.sleep(10)
900 if not await self
.n2vc
.HasApplication(
905 await self
.n2vc
.DestroyNetworkService(self
.ns_name
)
907 # Need to wait for the charm to finish, because native charms
908 if self
.state
[application
]['container']:
909 debug("Deleting LXD container...")
910 destroy_lxd_container(
911 self
.state
[application
]['container']
913 self
.state
[application
]['container'] = None
914 debug("Deleting LXD container...done.")
916 debug("No container found for {}".format(application
))
917 except Exception as e
:
918 debug("Error while deleting container: {}".format(e
))
922 debug("stop(): Logging out of N2VC...")
923 await self
.n2vc
.logout()
925 debug("stop(): Logging out of N2VC...Done.")
926 except Exception as ex
:
929 # Let the test know we're finished.
930 debug("Marking test as finished.")
931 # self._running = False
933 debug("Skipping stop()")
936 def get_container_ip(self
, container
):
937 """Return the IPv4 address of container's eth0 interface."""
940 addresses
= container
.state().network
['eth0']['addresses']
941 # The interface may have more than one address, but we only need
942 # the first one for testing purposes.
943 ipaddr
= addresses
[0]['address']
948 async def configure_ssh_proxy(self
, application
, task
=None):
949 """Configure the proxy charm to use the lxd container.
951 Configure the charm to use a LXD container as it's VNF.
953 debug("Configuring ssh proxy for {}".format(application
))
955 mgmtaddr
= self
.get_container_ip(
956 self
.state
[application
]['container'],
960 "Setting ssh-hostname for {} to {}".format(
966 await self
.n2vc
.ExecutePrimitive(
972 'ssh-hostname': mgmtaddr
,
973 'ssh-username': 'ubuntu',
980 async def execute_initial_config_primitives(self
, application
, task
=None):
981 debug("Executing initial_config_primitives for {}".format(application
))
983 init_config
= self
.charms
[application
]
986 The initial-config-primitive is run during deploy but may fail
987 on some steps because proxy charm access isn't configured.
989 Re-run those actions so we can inspect the status.
991 uuids
= await self
.n2vc
.ExecuteInitialPrimitives(
998 ExecutePrimitives will return a list of uuids. We need to check the
999 status of each. The test continues if all Actions succeed, and
1000 fails if any of them fail.
1002 await self
.wait_for_uuids(application
, uuids
)
1003 debug("Primitives for {} finished.".format(application
))
1006 except Exception as ex
:
1007 debug("execute_initial_config_primitives exception: {}".format(ex
))
1012 async def check_metrics(self
, application
, task
=None):
1013 """Check and run metrics, if present.
1015 Checks to see if metrics are specified by the charm. If so, collects
1018 If no metrics, then mark the test as finished.
1020 if has_metrics(self
.charms
[application
]['name']):
1021 debug("Collecting metrics for {}".format(application
))
1023 metrics
= await self
.n2vc
.GetMetrics(
1028 return await self
.verify_metrics(application
, metrics
)
1031 async def verify_metrics(self
, application
, metrics
):
1032 """Verify the charm's metrics.
1034 Verify that the charm has sent metrics successfully.
1036 Stops the test when finished.
1038 debug("Verifying metrics for {}: {}".format(application
, metrics
))
1044 # TODO: Ran into a case where it took 9 attempts before metrics
1045 # were available; the controller is slow sometimes.
1046 await asyncio
.sleep(30)
1047 return await self
.check_metrics(application
)
1050 async def wait_for_uuids(self
, application
, uuids
):
1051 """Wait for primitives to execute.
1053 The task will provide a list of uuids representing primitives that are
1056 debug("Waiting for uuids for {}: {}".format(application
, uuids
))
1057 waitfor
= len(uuids
)
1060 while waitfor
> finished
:
1062 await asyncio
.sleep(10)
1064 if uuid
not in self
.state
[application
]['actions']:
1065 self
.state
[application
]['actions'][uid
] = "pending"
1067 status
= self
.state
[application
]['actions'][uid
]
1069 # Have we already marked this as done?
1070 if status
in ["pending", "running"]:
1072 debug("Getting status of {} ({})...".format(uid
, status
))
1073 status
= await self
.n2vc
.GetPrimitiveStatus(
1077 debug("...state of {} is {}".format(uid
, status
))
1078 self
.state
[application
]['actions'][uid
] = status
1080 if status
in ['completed', 'failed']:
1083 debug("{}/{} actions complete".format(finished
, waitfor
))
1085 # Wait for the primitive to finish and try again
1086 if waitfor
> finished
:
1087 debug("Waiting 10s for action to finish...")
1088 await asyncio
.sleep(10)
1091 def n2vc_callback(self
, *args
, **kwargs
):
1092 (model
, application
, status
, message
) = args
1093 # debug("callback: {}".format(args))
1095 if application
not in self
.state
:
1096 # Initialize the state of the application
1097 self
.state
[application
] = {
1098 'status': None, # Juju status
1099 'container': None, # lxd container, for proxy charms
1100 'actions': {}, # Actions we've executed
1101 'done': False, # Are we done testing this charm?
1102 'phase': "deploy", # What phase is this application in?
1105 self
.state
[application
]['status'] = status
1107 if status
in ['waiting', 'maintenance', 'unknown']:
1108 # Nothing to do for these
1111 debug("callback: {}".format(args
))
1113 if self
.state
[application
]['done']:
1114 debug("{} is done".format(application
))
1117 if status
in ["blocked"] and self
.isproxy(application
):
1118 if self
.state
[application
]['phase'] == "deploy":
1119 debug("Configuring proxy charm for {}".format(application
))
1120 asyncio
.ensure_future(self
.configure_proxy_charm(*args
))
1122 elif status
in ["active"]:
1123 """When a charm is active, we can assume that it has been properly
1124 configured (not blocked), regardless of if it's a proxy or not.
1126 All primitives should be complete by init_config_primitive
1128 asyncio
.ensure_future(self
.execute_charm_tests(*args
))