Merge upstream libjuju
Merge upstream libjuju (bug fixes)
Change-Id: Ia28b8a0ea2168a4df74823e8493c650491afb695
Signed-off-by: Adam Israel <adam.israel@canonical.com>
diff --git a/.gitignore b/.gitignore
index 6d00fec..543898d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,15 +1,9 @@
-*.sw[mnop]
-.venv/
+__pycache__
*.pyc
-*.py~
-docs/_build/
-__pycache__/
.tox/
-*.egg-info/
-.cache/
-.\#*
+tests/charms/builds
+tests/charms/deps
dist/
-dev/
-.pytest_cache
-pytestdebug.log
-.vscode/
+.cache/
+.local/
+N2VC.egg-info/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..80718ed
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,5 @@
+FROM ubuntu:16.04
+
+RUN apt-get update && apt-get -y install git make python python3 \
+ libcurl4-gnutls-dev libgnutls-dev tox python3-dev \
+ debhelper python3-setuptools python-all python3-all apt-utils
diff --git a/Jenkinsfile b/Jenkinsfile
new file mode 100644
index 0000000..ed9e879
--- /dev/null
+++ b/Jenkinsfile
@@ -0,0 +1,32 @@
+properties([
+ parameters([
+ string(defaultValue: env.BRANCH_NAME, description: '', name: 'GERRIT_BRANCH'),
+ string(defaultValue: 'osm/N2VC', description: '', name: 'GERRIT_PROJECT'),
+ string(defaultValue: env.GERRIT_REFSPEC, description: '', name: 'GERRIT_REFSPEC'),
+ string(defaultValue: env.GERRIT_PATCHSET_REVISION, description: '', name: 'GERRIT_PATCHSET_REVISION'),
+ string(defaultValue: 'https://osm.etsi.org/gerrit', description: '', name: 'PROJECT_URL_PREFIX'),
+ booleanParam(defaultValue: false, description: '', name: 'TEST_INSTALL'),
+ string(defaultValue: 'artifactory-osm', description: '', name: 'ARTIFACTORY_SERVER'),
+ ])
+])
+
+def devops_checkout() {
+ dir('devops') {
+ git url: "${PROJECT_URL_PREFIX}/osm/devops", branch: params.GERRIT_BRANCH
+ }
+}
+
+node('docker') {
+ checkout scm
+ devops_checkout()
+
+ ci_stage_2 = load "devops/jenkins/ci-pipelines/ci_stage_2.groovy"
+ ci_stage_2.ci_pipeline( 'N2VC',
+ params.PROJECT_URL_PREFIX,
+ params.GERRIT_PROJECT,
+ params.GERRIT_BRANCH,
+ params.GERRIT_REFSPEC,
+ params.GERRIT_PATCHSET_REVISION,
+ params.TEST_INSTALL,
+ params.ARTIFACTORY_SERVER)
+}
diff --git a/Makefile b/Makefile
index 5938132..abc2c39 100644
--- a/Makefile
+++ b/Makefile
@@ -1,46 +1,13 @@
-BIN := .tox/py3/bin
-PY := $(BIN)/python
-PIP := $(BIN)/pip
-SCHEMAGEN := $(shell which schemagen)
-VERSION=$(shell cat VERSION)
-
clean:
find . -name __pycache__ -type d -exec rm -r {} +
find . -name *.pyc -delete
rm -rf .tox
- rm -rf docs/_build/
-
+ rm -rf tests/charms/builds/*
.tox:
tox -r --notest
-
-client: .tox
-ifndef SCHEMAGEN
- $(error "schemagen is not available, please install from https://github.com/juju/schemagen")
-endif
- $(PY) -m juju.client.facade -s "juju/client/schemas*" -o juju/client/
-
-test:
+test: lint
tox
-
-.PHONY: lint
-lint:
- tox -e lint --notest
-
-docs: .tox
- $(PIP) install -r docs/requirements.txt
- rm -rf docs/_build/
- $(BIN)/sphinx-build -b html docs/ docs/_build/
- cd docs/_build/ && zip -r docs.zip *
-
-release:
- git fetch --tags
- rm dist/*.tar.gz
- $(PY) setup.py sdist
- $(BIN)/twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
- git tag ${VERSION}
- git push --tags
-
-upload: release
-
-
-.PHONY: clean client test docs upload release
+lint:
+ tox -e lint
+package:
+ python3 setup.py --command-packages=stdeb.command bdist_deb
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bed6c90
--- /dev/null
+++ b/README.md
@@ -0,0 +1,138 @@
+# N2VC
+
+## Objective
+
+The N2VC library provides an OSM-centric interface to the VCA. This enables any OSM module (SO, LCM, osmclient) to use a standard pattern for communicating with Juju and the charms responsible for configuring a VNF.
+
+N2VC relies on the IM module, enforcing compliance with the OSM Information Model.
+
+## Caveats
+
+This library is in active development for OSM Release FOUR. The interface is subject to change prior to release.
+
+## Roadmap
+- Create the N2VC API (in progress)
+- Create a Python library implementing the N2VC API (in progress)
+- Implement N2VC API in SO
+- Implement N2VC API in lcm
+- Add support for N2VC in OSMClient
+
+## Requirements
+
+Because this is still in heavy development, there are a few manual steps required to use this library.
+
+
+```
+# This list is incomplete
+apt install python3-nose
+```
+
+### Install LXD and Juju
+
+In order to run the test(s) included in N2VC, you'll need to install Juju locally.
+
+*Note: It's not necessary to install the juju library via pip3; N2VC uses a version bundled within the modules/ directory.*
+
+```bash
+snap install lxd
+snap install juju --classic
+```
+
+## Metrics
+
+### Limitations
+
+There are currently a few limitations with regard to metrics in Juju.
+1. Metrics are polled by the Controller every five minutes. This interval is not modifiable.
+2. The Juju API and CLI only expose the most recently polled metric, so it's necessary to poll the N2VC `GetMetrics` method more often, and discard duplicate values.
+
+
+## Testing
+A basic test has been written to exercise the functionality of the library, and to serve as a demonstration of how to use it.
+
+### Export settings to run test
+
+Export a few environment variables so the test knows where to find the VCA, and the compiled pingpong charm from the devops repository.
+
+```bash
+# You can find the IP of the VCA by running `juju status -m controller` and looking at the DNS column for Machine 0
+export VCA_HOST=
+export VCA_PORT=17070
+# You can find these variables in ~/.local/share/juju/accounts.yaml
+export VCA_USER=admin
+export VCA_SECRET=PASSWORD
+```
+
+### Run the test(s)
+
+*Note: There is a bug in the cleanup of N2VC/Juju that throws an exception after the test has finished. This is on the list of things to fix for R4 and should not impact your tests or integration.*
+
+```bash
+nosetests3 --nocapture tests/test_python.py
+```
+
+## Known Issues
+
+Many. This is still in active development for Release FOUR.
+
+- An exception is thrown after using N2VC, probably related to the internal AllWatcher used by juju.Model. This shouldn't break usage of N2VC, but it is ugly and needs to be fixed.
+
+```
+Exception ignored in: <generator object WebSocketCommonProtocol.close_connection at 0x7f29a3b3f780>
+Traceback (most recent call last):
+ File "/home/stone/.local/lib/python3.6/site-packages/websockets/protocol.py", line 743, in close_connection
+ if (yield from self.wait_for_connection_lost()):
+ File "/home/stone/.local/lib/python3.6/site-packages/websockets/protocol.py", line 768, in wait_for_connection_lost
+ self.timeout, loop=self.loop)
+ File "/usr/lib/python3.6/asyncio/tasks.py", line 342, in wait_for
+ timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 543, in call_later
+ timer = self.call_at(self.time() + delay, callback, *args)
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 553, in call_at
+ self._check_closed()
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 357, in _check_closed
+ raise RuntimeError('Event loop is closed')
+RuntimeError: Event loop is closed
+Exception ignored in: <generator object Queue.get at 0x7f29a2ac6938>
+Traceback (most recent call last):
+ File "/usr/lib/python3.6/asyncio/queues.py", line 169, in get
+ getter.cancel() # Just in case getter is not done yet.
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 574, in call_soon
+ self._check_closed()
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 357, in _check_closed
+ raise RuntimeError('Event loop is closed')
+RuntimeError: Event loop is closed
+Exception ignored in: <coroutine object AllWatcherFacade.Next at 0x7f29a3bd9990>
+Traceback (most recent call last):
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/client/facade.py", line 412, in wrapper
+ reply = await f(*args, **kwargs)
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/client/_client1.py", line 59, in Next
+ reply = await self.rpc(msg)
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/client/overrides.py", line 104, in rpc
+ result = await self.connection.rpc(msg, encoder=TypeEncoder)
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/client/connection.py", line 306, in rpc
+ result = await self._recv(msg['request-id'])
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/client/connection.py", line 208, in _recv
+ return await self.messages.get(request_id)
+ File "/home/stone/src/osm/N2VC/modules/libjuju/juju/utils.py", line 61, in get
+ value = await self._queues[id].get()
+ File "/usr/lib/python3.6/asyncio/queues.py", line 169, in get
+ getter.cancel() # Just in case getter is not done yet.
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 574, in call_soon
+ self._check_closed()
+ File "/usr/lib/python3.6/asyncio/base_events.py", line 357, in _check_closed
+ raise RuntimeError('Event loop is closed')
+RuntimeError: Event loop is closed
+```
+
+## Modules
+
+To update the libjuju module:
+
+Note: the following procedure still needs to be fully tested:
+```bash
+git checkout master
+git subtree pull --prefix=modules/libjuju/ --squash libjuju master
+<resolve any merge conflicts>
+git merge --continue
+```
diff --git a/README.rst b/README.rst
deleted file mode 120000
index c750ce5..0000000
--- a/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-docs/readme.rst
\ No newline at end of file
diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh
new file mode 100755
index 0000000..e3d589f
--- /dev/null
+++ b/devops-stages/stage-archive.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+MDG=N2VC
+rm -rf pool
+rm -rf dists
+mkdir -p pool/$MDG
+mv deb_dist/*.deb pool/$MDG/
+mkdir -p dists/unstable/$MDG/binary-amd64/
+apt-ftparchive packages pool/$MDG > dists/unstable/$MDG/binary-amd64/Packages
+gzip -9fk dists/unstable/$MDG/binary-amd64/Packages
+echo "dists/**,pool/$MDG/*.deb"
diff --git a/devops-stages/stage-build.sh b/devops-stages/stage-build.sh
new file mode 100755
index 0000000..bf7602b
--- /dev/null
+++ b/devops-stages/stage-build.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+rm -rf deb_dist
+tox -e build
diff --git a/devops-stages/stage-test.sh b/devops-stages/stage-test.sh
new file mode 100755
index 0000000..0333d84
--- /dev/null
+++ b/devops-stages/stage-test.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+#tox
diff --git a/juju b/juju
new file mode 120000
index 0000000..1d37c18
--- /dev/null
+++ b/juju
@@ -0,0 +1 @@
+modules/libjuju/juju
\ No newline at end of file
diff --git a/modules/libjuju/.gitignore b/modules/libjuju/.gitignore
new file mode 100644
index 0000000..6d00fec
--- /dev/null
+++ b/modules/libjuju/.gitignore
@@ -0,0 +1,15 @@
+*.sw[mnop]
+.venv/
+*.pyc
+*.py~
+docs/_build/
+__pycache__/
+.tox/
+*.egg-info/
+.cache/
+.\#*
+dist/
+dev/
+.pytest_cache
+pytestdebug.log
+.vscode/
diff --git a/.travis.yml b/modules/libjuju/.travis.yml
similarity index 100%
rename from .travis.yml
rename to modules/libjuju/.travis.yml
diff --git a/CONTRIBUTORS b/modules/libjuju/CONTRIBUTORS
similarity index 100%
rename from CONTRIBUTORS
rename to modules/libjuju/CONTRIBUTORS
diff --git a/modules/libjuju/LICENSE b/modules/libjuju/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/modules/libjuju/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/MANIFEST.in b/modules/libjuju/MANIFEST.in
similarity index 100%
rename from MANIFEST.in
rename to modules/libjuju/MANIFEST.in
diff --git a/modules/libjuju/Makefile b/modules/libjuju/Makefile
new file mode 100644
index 0000000..38dcc11
--- /dev/null
+++ b/modules/libjuju/Makefile
@@ -0,0 +1,46 @@
+BIN := .tox/py3/bin
+PY := $(BIN)/python
+PIP := $(BIN)/pip
+SCHEMAGEN := $(shell which schemagen)
+VERSION=$(shell cat VERSION)
+
+clean:
+ find . -name __pycache__ -type d -exec rm -r {} +
+ find . -name *.pyc -delete
+ rm -rf .tox
+ rm -rf docs/_build/
+
+.tox:
+ tox -r --notest
+
+client: .tox
+ifndef SCHEMAGEN
+ $(error "schemagen is not available, please install from https://github.com/juju/schemagen")
+endif
+ $(PY) -m juju.client.facade -s "juju/client/schemas*" -o juju/client/
+
+test:
+ tox
+
+.PHONY: lint
+lint:
+ tox -e lint --notest
+
+docs: .tox
+ $(PIP) install -r docs/requirements.txt
+ rm -rf docs/_build/
+ $(BIN)/sphinx-build -b html docs/ docs/_build/
+ cd docs/_build/ && zip -r docs.zip *
+
+release:
+ git fetch --tags
+ rm dist/*.tar.gz
+ $(PY) setup.py sdist
+ $(BIN)/twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
+ git tag ${VERSION}
+ git push --tags
+
+upload: release
+
+
+.PHONY: clean client test docs upload release
diff --git a/TODO b/modules/libjuju/TODO
similarity index 100%
rename from TODO
rename to modules/libjuju/TODO
diff --git a/VERSION b/modules/libjuju/VERSION
similarity index 100%
rename from VERSION
rename to modules/libjuju/VERSION
diff --git a/docs/Makefile b/modules/libjuju/docs/Makefile
similarity index 100%
rename from docs/Makefile
rename to modules/libjuju/docs/Makefile
diff --git a/docs/_extensions/automembersummary.py b/modules/libjuju/docs/_extensions/automembersummary.py
similarity index 100%
rename from docs/_extensions/automembersummary.py
rename to modules/libjuju/docs/_extensions/automembersummary.py
diff --git a/docs/_static/custom.css b/modules/libjuju/docs/_static/custom.css
similarity index 100%
rename from docs/_static/custom.css
rename to modules/libjuju/docs/_static/custom.css
diff --git a/docs/api/juju.action.rst b/modules/libjuju/docs/api/juju.action.rst
similarity index 100%
rename from docs/api/juju.action.rst
rename to modules/libjuju/docs/api/juju.action.rst
diff --git a/docs/api/juju.annotation.rst b/modules/libjuju/docs/api/juju.annotation.rst
similarity index 100%
rename from docs/api/juju.annotation.rst
rename to modules/libjuju/docs/api/juju.annotation.rst
diff --git a/docs/api/juju.application.rst b/modules/libjuju/docs/api/juju.application.rst
similarity index 100%
rename from docs/api/juju.application.rst
rename to modules/libjuju/docs/api/juju.application.rst
diff --git a/docs/api/juju.client.rst b/modules/libjuju/docs/api/juju.client.rst
similarity index 100%
rename from docs/api/juju.client.rst
rename to modules/libjuju/docs/api/juju.client.rst
diff --git a/docs/api/juju.cloud.rst b/modules/libjuju/docs/api/juju.cloud.rst
similarity index 100%
rename from docs/api/juju.cloud.rst
rename to modules/libjuju/docs/api/juju.cloud.rst
diff --git a/docs/api/juju.constraints.rst b/modules/libjuju/docs/api/juju.constraints.rst
similarity index 100%
rename from docs/api/juju.constraints.rst
rename to modules/libjuju/docs/api/juju.constraints.rst
diff --git a/docs/api/juju.controller.rst b/modules/libjuju/docs/api/juju.controller.rst
similarity index 100%
rename from docs/api/juju.controller.rst
rename to modules/libjuju/docs/api/juju.controller.rst
diff --git a/docs/api/juju.delta.rst b/modules/libjuju/docs/api/juju.delta.rst
similarity index 100%
rename from docs/api/juju.delta.rst
rename to modules/libjuju/docs/api/juju.delta.rst
diff --git a/docs/api/juju.errors.rst b/modules/libjuju/docs/api/juju.errors.rst
similarity index 100%
rename from docs/api/juju.errors.rst
rename to modules/libjuju/docs/api/juju.errors.rst
diff --git a/docs/api/juju.exceptions.rst b/modules/libjuju/docs/api/juju.exceptions.rst
similarity index 100%
rename from docs/api/juju.exceptions.rst
rename to modules/libjuju/docs/api/juju.exceptions.rst
diff --git a/docs/api/juju.juju.rst b/modules/libjuju/docs/api/juju.juju.rst
similarity index 100%
rename from docs/api/juju.juju.rst
rename to modules/libjuju/docs/api/juju.juju.rst
diff --git a/docs/api/juju.loop.rst b/modules/libjuju/docs/api/juju.loop.rst
similarity index 100%
rename from docs/api/juju.loop.rst
rename to modules/libjuju/docs/api/juju.loop.rst
diff --git a/docs/api/juju.machine.rst b/modules/libjuju/docs/api/juju.machine.rst
similarity index 100%
rename from docs/api/juju.machine.rst
rename to modules/libjuju/docs/api/juju.machine.rst
diff --git a/docs/api/juju.model.rst b/modules/libjuju/docs/api/juju.model.rst
similarity index 100%
rename from docs/api/juju.model.rst
rename to modules/libjuju/docs/api/juju.model.rst
diff --git a/docs/api/juju.placement.rst b/modules/libjuju/docs/api/juju.placement.rst
similarity index 100%
rename from docs/api/juju.placement.rst
rename to modules/libjuju/docs/api/juju.placement.rst
diff --git a/docs/api/juju.relation.rst b/modules/libjuju/docs/api/juju.relation.rst
similarity index 100%
rename from docs/api/juju.relation.rst
rename to modules/libjuju/docs/api/juju.relation.rst
diff --git a/docs/api/juju.tag.rst b/modules/libjuju/docs/api/juju.tag.rst
similarity index 100%
rename from docs/api/juju.tag.rst
rename to modules/libjuju/docs/api/juju.tag.rst
diff --git a/docs/api/juju.unit.rst b/modules/libjuju/docs/api/juju.unit.rst
similarity index 100%
rename from docs/api/juju.unit.rst
rename to modules/libjuju/docs/api/juju.unit.rst
diff --git a/docs/api/juju.utils.rst b/modules/libjuju/docs/api/juju.utils.rst
similarity index 100%
rename from docs/api/juju.utils.rst
rename to modules/libjuju/docs/api/juju.utils.rst
diff --git a/docs/api/modules.rst b/modules/libjuju/docs/api/modules.rst
similarity index 100%
rename from docs/api/modules.rst
rename to modules/libjuju/docs/api/modules.rst
diff --git a/docs/changelog.rst b/modules/libjuju/docs/changelog.rst
similarity index 98%
rename from docs/changelog.rst
rename to modules/libjuju/docs/changelog.rst
index 039b468..d5e13ba 100644
--- a/docs/changelog.rst
+++ b/modules/libjuju/docs/changelog.rst
@@ -60,6 +60,7 @@
* Implement set/get model constraints (#253)
+>>>>>>> b8a8281b1785358bd5632a119c016f21811172c6
0.9.1
^^^^^
Monday July 16 2018
diff --git a/docs/conf.py b/modules/libjuju/docs/conf.py
similarity index 100%
rename from docs/conf.py
rename to modules/libjuju/docs/conf.py
diff --git a/docs/index.rst b/modules/libjuju/docs/index.rst
similarity index 100%
rename from docs/index.rst
rename to modules/libjuju/docs/index.rst
diff --git a/docs/narrative/application.rst b/modules/libjuju/docs/narrative/application.rst
similarity index 100%
rename from docs/narrative/application.rst
rename to modules/libjuju/docs/narrative/application.rst
diff --git a/docs/narrative/controller.rst b/modules/libjuju/docs/narrative/controller.rst
similarity index 100%
rename from docs/narrative/controller.rst
rename to modules/libjuju/docs/narrative/controller.rst
diff --git a/docs/narrative/index.rst b/modules/libjuju/docs/narrative/index.rst
similarity index 100%
rename from docs/narrative/index.rst
rename to modules/libjuju/docs/narrative/index.rst
diff --git a/docs/narrative/model.rst b/modules/libjuju/docs/narrative/model.rst
similarity index 100%
rename from docs/narrative/model.rst
rename to modules/libjuju/docs/narrative/model.rst
diff --git a/docs/narrative/unit.rst b/modules/libjuju/docs/narrative/unit.rst
similarity index 100%
rename from docs/narrative/unit.rst
rename to modules/libjuju/docs/narrative/unit.rst
diff --git a/docs/readme.rst b/modules/libjuju/docs/readme.rst
similarity index 100%
rename from docs/readme.rst
rename to modules/libjuju/docs/readme.rst
diff --git a/docs/requirements.txt b/modules/libjuju/docs/requirements.txt
similarity index 100%
rename from docs/requirements.txt
rename to modules/libjuju/docs/requirements.txt
diff --git a/docs/upstream-updates/index.rst b/modules/libjuju/docs/upstream-updates/index.rst
similarity index 100%
rename from docs/upstream-updates/index.rst
rename to modules/libjuju/docs/upstream-updates/index.rst
diff --git a/examples/action.py b/modules/libjuju/examples/action.py
similarity index 100%
rename from examples/action.py
rename to modules/libjuju/examples/action.py
diff --git a/examples/add_machine.py b/modules/libjuju/examples/add_machine.py
similarity index 100%
rename from examples/add_machine.py
rename to modules/libjuju/examples/add_machine.py
diff --git a/examples/add_model.py b/modules/libjuju/examples/add_model.py
similarity index 100%
rename from examples/add_model.py
rename to modules/libjuju/examples/add_model.py
diff --git a/examples/allwatcher.py b/modules/libjuju/examples/allwatcher.py
similarity index 100%
rename from examples/allwatcher.py
rename to modules/libjuju/examples/allwatcher.py
diff --git a/examples/config.py b/modules/libjuju/examples/config.py
similarity index 100%
rename from examples/config.py
rename to modules/libjuju/examples/config.py
diff --git a/examples/connect_current_model.py b/modules/libjuju/examples/connect_current_model.py
similarity index 100%
rename from examples/connect_current_model.py
rename to modules/libjuju/examples/connect_current_model.py
diff --git a/examples/controller.py b/modules/libjuju/examples/controller.py
similarity index 100%
rename from examples/controller.py
rename to modules/libjuju/examples/controller.py
diff --git a/examples/credential.py b/modules/libjuju/examples/credential.py
similarity index 100%
rename from examples/credential.py
rename to modules/libjuju/examples/credential.py
diff --git a/examples/deploy.py b/modules/libjuju/examples/deploy.py
similarity index 100%
rename from examples/deploy.py
rename to modules/libjuju/examples/deploy.py
diff --git a/examples/fullstatus.py b/modules/libjuju/examples/fullstatus.py
similarity index 100%
rename from examples/fullstatus.py
rename to modules/libjuju/examples/fullstatus.py
diff --git a/examples/future.py b/modules/libjuju/examples/future.py
similarity index 100%
rename from examples/future.py
rename to modules/libjuju/examples/future.py
diff --git a/examples/leadership.py b/modules/libjuju/examples/leadership.py
similarity index 100%
rename from examples/leadership.py
rename to modules/libjuju/examples/leadership.py
diff --git a/examples/livemodel.py b/modules/libjuju/examples/livemodel.py
similarity index 100%
rename from examples/livemodel.py
rename to modules/libjuju/examples/livemodel.py
diff --git a/examples/localcharm.py b/modules/libjuju/examples/localcharm.py
similarity index 100%
rename from examples/localcharm.py
rename to modules/libjuju/examples/localcharm.py
diff --git a/examples/relate.py b/modules/libjuju/examples/relate.py
similarity index 100%
rename from examples/relate.py
rename to modules/libjuju/examples/relate.py
diff --git a/examples/unitrun.py b/modules/libjuju/examples/unitrun.py
similarity index 100%
rename from examples/unitrun.py
rename to modules/libjuju/examples/unitrun.py
diff --git a/juju/__init__.py b/modules/libjuju/juju/__init__.py
similarity index 100%
rename from juju/__init__.py
rename to modules/libjuju/juju/__init__.py
diff --git a/juju/action.py b/modules/libjuju/juju/action.py
similarity index 100%
rename from juju/action.py
rename to modules/libjuju/juju/action.py
diff --git a/juju/annotation.py b/modules/libjuju/juju/annotation.py
similarity index 100%
rename from juju/annotation.py
rename to modules/libjuju/juju/annotation.py
diff --git a/juju/application.py b/modules/libjuju/juju/application.py
similarity index 100%
rename from juju/application.py
rename to modules/libjuju/juju/application.py
diff --git a/juju/client/__init__.py b/modules/libjuju/juju/client/__init__.py
similarity index 100%
rename from juju/client/__init__.py
rename to modules/libjuju/juju/client/__init__.py
diff --git a/juju/client/_client.py b/modules/libjuju/juju/client/_client.py
similarity index 99%
rename from juju/client/_client.py
rename to modules/libjuju/juju/client/_client.py
index c9fdef2..bfa9e6f 100644
--- a/juju/client/_client.py
+++ b/modules/libjuju/juju/client/_client.py
@@ -18,7 +18,6 @@
}
-
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
@@ -36,7 +35,6 @@
"{}".format(name))
-
class TypeFactory:
@classmethod
def from_connection(cls, connection):
@@ -454,5 +452,3 @@
class VolumeAttachmentsWatcherFacade(TypeFactory):
pass
-
-
diff --git a/juju/client/_client1.py b/modules/libjuju/juju/client/_client1.py
similarity index 99%
rename from juju/client/_client1.py
rename to modules/libjuju/juju/client/_client1.py
index dd84e78..83437bc 100644
--- a/juju/client/_client1.py
+++ b/modules/libjuju/juju/client/_client1.py
@@ -1,8 +1,8 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
+from juju.client.facade import ReturnMapping, Type
class ActionPrunerFacade(Type):
@@ -43,7 +43,7 @@
'WatchForModelConfigChanges': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ModelConfigResult)
async def ModelConfig(self):
@@ -105,7 +105,7 @@
name = 'AgentTools'
version = 1
schema = {'properties': {'UpdateToolsAvailable': {'type': 'object'}}, 'type': 'object'}
-
+
@ReturnMapping(None)
async def UpdateToolsAvailable(self):
@@ -143,7 +143,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(AllWatcherNextResults)
async def Next(self):
@@ -225,7 +225,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ApplicationRelationsWatchResult)
async def Next(self):
@@ -307,7 +307,7 @@
'Watch': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def Rescale(self, entities):
@@ -434,7 +434,7 @@
'Restore': {'properties': {'Params': {'$ref': '#/definitions/RestoreArgs'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(BackupsMetadataResult)
async def Create(self, notes):
@@ -592,7 +592,7 @@
'Result': {'$ref': '#/definitions/BundleChangesResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(BundleChangesResults)
async def GetChanges(self, yaml):
@@ -686,7 +686,7 @@
'WatchForModelConfigChanges': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(CloudSpecResults)
async def CloudSpec(self, entities):
@@ -847,7 +847,7 @@
'WatchApplications': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ApplicationGetConfigResults)
async def ApplicationsConfig(self, entities):
@@ -1149,7 +1149,7 @@
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsResult)
async def APIAddresses(self):
@@ -1540,7 +1540,7 @@
'WatchApplications': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsResult)
async def APIAddresses(self):
@@ -1992,7 +1992,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ApplicationGetConfigResults)
async def ApplicationsConfig(self, entities):
@@ -2915,7 +2915,7 @@
'WatchAll': {'properties': {'Result': {'$ref': '#/definitions/AllWatcherId'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(APIHostPortsResult)
async def APIHostPorts(self):
@@ -3687,7 +3687,7 @@
'Result': {'$ref': '#/definitions/StringsResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(CloudResults)
async def Cloud(self, entities):
@@ -3857,7 +3857,7 @@
'Result': {'$ref': '#/definitions/ErrorResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResult)
async def InvalidateModelCredential(self, reason):
@@ -3919,7 +3919,7 @@
'WatchControllerInfo': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ControllerAPIInfoResults)
async def ControllerInfo(self):
@@ -4257,7 +4257,7 @@
'Result': {'$ref': '#/definitions/RelationStatusWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def PublishIngressNetworkChanges(self, changes):
@@ -4550,7 +4550,7 @@
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsResult)
async def APIAddresses(self):
@@ -4833,7 +4833,7 @@
'WatchExternalControllers': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ExternalControllerInfoResults)
async def ExternalControllerInfo(self, entities):
@@ -4923,7 +4923,7 @@
'WatchForFanConfigChanges': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(FanConfigResult)
async def FanConfig(self):
@@ -5005,7 +5005,7 @@
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ListFirewallRulesResults)
async def ListFirewallRules(self):
@@ -5080,7 +5080,7 @@
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def ReportKeys(self, entity_keys):
@@ -5165,7 +5165,7 @@
'Result': {'$ref': '#/definitions/StringsResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def AddKeys(self, ssh_keys, user):
@@ -5297,7 +5297,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsResults)
async def AuthorisedKeys(self, entities):
@@ -5385,7 +5385,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(LifeResults)
async def Life(self, entities):
@@ -5488,7 +5488,7 @@
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(LogForwardingGetLastSentResults)
async def GetLastSent(self, ids):
@@ -5576,7 +5576,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringResults)
async def LoggingConfig(self, entities):
@@ -5724,7 +5724,7 @@
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ActionResults)
async def Actions(self, entities):
@@ -5893,7 +5893,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(EntitiesResults)
async def AllMachineRemovals(self, entities):
@@ -6175,7 +6175,7 @@
'WatchAPIHostPorts': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsResult)
async def APIAddresses(self):
@@ -6464,7 +6464,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(MeterStatusResults)
async def GetMeterStatus(self, entities):
@@ -6541,7 +6541,7 @@
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(None)
async def AddJujuMachineMetrics(self):
@@ -6646,7 +6646,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(PhaseResults)
async def Phase(self, entities):
@@ -6851,7 +6851,7 @@
'WatchMinionReports': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(SerializedModel)
async def Export(self):
@@ -7065,7 +7065,7 @@
'Watch': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(None)
async def Report(self, migration_id, phase, success):
@@ -7132,7 +7132,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(MigrationStatus)
async def Next(self):
@@ -7303,7 +7303,7 @@
'Prechecks': {'properties': {'Params': {'$ref': '#/definitions/MigrationModelInfo'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(None)
async def Abort(self, model_tag):
@@ -7524,7 +7524,7 @@
'SetSLALevel': {'properties': {'Params': {'$ref': '#/definitions/ModelSLA'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ModelConfigResults)
async def ModelGet(self):
@@ -7712,7 +7712,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(IntResults)
async def ModelEnvironVersion(self, entities):
@@ -7809,7 +7809,7 @@
version = 1
schema = {'properties': {'Next': {'type': 'object'}, 'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(None)
async def Next(self):
@@ -7888,7 +7888,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(OfferStatusWatchResult)
async def Next(self):
@@ -7960,7 +7960,7 @@
'Result': {'$ref': '#/definitions/PayloadListResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(PayloadListResults)
async def List(self, patterns):
@@ -8075,7 +8075,7 @@
'Result': {'$ref': '#/definitions/PayloadResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(PayloadResults)
async def List(self, entities):
@@ -8172,7 +8172,7 @@
version = 1
schema = {'properties': {'Ping': {'type': 'object'}, 'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(None)
async def Ping(self):
@@ -8272,7 +8272,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ProxyConfigResults)
async def ProxyConfig(self, entities):
@@ -8346,7 +8346,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(RelationLifeSuspendedStatusWatchResult)
async def Next(self):
@@ -8420,7 +8420,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(RelationUnitsWatchResult)
async def Next(self):
@@ -8519,7 +8519,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(RemoteApplicationWatchResult)
async def Next(self):
@@ -8858,7 +8858,7 @@
'WatchRemoteRelations': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def ConsumeRemoteRelationChanges(self, changes):
@@ -9183,7 +9183,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(RemoteRelationsWatchResult)
async def Next(self):
@@ -9341,7 +9341,7 @@
'Result': {'$ref': '#/definitions/ResourcesResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(AddPendingResourcesResult)
async def AddPendingResources(self, addcharmwithauthorization, entity, resources):
@@ -9454,7 +9454,7 @@
'Result': {'$ref': '#/definitions/UnitResourcesResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(UnitResourcesResult)
async def GetResourceInfo(self, resource_names):
@@ -9535,7 +9535,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(RetryStrategyResults)
async def RetryStrategy(self, entities):
@@ -9631,7 +9631,7 @@
'Result': {'$ref': '#/definitions/SSHPublicKeysResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(SSHAddressResults)
async def PrivateAddress(self, entities):
@@ -9756,7 +9756,7 @@
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def Claim(self, claims):
@@ -9819,7 +9819,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(StringsWatchResult)
async def Next(self):
@@ -9944,7 +9944,7 @@
'WatchModelResources': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ModelConfigResult)
async def ModelConfig(self):
@@ -10136,7 +10136,7 @@
'WatchUnitAssignments': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def AssignUnits(self, entities):
@@ -10345,7 +10345,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(ErrorResults)
async def FinishUpgradeSeries(self, args):
@@ -10698,7 +10698,7 @@
'Result': {'$ref': '#/definitions/NotifyWatchResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(VersionResults)
async def DesiredVersion(self, entities):
@@ -10887,7 +10887,7 @@
'Result': {'$ref': '#/definitions/UserInfoResults'}},
'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(AddUserResults)
async def AddUser(self, users):
@@ -11031,7 +11031,7 @@
'type': 'object'},
'Stop': {'type': 'object'}},
'type': 'object'}
-
+
@ReturnMapping(MachineStorageIdsWatchResult)
async def Next(self):
@@ -11066,5 +11066,3 @@
reply = await self.rpc(msg)
return reply
-
-
diff --git a/juju/client/_client2.py b/modules/libjuju/juju/client/_client2.py
similarity index 99%
rename from juju/client/_client2.py
rename to modules/libjuju/juju/client/_client2.py
index 4b5b518..416faab 100644
--- a/juju/client/_client2.py
+++ b/modules/libjuju/juju/client/_client2.py
@@ -1,8 +1,8 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
+from juju.client.facade import ReturnMapping, Type
class ActionFacade(Type):
@@ -7533,5 +7533,3 @@
reply = await self.rpc(msg)
return reply
-
-
diff --git a/juju/client/_client3.py b/modules/libjuju/juju/client/_client3.py
similarity index 99%
rename from juju/client/_client3.py
rename to modules/libjuju/juju/client/_client3.py
index a63faa8..67840db 100644
--- a/juju/client/_client3.py
+++ b/modules/libjuju/juju/client/_client3.py
@@ -1,8 +1,8 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
+from juju.client.facade import ReturnMapping, Type
class ActionFacade(Type):
@@ -6747,5 +6747,3 @@
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
-
-
diff --git a/juju/client/_client4.py b/modules/libjuju/juju/client/_client4.py
similarity index 99%
rename from juju/client/_client4.py
rename to modules/libjuju/juju/client/_client4.py
index 2795b2c..2336d2c 100644
--- a/juju/client/_client4.py
+++ b/modules/libjuju/juju/client/_client4.py
@@ -1,8 +1,8 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
+from juju.client.facade import ReturnMapping, Type
class ApplicationFacade(Type):
@@ -4624,5 +4624,3 @@
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
-
-
diff --git a/juju/client/_client5.py b/modules/libjuju/juju/client/_client5.py
similarity index 99%
rename from juju/client/_client5.py
rename to modules/libjuju/juju/client/_client5.py
index 51d36f3..415aeae 100644
--- a/juju/client/_client5.py
+++ b/modules/libjuju/juju/client/_client5.py
@@ -1,8 +1,8 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
from juju.client._definitions import *
+from juju.client.facade import ReturnMapping, Type
class ApplicationFacade(Type):
@@ -5320,5 +5320,3 @@
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
-
-
diff --git a/juju/client/_client7.py b/modules/libjuju/juju/client/_client7.py
similarity index 100%
rename from juju/client/_client7.py
rename to modules/libjuju/juju/client/_client7.py
diff --git a/juju/client/_client8.py b/modules/libjuju/juju/client/_client8.py
similarity index 100%
rename from juju/client/_client8.py
rename to modules/libjuju/juju/client/_client8.py
diff --git a/juju/client/_client9.py b/modules/libjuju/juju/client/_client9.py
similarity index 100%
rename from juju/client/_client9.py
rename to modules/libjuju/juju/client/_client9.py
diff --git a/juju/client/_definitions.py b/modules/libjuju/juju/client/_definitions.py
similarity index 99%
rename from juju/client/_definitions.py
rename to modules/libjuju/juju/client/_definitions.py
index 7122308..2d25e39 100644
--- a/juju/client/_definitions.py
+++ b/modules/libjuju/juju/client/_definitions.py
@@ -1,7 +1,7 @@
# DO NOT CHANGE THIS FILE! This file is auto-generated by facade.py.
# Changes will be overwritten/lost when the file is regenerated.
-from juju.client.facade import Type, ReturnMapping
+from juju.client.facade import ReturnMapping, Type
class APIHostPortsResult(Type):
@@ -11056,5 +11056,3 @@
results : typing.Sequence[~ZoneResult]
'''
self.results = [ZoneResult.from_json(o) for o in results or []]
-
-
diff --git a/juju/client/client.py b/modules/libjuju/juju/client/client.py
similarity index 100%
rename from juju/client/client.py
rename to modules/libjuju/juju/client/client.py
diff --git a/juju/client/codegen.py b/modules/libjuju/juju/client/codegen.py
similarity index 100%
rename from juju/client/codegen.py
rename to modules/libjuju/juju/client/codegen.py
diff --git a/juju/client/connection.py b/modules/libjuju/juju/client/connection.py
similarity index 100%
rename from juju/client/connection.py
rename to modules/libjuju/juju/client/connection.py
diff --git a/juju/client/connector.py b/modules/libjuju/juju/client/connector.py
similarity index 100%
rename from juju/client/connector.py
rename to modules/libjuju/juju/client/connector.py
diff --git a/juju/client/facade.py b/modules/libjuju/juju/client/facade.py
similarity index 100%
rename from juju/client/facade.py
rename to modules/libjuju/juju/client/facade.py
diff --git a/juju/client/gocookies.py b/modules/libjuju/juju/client/gocookies.py
similarity index 100%
rename from juju/client/gocookies.py
rename to modules/libjuju/juju/client/gocookies.py
diff --git a/juju/client/jujudata.py b/modules/libjuju/juju/client/jujudata.py
similarity index 100%
rename from juju/client/jujudata.py
rename to modules/libjuju/juju/client/jujudata.py
diff --git a/juju/client/overrides.py b/modules/libjuju/juju/client/overrides.py
similarity index 100%
rename from juju/client/overrides.py
rename to modules/libjuju/juju/client/overrides.py
diff --git a/juju/client/runner.py b/modules/libjuju/juju/client/runner.py
similarity index 100%
rename from juju/client/runner.py
rename to modules/libjuju/juju/client/runner.py
diff --git a/juju/client/schemas-juju-2.0.0.json b/modules/libjuju/juju/client/schemas-juju-2.0.0.json
similarity index 100%
rename from juju/client/schemas-juju-2.0.0.json
rename to modules/libjuju/juju/client/schemas-juju-2.0.0.json
diff --git a/juju/client/schemas-juju-2.0.1.json b/modules/libjuju/juju/client/schemas-juju-2.0.1.json
similarity index 100%
rename from juju/client/schemas-juju-2.0.1.json
rename to modules/libjuju/juju/client/schemas-juju-2.0.1.json
diff --git a/juju/client/schemas-juju-2.0.2.json b/modules/libjuju/juju/client/schemas-juju-2.0.2.json
similarity index 100%
rename from juju/client/schemas-juju-2.0.2.json
rename to modules/libjuju/juju/client/schemas-juju-2.0.2.json
diff --git a/juju/client/schemas-juju-2.0.3.json b/modules/libjuju/juju/client/schemas-juju-2.0.3.json
similarity index 100%
rename from juju/client/schemas-juju-2.0.3.json
rename to modules/libjuju/juju/client/schemas-juju-2.0.3.json
diff --git a/juju/client/schemas-juju-2.1.0.json b/modules/libjuju/juju/client/schemas-juju-2.1.0.json
similarity index 100%
rename from juju/client/schemas-juju-2.1.0.json
rename to modules/libjuju/juju/client/schemas-juju-2.1.0.json
diff --git a/juju/client/schemas-juju-2.1.1.json b/modules/libjuju/juju/client/schemas-juju-2.1.1.json
similarity index 100%
rename from juju/client/schemas-juju-2.1.1.json
rename to modules/libjuju/juju/client/schemas-juju-2.1.1.json
diff --git a/juju/client/schemas-juju-2.1.2.json b/modules/libjuju/juju/client/schemas-juju-2.1.2.json
similarity index 100%
rename from juju/client/schemas-juju-2.1.2.json
rename to modules/libjuju/juju/client/schemas-juju-2.1.2.json
diff --git a/juju/client/schemas-juju-2.2-alpha1.json b/modules/libjuju/juju/client/schemas-juju-2.2-alpha1.json
similarity index 100%
rename from juju/client/schemas-juju-2.2-alpha1.json
rename to modules/libjuju/juju/client/schemas-juju-2.2-alpha1.json
diff --git a/juju/client/schemas-juju-2.2-beta1.json b/modules/libjuju/juju/client/schemas-juju-2.2-beta1.json
similarity index 100%
rename from juju/client/schemas-juju-2.2-beta1.json
rename to modules/libjuju/juju/client/schemas-juju-2.2-beta1.json
diff --git a/juju/client/schemas-juju-2.2-beta2.json b/modules/libjuju/juju/client/schemas-juju-2.2-beta2.json
similarity index 100%
rename from juju/client/schemas-juju-2.2-beta2.json
rename to modules/libjuju/juju/client/schemas-juju-2.2-beta2.json
diff --git a/juju/client/schemas-juju-2.2-rc1.json b/modules/libjuju/juju/client/schemas-juju-2.2-rc1.json
similarity index 100%
rename from juju/client/schemas-juju-2.2-rc1.json
rename to modules/libjuju/juju/client/schemas-juju-2.2-rc1.json
diff --git a/juju/client/schemas-juju-2.3-alpha1.json b/modules/libjuju/juju/client/schemas-juju-2.3-alpha1.json
similarity index 100%
rename from juju/client/schemas-juju-2.3-alpha1.json
rename to modules/libjuju/juju/client/schemas-juju-2.3-alpha1.json
diff --git a/juju/client/schemas-juju-2.5-rc1.json b/modules/libjuju/juju/client/schemas-juju-2.5-rc1.json
similarity index 100%
rename from juju/client/schemas-juju-2.5-rc1.json
rename to modules/libjuju/juju/client/schemas-juju-2.5-rc1.json
diff --git a/juju/client/schemas-juju-latest.json b/modules/libjuju/juju/client/schemas-juju-latest.json
similarity index 100%
rename from juju/client/schemas-juju-latest.json
rename to modules/libjuju/juju/client/schemas-juju-latest.json
diff --git a/juju/cloud.py b/modules/libjuju/juju/cloud.py
similarity index 100%
rename from juju/cloud.py
rename to modules/libjuju/juju/cloud.py
diff --git a/juju/constraints.py b/modules/libjuju/juju/constraints.py
similarity index 100%
rename from juju/constraints.py
rename to modules/libjuju/juju/constraints.py
diff --git a/juju/controller.py b/modules/libjuju/juju/controller.py
similarity index 100%
rename from juju/controller.py
rename to modules/libjuju/juju/controller.py
diff --git a/juju/credential.py b/modules/libjuju/juju/credential.py
similarity index 100%
rename from juju/credential.py
rename to modules/libjuju/juju/credential.py
diff --git a/juju/delta.py b/modules/libjuju/juju/delta.py
similarity index 100%
rename from juju/delta.py
rename to modules/libjuju/juju/delta.py
diff --git a/juju/errors.py b/modules/libjuju/juju/errors.py
similarity index 100%
rename from juju/errors.py
rename to modules/libjuju/juju/errors.py
diff --git a/juju/exceptions.py b/modules/libjuju/juju/exceptions.py
similarity index 100%
rename from juju/exceptions.py
rename to modules/libjuju/juju/exceptions.py
diff --git a/juju/juju.py b/modules/libjuju/juju/juju.py
similarity index 100%
rename from juju/juju.py
rename to modules/libjuju/juju/juju.py
diff --git a/juju/loop.py b/modules/libjuju/juju/loop.py
similarity index 100%
rename from juju/loop.py
rename to modules/libjuju/juju/loop.py
diff --git a/juju/machine.py b/modules/libjuju/juju/machine.py
similarity index 100%
rename from juju/machine.py
rename to modules/libjuju/juju/machine.py
diff --git a/juju/model.py b/modules/libjuju/juju/model.py
similarity index 100%
rename from juju/model.py
rename to modules/libjuju/juju/model.py
diff --git a/juju/placement.py b/modules/libjuju/juju/placement.py
similarity index 100%
rename from juju/placement.py
rename to modules/libjuju/juju/placement.py
diff --git a/juju/provisioner.py b/modules/libjuju/juju/provisioner.py
similarity index 99%
rename from juju/provisioner.py
rename to modules/libjuju/juju/provisioner.py
index da8be16..2886574 100644
--- a/juju/provisioner.py
+++ b/modules/libjuju/juju/provisioner.py
@@ -18,7 +18,6 @@
[re.compile(r"aarch64"), "arm64"],
[re.compile(r"ppc64|ppc64el|ppc64le"), "ppc64el"],
[re.compile(r"s390x?"), "s390x"],
-
]
diff --git a/juju/relation.py b/modules/libjuju/juju/relation.py
similarity index 100%
rename from juju/relation.py
rename to modules/libjuju/juju/relation.py
diff --git a/juju/tag.py b/modules/libjuju/juju/tag.py
similarity index 100%
rename from juju/tag.py
rename to modules/libjuju/juju/tag.py
diff --git a/juju/unit.py b/modules/libjuju/juju/unit.py
similarity index 100%
rename from juju/unit.py
rename to modules/libjuju/juju/unit.py
diff --git a/juju/user.py b/modules/libjuju/juju/user.py
similarity index 100%
rename from juju/user.py
rename to modules/libjuju/juju/user.py
diff --git a/juju/utils.py b/modules/libjuju/juju/utils.py
similarity index 100%
rename from juju/utils.py
rename to modules/libjuju/juju/utils.py
diff --git a/scripts/gendoc b/modules/libjuju/scripts/gendoc
similarity index 100%
rename from scripts/gendoc
rename to modules/libjuju/scripts/gendoc
diff --git a/modules/libjuju/setup.py b/modules/libjuju/setup.py
new file mode 100644
index 0000000..7134677
--- /dev/null
+++ b/modules/libjuju/setup.py
@@ -0,0 +1,61 @@
+# Copyright 2016 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from pathlib import Path
+from setuptools import setup, find_packages
+
+here = Path(__file__).absolute().parent
+readme = here / 'docs' / 'readme.rst'
+changelog = here / 'docs' / 'changelog.rst'
+long_description = '{}\n\n{}'.format(
+ readme.read_text(),
+ changelog.read_text()
+ )
+version = here / 'VERSION'
+
+setup(
+ name='juju',
+ version=version.read_text().strip(),
+ packages=find_packages(
+ exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
+ install_requires=[
+ 'macaroonbakery>=1.1,<2.0',
+ 'pyRFC3339>=1.0,<2.0',
+ 'pyyaml>=3.0,<=4.2',
+ 'theblues>=0.3.8,<1.0',
+ 'websockets>=7.0,<8.0',
+ 'paramiko>=2.4.0,<3.0.0',
+ 'pyasn1>=0.4.4',
+ ],
+ include_package_data=True,
+ maintainer='Juju Ecosystem Engineering',
+ maintainer_email='juju@lists.ubuntu.com',
+ description=('Python library for Juju'),
+ long_description=long_description,
+ url='https://github.com/juju/python-libjuju',
+ license='Apache 2',
+ classifiers=[
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ ],
+ entry_points={
+ 'console_scripts': [
+ ],
+ },
+)
diff --git a/juju/__init__.py b/modules/libjuju/tests/__init__.py
similarity index 100%
copy from juju/__init__.py
copy to modules/libjuju/tests/__init__.py
diff --git a/modules/libjuju/tests/base.py b/modules/libjuju/tests/base.py
new file mode 100644
index 0000000..600372c
--- /dev/null
+++ b/modules/libjuju/tests/base.py
@@ -0,0 +1,148 @@
+import inspect
+import subprocess
+import uuid
+from contextlib import contextmanager
+from pathlib import Path
+
+import mock
+from juju.client.jujudata import FileJujuData
+from juju.controller import Controller
+
+import pytest
+
+
+def is_bootstrapped():
+ try:
+ result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
+ return (
+ result.returncode == 0 and
+ len(result.stdout.decode().strip()) > 0)
+ except FileNotFoundError:
+ return False
+
+
+bootstrapped = pytest.mark.skipif(
+ not is_bootstrapped(),
+ reason='bootstrapped Juju environment required')
+
+test_run_nonce = uuid.uuid4().hex[-4:]
+
+
+class CleanController():
+ """
+ Context manager that automatically connects and disconnects from
+ the currently active controller.
+
+ Note: Unlike CleanModel, this will not create a new controller for you,
+ and an active controller must already be available.
+ """
+ def __init__(self):
+ self._controller = None
+
+ async def __aenter__(self):
+ self._controller = Controller()
+ await self._controller.connect()
+ return self._controller
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self._controller.disconnect()
+
+
+class CleanModel():
+ """
+ Context manager that automatically connects to the currently active
+ controller, adds a fresh model, returns the connection to that model,
+ and automatically disconnects and cleans up the model.
+
+ The new model is also set as the current default for the controller
+ connection.
+ """
+ def __init__(self, bakery_client=None):
+ self._controller = None
+ self._model = None
+ self._model_uuid = None
+ self._bakery_client = bakery_client
+
+ async def __aenter__(self):
+ model_nonce = uuid.uuid4().hex[-4:]
+ frame = inspect.stack()[1]
+ test_name = frame.function.replace('_', '-')
+ jujudata = TestJujuData()
+ self._controller = Controller(
+ jujudata=jujudata,
+ bakery_client=self._bakery_client,
+ )
+ controller_name = jujudata.current_controller()
+ user_name = jujudata.accounts()[controller_name]['user']
+ await self._controller.connect(controller_name)
+
+ model_name = 'test-{}-{}-{}'.format(
+ test_run_nonce,
+ test_name,
+ model_nonce,
+ )
+ self._model = await self._controller.add_model(model_name)
+
+ # Change the JujuData instance so that it will return the new
+ # model as the current model name, so that we'll connect
+ # to it by default.
+ jujudata.set_model(
+ controller_name,
+ user_name + "/" + model_name,
+ self._model.info.uuid,
+ )
+
+ # save the model UUID in case test closes model
+ self._model_uuid = self._model.info.uuid
+
+ return self._model
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self._model.disconnect()
+ await self._controller.destroy_model(self._model_uuid)
+ await self._controller.disconnect()
+
+
+class TestJujuData(FileJujuData):
+ def __init__(self):
+ self.__controller_name = None
+ self.__model_name = None
+ self.__model_uuid = None
+ super().__init__()
+
+ def set_model(self, controller_name, model_name, model_uuid):
+ self.__controller_name = controller_name
+ self.__model_name = model_name
+ self.__model_uuid = model_uuid
+
+ def current_model(self, *args, **kwargs):
+ return self.__model_name or super().current_model(*args, **kwargs)
+
+ def models(self):
+ all_models = super().models()
+ if self.__model_name is None:
+ return all_models
+ all_models.setdefault(self.__controller_name, {})
+ all_models[self.__controller_name].setdefault('models', {})
+ cmodels = all_models[self.__controller_name]['models']
+ cmodels[self.__model_name] = {'uuid': self.__model_uuid}
+ return all_models
+
+
+class AsyncMock(mock.MagicMock):
+ async def __call__(self, *args, **kwargs):
+ return super().__call__(*args, **kwargs)
+
+
+@contextmanager
+def patch_file(filename):
+ """
+ "Patch" a file so that its current contents are automatically restored
+ when the context is exited.
+ """
+ filepath = Path(filename).expanduser()
+ data = filepath.read_bytes()
+ try:
+ yield
+ finally:
+ filepath.write_bytes(data)
diff --git a/tests/bundle/bundle.yaml b/modules/libjuju/tests/bundle/bundle.yaml
similarity index 100%
rename from tests/bundle/bundle.yaml
rename to modules/libjuju/tests/bundle/bundle.yaml
diff --git a/tests/bundle/invalid.yaml b/modules/libjuju/tests/bundle/invalid.yaml
similarity index 100%
rename from tests/bundle/invalid.yaml
rename to modules/libjuju/tests/bundle/invalid.yaml
diff --git a/tests/bundle/mini-bundle.yaml b/modules/libjuju/tests/bundle/mini-bundle.yaml
similarity index 100%
rename from tests/bundle/mini-bundle.yaml
rename to modules/libjuju/tests/bundle/mini-bundle.yaml
diff --git a/tests/charm/metadata.yaml b/modules/libjuju/tests/charm/metadata.yaml
similarity index 100%
rename from tests/charm/metadata.yaml
rename to modules/libjuju/tests/charm/metadata.yaml
diff --git a/juju/__init__.py b/modules/libjuju/tests/integration/__init__.py
similarity index 100%
copy from juju/__init__.py
copy to modules/libjuju/tests/integration/__init__.py
diff --git a/tests/integration/bundle/bundle-resource-rev.yaml b/modules/libjuju/tests/integration/bundle/bundle-resource-rev.yaml
similarity index 100%
rename from tests/integration/bundle/bundle-resource-rev.yaml
rename to modules/libjuju/tests/integration/bundle/bundle-resource-rev.yaml
diff --git a/tests/integration/bundle/bundle.yaml b/modules/libjuju/tests/integration/bundle/bundle.yaml
similarity index 100%
rename from tests/integration/bundle/bundle.yaml
rename to modules/libjuju/tests/integration/bundle/bundle.yaml
diff --git a/tests/integration/cert.pem b/modules/libjuju/tests/integration/cert.pem
similarity index 100%
rename from tests/integration/cert.pem
rename to modules/libjuju/tests/integration/cert.pem
diff --git a/tests/integration/charm/metadata.yaml b/modules/libjuju/tests/integration/charm/metadata.yaml
similarity index 100%
rename from tests/integration/charm/metadata.yaml
rename to modules/libjuju/tests/integration/charm/metadata.yaml
diff --git a/tests/integration/key.pem b/modules/libjuju/tests/integration/key.pem
similarity index 100%
rename from tests/integration/key.pem
rename to modules/libjuju/tests/integration/key.pem
diff --git a/tests/integration/test_application.py b/modules/libjuju/tests/integration/test_application.py
similarity index 100%
rename from tests/integration/test_application.py
rename to modules/libjuju/tests/integration/test_application.py
diff --git a/tests/integration/test_client.py b/modules/libjuju/tests/integration/test_client.py
similarity index 100%
rename from tests/integration/test_client.py
rename to modules/libjuju/tests/integration/test_client.py
diff --git a/tests/integration/test_connection.py b/modules/libjuju/tests/integration/test_connection.py
similarity index 100%
rename from tests/integration/test_connection.py
rename to modules/libjuju/tests/integration/test_connection.py
diff --git a/tests/integration/test_controller.py b/modules/libjuju/tests/integration/test_controller.py
similarity index 100%
rename from tests/integration/test_controller.py
rename to modules/libjuju/tests/integration/test_controller.py
diff --git a/tests/integration/test_errors.py b/modules/libjuju/tests/integration/test_errors.py
similarity index 100%
rename from tests/integration/test_errors.py
rename to modules/libjuju/tests/integration/test_errors.py
diff --git a/tests/integration/test_macaroon_auth.py b/modules/libjuju/tests/integration/test_macaroon_auth.py
similarity index 100%
rename from tests/integration/test_macaroon_auth.py
rename to modules/libjuju/tests/integration/test_macaroon_auth.py
diff --git a/tests/integration/test_machine.py b/modules/libjuju/tests/integration/test_machine.py
similarity index 100%
rename from tests/integration/test_machine.py
rename to modules/libjuju/tests/integration/test_machine.py
diff --git a/tests/integration/test_model.py b/modules/libjuju/tests/integration/test_model.py
similarity index 94%
rename from tests/integration/test_model.py
rename to modules/libjuju/tests/integration/test_model.py
index 93695b5..58766b4 100644
--- a/tests/integration/test_model.py
+++ b/modules/libjuju/tests/integration/test_model.py
@@ -29,9 +29,11 @@
async def test_deploy_local_bundle_dir(event_loop):
tests_dir = Path(__file__).absolute().parent.parent
bundle_path = tests_dir / 'bundle'
+ mini_bundle_file_path = bundle_path / 'mini-bundle.yaml'
async with base.CleanModel() as model:
await model.deploy(str(bundle_path))
+ await model.deploy(str(mini_bundle_file_path))
wordpress = model.applications.get('wordpress')
mysql = model.applications.get('mysql')
@@ -83,6 +85,28 @@
@base.bootstrapped
@pytest.mark.asyncio
+async def test_deploy_invalid_bundle(event_loop):
+ tests_dir = Path(__file__).absolute().parent.parent
+ bundle_path = tests_dir / 'bundle' / 'invalid.yaml'
+ async with base.CleanModel() as model:
+ with pytest.raises(JujuError):
+ await model.deploy(str(bundle_path))
+
+
+@base.bootstrapped
+@pytest.mark.asyncio
+async def test_deploy_local_charm(event_loop):
+ from pathlib import Path
+ tests_dir = Path(__file__).absolute().parent.parent
+ charm_path = tests_dir / 'charm'
+
+ async with base.CleanModel() as model:
+ await model.deploy(str(charm_path))
+ assert 'charm' in model.applications
+
+
+@base.bootstrapped
+@pytest.mark.asyncio
async def test_deploy_bundle(event_loop):
async with base.CleanModel() as model:
await model.deploy('bundle/wiki-simple')
diff --git a/tests/integration/test_unit.py b/modules/libjuju/tests/integration/test_unit.py
similarity index 100%
rename from tests/integration/test_unit.py
rename to modules/libjuju/tests/integration/test_unit.py
diff --git a/tests/unit/__init__.py b/modules/libjuju/tests/unit/__init__.py
similarity index 100%
rename from tests/unit/__init__.py
rename to modules/libjuju/tests/unit/__init__.py
diff --git a/tests/unit/test_client.py b/modules/libjuju/tests/unit/test_client.py
similarity index 100%
rename from tests/unit/test_client.py
rename to modules/libjuju/tests/unit/test_client.py
diff --git a/tests/unit/test_connection.py b/modules/libjuju/tests/unit/test_connection.py
similarity index 100%
rename from tests/unit/test_connection.py
rename to modules/libjuju/tests/unit/test_connection.py
diff --git a/tests/unit/test_constraints.py b/modules/libjuju/tests/unit/test_constraints.py
similarity index 100%
rename from tests/unit/test_constraints.py
rename to modules/libjuju/tests/unit/test_constraints.py
diff --git a/tests/unit/test_controller.py b/modules/libjuju/tests/unit/test_controller.py
similarity index 100%
rename from tests/unit/test_controller.py
rename to modules/libjuju/tests/unit/test_controller.py
diff --git a/tests/unit/test_gocookies.py b/modules/libjuju/tests/unit/test_gocookies.py
similarity index 100%
rename from tests/unit/test_gocookies.py
rename to modules/libjuju/tests/unit/test_gocookies.py
diff --git a/tests/unit/test_loop.py b/modules/libjuju/tests/unit/test_loop.py
similarity index 100%
rename from tests/unit/test_loop.py
rename to modules/libjuju/tests/unit/test_loop.py
diff --git a/tests/unit/test_model.py b/modules/libjuju/tests/unit/test_model.py
similarity index 100%
rename from tests/unit/test_model.py
rename to modules/libjuju/tests/unit/test_model.py
diff --git a/tests/unit/test_overrides.py b/modules/libjuju/tests/unit/test_overrides.py
similarity index 100%
rename from tests/unit/test_overrides.py
rename to modules/libjuju/tests/unit/test_overrides.py
diff --git a/tests/unit/test_placement.py b/modules/libjuju/tests/unit/test_placement.py
similarity index 100%
rename from tests/unit/test_placement.py
rename to modules/libjuju/tests/unit/test_placement.py
diff --git a/tests/unit/test_registration_string.py b/modules/libjuju/tests/unit/test_registration_string.py
similarity index 100%
rename from tests/unit/test_registration_string.py
rename to modules/libjuju/tests/unit/test_registration_string.py
diff --git a/modules/libjuju/tox.ini b/modules/libjuju/tox.ini
new file mode 100644
index 0000000..350a1fc
--- /dev/null
+++ b/modules/libjuju/tox.ini
@@ -0,0 +1,66 @@
+# Tox (http://tox.testrun.org/) is a tool for running tests
+# in multiple virtualenvs. This configuration file will run the
+# test suite on all supported python versions. To use it, "pip install tox"
+# and then run "tox" from this directory.
+
+[tox]
+envlist = lint,py3
+skipsdist=True
+
+[pytest]
+markers =
+ serial: mark a test that must run by itself
+
+[testenv]
+basepython=python3
+usedevelop=True
+# for testing with other python versions
+commands = py.test --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
+passenv =
+ HOME
+ TEST_AGENTS
+deps =
+ asynctest
+ ipdb
+ mock
+ pytest
+ pytest-asyncio
+ pytest-xdist
+ Twine
+ # use fork to pick up fix for https://github.com/aaugustin/websockets/pull/528
+ git+https://github.com/johnsca/websockets@bug/client-redirects#egg=websockets
+
+[testenv:py3]
+# default tox env excludes integration and serial tests
+commands =
+ # These need to be installed in a specific order
+ pip install urllib3==1.22
+ pip install pylxd
+ py.test --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
+
+[testenv:lint]
+envdir = {toxworkdir}/py3
+commands =
+ flake8 --ignore E501,W504 {posargs} juju tests
+deps =
+ flake8
+
+[testenv:integration]
+envdir = {toxworkdir}/py3
+commands =
+ # These need to be installed in a specific order
+ pip install urllib3==1.22
+ pip install pylxd
+ py.test --tb native -ra -v -s -n auto -k 'integration' -m 'not serial' {posargs}
+
+[testenv:serial]
+# tests that can't be run in parallel
+envdir = {toxworkdir}/py3
+commands = py.test --tb native -ra -v -s {posargs:-m 'serial'}
+
+[testenv:example]
+envdir = {toxworkdir}/py3
+commands = python {posargs}
+
+[flake8]
+exclude = juju/client/_*
diff --git a/n2vc/__init__.py b/n2vc/__init__.py
new file mode 100644
index 0000000..889c3ab
--- /dev/null
+++ b/n2vc/__init__.py
@@ -0,0 +1 @@
+version = '0.0.2'
diff --git a/n2vc/vnf.py b/n2vc/vnf.py
new file mode 100644
index 0000000..1bdfe2f
--- /dev/null
+++ b/n2vc/vnf.py
@@ -0,0 +1,1304 @@
+import asyncio
+import logging
+import os
+import os.path
+import re
+import shlex
+import ssl
+import subprocess
+import sys
+# import time
+
+# FIXME: this should load the juju inside or modules without having to
+# explicitly install it. Check why it's not working.
+# Load our subtree of the juju library
+path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+path = os.path.join(path, "modules/libjuju/")
+if path not in sys.path:
+ sys.path.insert(1, path)
+
+from juju.controller import Controller
+from juju.model import ModelObserver
+from juju.errors import JujuAPIError, JujuError
+
+# We might need this to connect to the websocket securely, but test and verify.
+try:
+ ssl._create_default_https_context = ssl._create_unverified_context
+except AttributeError:
+ # Legacy Python doesn't verify by default (see pep-0476)
+ # https://www.python.org/dev/peps/pep-0476/
+ pass
+
+
+# Custom exceptions
+class JujuCharmNotFound(Exception):
+ """The Charm can't be found or is not readable."""
+
+
+class JujuApplicationExists(Exception):
+ """The Application already exists."""
+
+
+class N2VCPrimitiveExecutionFailed(Exception):
+ """Something failed while attempting to execute a primitive."""
+
+
+class NetworkServiceDoesNotExist(Exception):
+ """The Network Service being acted against does not exist."""
+
+
+# Quiet the debug logging
+logging.getLogger('websockets.protocol').setLevel(logging.INFO)
+logging.getLogger('juju.client.connection').setLevel(logging.WARN)
+logging.getLogger('juju.model').setLevel(logging.WARN)
+logging.getLogger('juju.machine').setLevel(logging.WARN)
+
+
+class VCAMonitor(ModelObserver):
+ """Monitor state changes within the Juju Model."""
+ log = None
+ ns_name = None
+ applications = {}
+
+ def __init__(self, ns_name):
+ self.log = logging.getLogger(__name__)
+
+ self.ns_name = ns_name
+
+ def AddApplication(self, application_name, callback, *callback_args):
+ if application_name not in self.applications:
+ self.applications[application_name] = {
+ 'callback': callback,
+ 'callback_args': callback_args
+ }
+
+ def RemoveApplication(self, application_name):
+ if application_name in self.applications:
+ del self.applications[application_name]
+
+ async def on_change(self, delta, old, new, model):
+ """React to changes in the Juju model."""
+
+ if delta.entity == "unit":
+ # Ignore change events from other applications
+ if delta.data['application'] not in self.applications.keys():
+ return
+
+ try:
+
+ application_name = delta.data['application']
+
+ callback = self.applications[application_name]['callback']
+ callback_args = \
+ self.applications[application_name]['callback_args']
+
+ if old and new:
+ # Fire off a callback with the application state
+ if callback:
+ callback(
+ self.ns_name,
+ delta.data['application'],
+ new.workload_status,
+ new.workload_status_message,
+ *callback_args)
+
+ if old and not new:
+ # This is a charm being removed
+ if callback:
+ callback(
+ self.ns_name,
+ delta.data['application'],
+ "removed",
+ "",
+ *callback_args)
+ except Exception as e:
+ self.log.debug("[1] notify_callback exception: {}".format(e))
+
+ elif delta.entity == "action":
+ # TODO: Decide how we want to notify the user of actions
+
+ # uuid = delta.data['id'] # The Action's unique id
+ # msg = delta.data['message'] # The output of the action
+ #
+ # if delta.data['status'] == "pending":
+ # # The action is queued
+ # pass
+ # elif delta.data['status'] == "completed""
+ # # The action was successful
+ # pass
+ # elif delta.data['status'] == "failed":
+ # # The action failed.
+ # pass
+
+ pass
+
+########
+# TODO
+#
+# Create unique models per network service
+# Document all public functions
+
+
+class N2VC:
+ def __init__(self,
+ log=None,
+ server='127.0.0.1',
+ port=17070,
+ user='admin',
+ secret=None,
+ artifacts=None,
+ loop=None,
+ ):
+ """Initialize N2VC
+
+ :param vcaconfig dict A dictionary containing the VCA configuration
+
+ :param artifacts str The directory where charms required by a vnfd are
+ stored.
+
+ :Example:
+ n2vc = N2VC(vcaconfig={
+ 'secret': 'MzI3MDJhOTYxYmM0YzRjNTJiYmY1Yzdm',
+ 'user': 'admin',
+ 'ip-address': '10.44.127.137',
+ 'port': 17070,
+ 'artifacts': '/path/to/charms'
+ })
+ """
+
+ # Initialize instance-level variables
+ self.api = None
+ self.log = None
+ self.controller = None
+ self.connecting = False
+ self.authenticated = False
+
+ # For debugging
+ self.refcount = {
+ 'controller': 0,
+ 'model': 0,
+ }
+
+ self.models = {}
+
+ # Model Observers
+ self.monitors = {}
+
+ # VCA config
+ self.hostname = ""
+ self.port = 17070
+ self.username = ""
+ self.secret = ""
+
+ if log:
+ self.log = log
+ else:
+ self.log = logging.getLogger(__name__)
+
+ # Quiet websocket traffic
+ logging.getLogger('websockets.protocol').setLevel(logging.INFO)
+ logging.getLogger('juju.client.connection').setLevel(logging.WARN)
+ logging.getLogger('model').setLevel(logging.WARN)
+ # logging.getLogger('websockets.protocol').setLevel(logging.DEBUG)
+
+ self.log.debug('JujuApi: instantiated')
+
+ self.server = server
+ self.port = port
+
+ self.secret = secret
+ if user.startswith('user-'):
+ self.user = user
+ else:
+ self.user = 'user-{}'.format(user)
+
+ self.endpoint = '%s:%d' % (server, int(port))
+
+ self.artifacts = artifacts
+
+ self.loop = loop or asyncio.get_event_loop()
+
+ def __del__(self):
+ """Close any open connections."""
+ yield self.logout()
+
+ def notify_callback(self, model_name, application_name, status, message,
+ callback=None, *callback_args):
+ try:
+ if callback:
+ callback(
+ model_name,
+ application_name,
+ status, message,
+ *callback_args,
+ )
+ except Exception as e:
+ self.log.error("[0] notify_callback exception {}".format(e))
+ raise e
+ return True
+
+ # Public methods
+ async def Relate(self, model_name, vnfd):
+ """Create a relation between the charm-enabled VDUs in a VNF.
+
+ The Relation mapping has two parts: the id of the vdu owning the endpoint, and the name of the endpoint.
+
+ vdu:
+ ...
+ relation:
+ - provides: dataVM:db
+ requires: mgmtVM:app
+
+ This tells N2VC that the charm referred to by the dataVM vdu offers a relation named 'db', and the mgmtVM vdu has an 'app' endpoint that should be connected to a database.
+
+    :param str model_name: The name or unique id of the network service.
+ :param dict vnfd: The parsed yaml VNF descriptor.
+ """
+
+ # Currently, the call to Relate() is made automatically after the
+ # deployment of each charm; if the relation depends on a charm that
+ # hasn't been deployed yet, the call will fail silently. This will
+ # prevent an API breakage, with the intent of making this an explicitly
+ # required call in a more object-oriented refactor of the N2VC API.
+
+ configs = []
+ vnf_config = vnfd.get("vnf-configuration")
+ if vnf_config:
+ juju = vnf_config['juju']
+ if juju:
+ configs.append(vnf_config)
+
+ for vdu in vnfd['vdu']:
+ vdu_config = vdu.get('vdu-configuration')
+ if vdu_config:
+ juju = vdu_config['juju']
+ if juju:
+ configs.append(vdu_config)
+
+ def _get_application_name(name):
+ """Get the application name that's mapped to a vnf/vdu."""
+ vnf_member_index = 0
+ vnf_name = vnfd['name']
+
+ for vdu in vnfd.get('vdu'):
+ # Compare the named portion of the relation to the vdu's id
+ if vdu['id'] == name:
+ application_name = self.FormatApplicationName(
+ model_name,
+ vnf_name,
+ str(vnf_member_index),
+ )
+ return application_name
+ else:
+ vnf_member_index += 1
+
+ return None
+
+ # Loop through relations
+ for cfg in configs:
+ if 'juju' in cfg:
+ if 'relation' in juju:
+ for rel in juju['relation']:
+ try:
+
+ # get the application name for the provides
+ (name, endpoint) = rel['provides'].split(':')
+ application_name = _get_application_name(name)
+
+ provides = "{}:{}".format(
+ application_name,
+ endpoint
+ )
+
+                            # get the application name for the requires
+ (name, endpoint) = rel['requires'].split(':')
+ application_name = _get_application_name(name)
+
+ requires = "{}:{}".format(
+ application_name,
+ endpoint
+ )
+ self.log.debug("Relation: {} <-> {}".format(
+ provides,
+ requires
+ ))
+ await self.add_relation(
+ model_name,
+ provides,
+ requires,
+ )
+ except Exception as e:
+ self.log.debug("Exception: {}".format(e))
+
+ return
+
+ async def DeployCharms(self, model_name, application_name, vnfd,
+ charm_path, params={}, machine_spec={},
+ callback=None, *callback_args):
+ """Deploy one or more charms associated with a VNF.
+
+ Deploy the charm(s) referenced in a VNF Descriptor.
+
+ :param str model_name: The name or unique id of the network service.
+ :param str application_name: The name of the application
+        :param dict vnfd: The parsed yaml VNF descriptor.
+ :param str charm_path: The path to the Juju charm
+ :param dict params: A dictionary of runtime parameters
+ Examples::
+ {
+ 'rw_mgmt_ip': '1.2.3.4',
+ # Pass the initial-config-primitives section of the vnf or vdu
+ 'initial-config-primitives': {...}
+ 'user_values': dictionary with the day-1 parameters provided at instantiation time. It will replace values
+ inside < >. rw_mgmt_ip will be included here also
+ }
+ :param dict machine_spec: A dictionary describing the machine to
+ install to
+ Examples::
+ {
+ 'hostname': '1.2.3.4',
+ 'username': 'ubuntu',
+ }
+ :param obj callback: A callback function to receive status changes.
+ :param tuple callback_args: A list of arguments to be passed to the
+ callback
+ """
+
+ ########################################################
+ # Verify the path to the charm exists and is readable. #
+ ########################################################
+ if not os.path.exists(charm_path):
+ self.log.debug("Charm path doesn't exist: {}".format(charm_path))
+ self.notify_callback(
+ model_name,
+ application_name,
+ "failed",
+ callback,
+ *callback_args,
+ )
+ raise JujuCharmNotFound("No artifacts configured.")
+
+ ################################
+ # Login to the Juju controller #
+ ################################
+ if not self.authenticated:
+ self.log.debug("Authenticating with Juju")
+ await self.login()
+
+ ##########################################
+ # Get the model for this network service #
+ ##########################################
+ model = await self.get_model(model_name)
+
+ ########################################
+ # Verify the application doesn't exist #
+ ########################################
+ app = await self.get_application(model, application_name)
+ if app:
+ raise JujuApplicationExists("Can't deploy application \"{}\" to model \"{}\" because it already exists.".format(application_name, model_name))
+
+ ################################################################
+ # Register this application with the model-level event monitor #
+ ################################################################
+ if callback:
+ self.monitors[model_name].AddApplication(
+ application_name,
+ callback,
+ *callback_args
+ )
+
+ ########################################################
+ # Check for specific machine placement (native charms) #
+ ########################################################
+ to = ""
+ if machine_spec.keys():
+ if all(k in machine_spec for k in ['host', 'user']):
+ # Enlist an existing machine as a Juju unit
+ machine = await model.add_machine(spec='ssh:{}@{}:{}'.format(
+ machine_spec['user'],
+ machine_spec['host'],
+ self.GetPrivateKeyPath(),
+ ))
+ to = machine.id
+
+ #######################################
+ # Get the initial charm configuration #
+ #######################################
+
+ rw_mgmt_ip = None
+ if 'rw_mgmt_ip' in params:
+ rw_mgmt_ip = params['rw_mgmt_ip']
+
+ if 'initial-config-primitive' not in params:
+ params['initial-config-primitive'] = {}
+
+ initial_config = self._get_config_from_dict(
+ params['initial-config-primitive'],
+ {'<rw_mgmt_ip>': rw_mgmt_ip}
+ )
+
+ self.log.debug("JujuApi: Deploying charm ({}/{}) from {}".format(
+ model_name,
+ application_name,
+ charm_path,
+ to=to,
+ ))
+
+ ########################################################
+ # Deploy the charm and apply the initial configuration #
+ ########################################################
+ app = await model.deploy(
+ # We expect charm_path to be either the path to the charm on disk
+ # or in the format of cs:series/name
+ charm_path,
+ # This is the formatted, unique name for this charm
+ application_name=application_name,
+ # Proxy charms should use the current LTS. This will need to be
+ # changed for native charms.
+ series='xenial',
+ # Apply the initial 'config' primitive during deployment
+ config=initial_config,
+ # Where to deploy the charm to.
+ to=to,
+ )
+
+ # Map the vdu id<->app name,
+ #
+ await self.Relate(model_name, vnfd)
+
+ # #######################################
+ # # Execute initial config primitive(s) #
+ # #######################################
+ uuids = await self.ExecuteInitialPrimitives(
+ model_name,
+ application_name,
+ params,
+ )
+ return uuids
+
+ # primitives = {}
+ #
+ # # Build a sequential list of the primitives to execute
+ # for primitive in params['initial-config-primitive']:
+ # try:
+ # if primitive['name'] == 'config':
+ # # This is applied when the Application is deployed
+ # pass
+ # else:
+ # seq = primitive['seq']
+ #
+ # params = {}
+ # if 'parameter' in primitive:
+ # params = primitive['parameter']
+ #
+ # primitives[seq] = {
+ # 'name': primitive['name'],
+ # 'parameters': self._map_primitive_parameters(
+ # params,
+ # {'<rw_mgmt_ip>': rw_mgmt_ip}
+ # ),
+ # }
+ #
+ # for primitive in sorted(primitives):
+ # await self.ExecutePrimitive(
+ # model_name,
+ # application_name,
+ # primitives[primitive]['name'],
+ # callback,
+ # callback_args,
+ # **primitives[primitive]['parameters'],
+ # )
+ # except N2VCPrimitiveExecutionFailed as e:
+ # self.log.debug(
+ # "[N2VC] Exception executing primitive: {}".format(e)
+ # )
+ # raise
+
+ async def GetPrimitiveStatus(self, model_name, uuid):
+ """Get the status of an executed Primitive.
+
+ The status of an executed Primitive will be one of three values:
+ - completed
+ - failed
+ - running
+ """
+ status = None
+ try:
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+
+ results = await model.get_action_status(uuid)
+
+ if uuid in results:
+ status = results[uuid]
+
+ except Exception as e:
+ self.log.debug(
+ "Caught exception while getting primitive status: {}".format(e)
+ )
+ raise N2VCPrimitiveExecutionFailed(e)
+
+ return status
+
+ async def GetPrimitiveOutput(self, model_name, uuid):
+ """Get the output of an executed Primitive.
+
+ Note: this only returns output for a successfully executed primitive.
+ """
+ results = None
+ try:
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+ results = await model.get_action_output(uuid, 60)
+ except Exception as e:
+ self.log.debug(
+ "Caught exception while getting primitive status: {}".format(e)
+ )
+ raise N2VCPrimitiveExecutionFailed(e)
+
+ return results
+
+ # async def ProvisionMachine(self, model_name, hostname, username):
+ # """Provision machine for usage with Juju.
+ #
+ # Provisions a previously instantiated machine for use with Juju.
+ # """
+ # try:
+ # if not self.authenticated:
+ # await self.login()
+ #
+ # # FIXME: This is hard-coded until model-per-ns is added
+ # model_name = 'default'
+ #
+ # model = await self.get_model(model_name)
+ # model.add_machine(spec={})
+ #
+ # machine = await model.add_machine(spec='ssh:{}@{}:{}'.format(
+ # "ubuntu",
+ # host['address'],
+ # private_key_path,
+ # ))
+ # return machine.id
+ #
+ # except Exception as e:
+ # self.log.debug(
+ # "Caught exception while getting primitive status: {}".format(e)
+ # )
+ # raise N2VCPrimitiveExecutionFailed(e)
+
+ def GetPrivateKeyPath(self):
+ homedir = os.environ['HOME']
+ sshdir = "{}/.ssh".format(homedir)
+ private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+ return private_key_path
+
+ async def GetPublicKey(self):
+        """Get the N2VC SSH public key.
+
+ Returns the SSH public key, to be injected into virtual machines to
+ be managed by the VCA.
+
+ The first time this is run, a ssh keypair will be created. The public
+ key is injected into a VM so that we can provision the machine with
+ Juju, after which Juju will communicate with the VM directly via the
+ juju agent.
+ """
+ public_key = ""
+
+ # Find the path to where we expect our key to live.
+ homedir = os.environ['HOME']
+ sshdir = "{}/.ssh".format(homedir)
+ if not os.path.exists(sshdir):
+ os.mkdir(sshdir)
+
+ private_key_path = "{}/id_n2vc_rsa".format(sshdir)
+ public_key_path = "{}.pub".format(private_key_path)
+
+ # If we don't have a key generated, generate it.
+ if not os.path.exists(private_key_path):
+ cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+ "rsa",
+ "4096",
+ private_key_path
+ )
+ subprocess.check_output(shlex.split(cmd))
+
+ # Read the public key
+ with open(public_key_path, "r") as f:
+ public_key = f.readline()
+
+ return public_key
+
+ async def ExecuteInitialPrimitives(self, model_name, application_name,
+ params, callback=None, *callback_args):
+ """Execute multiple primitives.
+
+ Execute multiple primitives as declared in initial-config-primitive.
+ This is useful in cases where the primitives initially failed -- for
+ example, if the charm is a proxy but the proxy hasn't been configured
+ yet.
+ """
+ uuids = []
+ primitives = {}
+
+ # Build a sequential list of the primitives to execute
+ for primitive in params['initial-config-primitive']:
+ try:
+ if primitive['name'] == 'config':
+ pass
+ else:
+ seq = primitive['seq']
+
+ params_ = {}
+ if 'parameter' in primitive:
+ params_ = primitive['parameter']
+
+ user_values = params.get("user_values", {})
+ if 'rw_mgmt_ip' not in user_values:
+ user_values['rw_mgmt_ip'] = None
+ # just for backward compatibility, because it will be provided always by modern version of LCM
+
+ primitives[seq] = {
+ 'name': primitive['name'],
+ 'parameters': self._map_primitive_parameters(
+ params_,
+ user_values
+ ),
+ }
+
+ for primitive in sorted(primitives):
+ uuids.append(
+ await self.ExecutePrimitive(
+ model_name,
+ application_name,
+ primitives[primitive]['name'],
+ callback,
+ callback_args,
+ **primitives[primitive]['parameters'],
+ )
+ )
+ except N2VCPrimitiveExecutionFailed as e:
+ self.log.debug(
+ "[N2VC] Exception executing primitive: {}".format(e)
+ )
+ raise
+ return uuids
+
+ async def ExecutePrimitive(self, model_name, application_name, primitive,
+ callback, *callback_args, **params):
+ """Execute a primitive of a charm for Day 1 or Day 2 configuration.
+
+ Execute a primitive defined in the VNF descriptor.
+
+ :param str model_name: The name or unique id of the network service.
+ :param str application_name: The name of the application
+ :param str primitive: The name of the primitive to execute.
+ :param obj callback: A callback function to receive status changes.
+ :param tuple callback_args: A list of arguments to be passed to the
+ callback function.
+ :param dict params: A dictionary of key=value pairs representing the
+ primitive's parameters
+ Examples::
+ {
+ 'rw_mgmt_ip': '1.2.3.4',
+ # Pass the initial-config-primitives section of the vnf or vdu
+ 'initial-config-primitives': {...}
+ }
+ """
+ self.log.debug("Executing primitive={} params={}".format(primitive, params))
+ uuid = None
+ try:
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+
+ if primitive == 'config':
+ # config is special, and expecting params to be a dictionary
+ await self.set_config(
+ model,
+ application_name,
+ params['params'],
+ )
+ else:
+ app = await self.get_application(model, application_name)
+ if app:
+ # Run against the first (and probably only) unit in the app
+ unit = app.units[0]
+ if unit:
+ action = await unit.run_action(primitive, **params)
+ uuid = action.id
+ except Exception as e:
+ self.log.debug(
+ "Caught exception while executing primitive: {}".format(e)
+ )
+ raise N2VCPrimitiveExecutionFailed(e)
+ return uuid
+
+ async def RemoveCharms(self, model_name, application_name, callback=None,
+ *callback_args):
+ """Remove a charm from the VCA.
+
+ Remove a charm referenced in a VNF Descriptor.
+
+ :param str model_name: The name of the network service.
+ :param str application_name: The name of the application
+ :param obj callback: A callback function to receive status changes.
+ :param tuple callback_args: A list of arguments to be passed to the
+ callback function.
+ """
+ try:
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+ app = await self.get_application(model, application_name)
+ if app:
+ # Remove this application from event monitoring
+ self.monitors[model_name].RemoveApplication(application_name)
+
+ # self.notify_callback(model_name, application_name, "removing", callback, *callback_args)
+ self.log.debug(
+ "Removing the application {}".format(application_name)
+ )
+ await app.remove()
+
+ await self.disconnect_model(self.monitors[model_name])
+
+ self.notify_callback(
+ model_name,
+ application_name,
+ "removed",
+ "Removing charm {}".format(application_name),
+ callback,
+ *callback_args,
+ )
+
+ except Exception as e:
+ print("Caught exception: {}".format(e))
+ self.log.debug(e)
+ raise e
+
+ async def CreateNetworkService(self, ns_uuid):
+ """Create a new Juju model for the Network Service.
+
+ Creates a new Model in the Juju Controller.
+
+        :param str ns_uuid: A unique id representing an instance of a
+ Network Service.
+
+ :returns: True if the model was created. Raises JujuError on failure.
+ """
+ if not self.authenticated:
+ await self.login()
+
+ models = await self.controller.list_models()
+ if ns_uuid not in models:
+ try:
+ self.models[ns_uuid] = await self.controller.add_model(
+ ns_uuid
+ )
+ except JujuError as e:
+ if "already exists" not in e.message:
+ raise e
+
+ # Create an observer for this model
+ await self.create_model_monitor(ns_uuid)
+
+ return True
+
+ async def DestroyNetworkService(self, ns_uuid):
+ """Destroy a Network Service.
+
+ Destroy the Network Service and any deployed charms.
+
+ :param ns_uuid The unique id of the Network Service
+
+ :returns: True if the model was created. Raises JujuError on failure.
+ """
+
+ # Do not delete the default model. The default model was used by all
+ # Network Services, prior to the implementation of a model per NS.
+ if ns_uuid.lower() == "default":
+ return False
+
+ if not self.authenticated:
+ self.log.debug("Authenticating with Juju")
+ await self.login()
+
+ # Disconnect from the Model
+ if ns_uuid in self.models:
+ await self.disconnect_model(self.models[ns_uuid])
+
+ try:
+ await self.controller.destroy_models(ns_uuid)
+ except JujuError:
+ raise NetworkServiceDoesNotExist(
+ "The Network Service '{}' does not exist".format(ns_uuid)
+ )
+
+ return True
+
+ async def GetMetrics(self, model_name, application_name):
+ """Get the metrics collected by the VCA.
+
+ :param model_name The name or unique id of the network service
+ :param application_name The name of the application
+ """
+ metrics = {}
+ model = await self.get_model(model_name)
+ app = await self.get_application(model, application_name)
+ if app:
+ metrics = await app.get_metrics()
+
+ return metrics
+
+ async def HasApplication(self, model_name, application_name):
+ model = await self.get_model(model_name)
+ app = await self.get_application(model, application_name)
+ if app:
+ return True
+ return False
+
+ # Non-public methods
+ async def add_relation(self, model_name, relation1, relation2):
+ """
+ Add a relation between two application endpoints.
+
+ :param str model_name: The name or unique id of the network service
+ :param str relation1: '<application>[:<relation_name>]'
+ :param str relation2: '<application>[:<relation_name>]'
+ """
+
+ if not self.authenticated:
+ await self.login()
+
+ m = await self.get_model(model_name)
+ try:
+ await m.add_relation(relation1, relation2)
+ except JujuAPIError as e:
+ # If one of the applications in the relationship doesn't exist,
+ # or the relation has already been added, let the operation fail
+ # silently.
+ if 'not found' in e.message:
+ return
+ if 'already exists' in e.message:
+ return
+
+ raise e
+
+ # async def apply_config(self, config, application):
+ # """Apply a configuration to the application."""
+ # print("JujuApi: Applying configuration to {}.".format(
+ # application
+ # ))
+ # return await self.set_config(application=application, config=config)
+
+ def _get_config_from_dict(self, config_primitive, values):
+ """Transform the yang config primitive to dict.
+
+ Expected result:
+
+ config = {
+ 'config':
+ }
+ """
+ config = {}
+ for primitive in config_primitive:
+ if primitive['name'] == 'config':
+ # config = self._map_primitive_parameters()
+ for parameter in primitive['parameter']:
+ param = str(parameter['name'])
+ if parameter['value'] == "<rw_mgmt_ip>":
+ config[param] = str(values[parameter['value']])
+ else:
+ config[param] = str(parameter['value'])
+
+ return config
+
+ def _map_primitive_parameters(self, parameters, user_values):
+ params = {}
+ for parameter in parameters:
+ param = str(parameter['name'])
+ value = parameter.get('value')
+
+ # map parameters inside a < >; e.g. <rw_mgmt_ip>. with the provided user_values.
+ # Must exist at user_values except if there is a default value
+ if isinstance(value, str) and value.startswith("<") and value.endswith(">"):
+ if parameter['value'][1:-1] in user_values:
+ value = user_values[parameter['value'][1:-1]]
+ elif 'default-value' in parameter:
+ value = parameter['default-value']
+ else:
+ raise KeyError("parameter {}='{}' not supplied ".format(param, value))
+
+ # If there's no value, use the default-value (if set)
+ if value is None and 'default-value' in parameter:
+ value = parameter['default-value']
+
+ # Typecast parameter value, if present
+ paramtype = "string"
+ try:
+ if 'data-type' in parameter:
+ paramtype = str(parameter['data-type']).lower()
+
+ if paramtype == "integer":
+ value = int(value)
+ elif paramtype == "boolean":
+ value = bool(value)
+ else:
+ value = str(value)
+ else:
+ # If there's no data-type, assume the value is a string
+ value = str(value)
+ except ValueError:
+ raise ValueError("parameter {}='{}' cannot be converted to type {}".format(param, value, paramtype))
+
+ params[param] = value
+ return params
+
+ def _get_config_from_yang(self, config_primitive, values):
+ """Transform the yang config primitive to dict."""
+ config = {}
+ for primitive in config_primitive.values():
+ if primitive['name'] == 'config':
+ for parameter in primitive['parameter'].values():
+ param = str(parameter['name'])
+ if parameter['value'] == "<rw_mgmt_ip>":
+ config[param] = str(values[parameter['value']])
+ else:
+ config[param] = str(parameter['value'])
+
+ return config
+
+ def FormatApplicationName(self, *args):
+ """
+ Generate a Juju-compatible Application name
+
+ :param args tuple: Positional arguments to be used to construct the
+ application name.
+
+ Limitations::
+ - Only accepts characters a-z and non-consecutive dashes (-)
+ - Application name should not exceed 50 characters
+
+ Examples::
+
+ FormatApplicationName("ping_pong_ns", "ping_vnf", "a")
+ """
+ appname = ""
+ for c in "-".join(list(args)):
+ if c.isdigit():
+ c = chr(97 + int(c))
+ elif not c.isalpha():
+ c = "-"
+ appname += c
+ return re.sub('-+', '-', appname.lower())
+
+ # def format_application_name(self, nsd_name, vnfr_name, member_vnf_index=0):
+ # """Format the name of the application
+ #
+ # Limitations:
+ # - Only accepts characters a-z and non-consecutive dashes (-)
+ # - Application name should not exceed 50 characters
+ # """
+ # name = "{}-{}-{}".format(nsd_name, vnfr_name, member_vnf_index)
+ # new_name = ''
+ # for c in name:
+ # if c.isdigit():
+ # c = chr(97 + int(c))
+ # elif not c.isalpha():
+ # c = "-"
+ # new_name += c
+ # return re.sub('\-+', '-', new_name.lower())
+
+ def format_model_name(self, name):
+ """Format the name of model.
+
+ Model names may only contain lowercase letters, digits and hyphens
+ """
+
+ return name.replace('_', '-').lower()
+
+ async def get_application(self, model, application):
+ """Get the deployed application."""
+ if not self.authenticated:
+ await self.login()
+
+ app = None
+ if application and model:
+ if model.applications:
+ if application in model.applications:
+ app = model.applications[application]
+
+ return app
+
+ async def get_model(self, model_name):
+ """Get a model from the Juju Controller.
+
+ Note: disconnect() must be called on a returned Model object before
+ it goes out of scope."""
+ if not self.authenticated:
+ await self.login()
+
+ if model_name not in self.models:
+ # Get the models in the controller
+ models = await self.controller.list_models()
+
+ if model_name not in models:
+ try:
+ self.models[model_name] = await self.controller.add_model(
+ model_name
+ )
+ except JujuError as e:
+ if "already exists" not in e.message:
+ raise e
+ else:
+ self.models[model_name] = await self.controller.get_model(
+ model_name
+ )
+
+ self.refcount['model'] += 1
+
+ # Create an observer for this model
+ await self.create_model_monitor(model_name)
+
+ return self.models[model_name]
+
+ async def create_model_monitor(self, model_name):
+ """Create a monitor for the model, if none exists."""
+ if not self.authenticated:
+ await self.login()
+
+ if model_name not in self.monitors:
+ self.monitors[model_name] = VCAMonitor(model_name)
+ self.models[model_name].add_observer(self.monitors[model_name])
+
+ return True
+
+ async def login(self):
+ """Login to the Juju controller."""
+
+ if self.authenticated:
+ return
+
+ self.connecting = True
+
+ self.log.debug("JujuApi: Logging into controller")
+
+ cacert = None
+ self.controller = Controller(loop=self.loop)
+
+ if self.secret:
+ self.log.debug(
+ "Connecting to controller... ws://{}:{} as {}/{}".format(
+ self.endpoint,
+ self.port,
+ self.user,
+ self.secret,
+ )
+ )
+ await self.controller.connect(
+ endpoint=self.endpoint,
+ username=self.user,
+ password=self.secret,
+ cacert=cacert,
+ )
+ self.refcount['controller'] += 1
+ else:
+ # current_controller no longer exists
+ # self.log.debug("Connecting to current controller...")
+ # await self.controller.connect_current()
+ # await self.controller.connect(
+ # endpoint=self.endpoint,
+ # username=self.user,
+ # cacert=cacert,
+ # )
+ self.log.fatal("VCA credentials not configured.")
+
+ self.authenticated = True
+ self.log.debug("JujuApi: Logged into controller")
+
+ async def logout(self):
+ """Logout of the Juju controller."""
+ if not self.authenticated:
+ return False
+
+ try:
+ for model in self.models:
+ await self.disconnect_model(model)
+
+ if self.controller:
+ self.log.debug("Disconnecting controller {}".format(
+ self.controller
+ ))
+ await self.controller.disconnect()
+ self.refcount['controller'] -= 1
+ self.controller = None
+
+ self.authenticated = False
+
+ self.log.debug(self.refcount)
+
+ except Exception as e:
+ self.log.fatal(
+ "Fatal error logging out of Juju Controller: {}".format(e)
+ )
+ raise e
+ return True
+
+ async def disconnect_model(self, model):
+ self.log.debug("Disconnecting model {}".format(model))
+ if model in self.models:
+ print("Disconnecting model")
+ await self.models[model].disconnect()
+ self.refcount['model'] -= 1
+ self.models[model] = None
+
+ # async def remove_application(self, name):
+ # """Remove the application."""
+ # if not self.authenticated:
+ # await self.login()
+ #
+ # app = await self.get_application(name)
+ # if app:
+ # self.log.debug("JujuApi: Destroying application {}".format(
+ # name,
+ # ))
+ #
+ # await app.destroy()
+
+ async def remove_relation(self, a, b):
+ """
+ Remove a relation between two application endpoints
+
+ :param a: An application endpoint
+ :param b: An application endpoint
+ """
+ if not self.authenticated:
+ await self.login()
+
+ m = await self.get_model()
+ try:
+ m.remove_relation(a, b)
+ finally:
+ await m.disconnect()
+
+ async def resolve_error(self, model_name, application=None):
+ """Resolve units in error state."""
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+
+ app = await self.get_application(model, application)
+ if app:
+ self.log.debug(
+ "JujuApi: Resolving errors for application {}".format(
+ application,
+ )
+ )
+
+ for unit in app.units:
+ app.resolved(retry=True)
+
+ async def run_action(self, model_name, application, action_name, **params):
+ """Execute an action and return an Action object."""
+ if not self.authenticated:
+ await self.login()
+ result = {
+ 'status': '',
+ 'action': {
+ 'tag': None,
+ 'results': None,
+ }
+ }
+
+ model = await self.get_model(model_name)
+
+ app = await self.get_application(model, application)
+ if app:
+ # We currently only have one unit per application
+ # so use the first unit available.
+ unit = app.units[0]
+
+ self.log.debug(
+ "JujuApi: Running Action {} against Application {}".format(
+ action_name,
+ application,
+ )
+ )
+
+ action = await unit.run_action(action_name, **params)
+
+ # Wait for the action to complete
+ await action.wait()
+
+ result['status'] = action.status
+ result['action']['tag'] = action.data['id']
+ result['action']['results'] = action.results
+
+ return result
+
+ async def set_config(self, model_name, application, config):
+ """Apply a configuration to the application."""
+ if not self.authenticated:
+ await self.login()
+
+ app = await self.get_application(model_name, application)
+ if app:
+ self.log.debug("JujuApi: Setting config for Application {}".format(
+ application,
+ ))
+ await app.set_config(config)
+
+ # Verify the config is set
+ newconf = await app.get_config()
+ for key in config:
+ if config[key] != newconf[key]['value']:
+ self.log.debug("JujuApi: Config not set! Key {} Value {} doesn't match {}".format(key, config[key], newconf[key]))
+
+ # async def set_parameter(self, parameter, value, application=None):
+ # """Set a config parameter for a service."""
+ # if not self.authenticated:
+ # await self.login()
+ #
+ # self.log.debug("JujuApi: Setting {}={} for Application {}".format(
+ # parameter,
+ # value,
+ # application,
+ # ))
+ # return await self.apply_config(
+ # {parameter: value},
+ # application=application,
+ # )
+
+ async def wait_for_application(self, model_name, application_name,
+ timeout=300):
+ """Wait for an application to become active."""
+ if not self.authenticated:
+ await self.login()
+
+ model = await self.get_model(model_name)
+
+ app = await self.get_application(model, application_name)
+ self.log.debug("Application: {}".format(app))
+ if app:
+ self.log.debug(
+ "JujuApi: Waiting {} seconds for Application {}".format(
+ timeout,
+ application_name,
+ )
+ )
+
+ await model.block_until(
+ lambda: all(
+ unit.agent_status == 'idle' and unit.workload_status in
+ ['active', 'unknown'] for unit in app.units
+ ),
+ timeout=timeout
+ )
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..9c558e3
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+.
diff --git a/setup.py b/setup.py
index 01036da..b27dcbe 100644
--- a/setup.py
+++ b/setup.py
@@ -12,48 +12,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from pathlib import Path
from setuptools import setup, find_packages
-here = Path(__file__).absolute().parent
-readme = here / 'docs' / 'readme.rst'
-changelog = here / 'docs' / 'changelog.rst'
-long_description = '{}\n\n{}'.format(
- readme.read_text(),
- changelog.read_text()
-)
-version = here / 'VERSION'
-
setup(
- name='juju',
- version=version.read_text().strip(),
+ name='N2VC',
+ version_command=('git describe --match v* --tags --long --dirty',
+ 'pep440-git-full'),
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=[
'macaroonbakery>=1.1,<2.0',
'pyRFC3339>=1.0,<2.0',
- 'pyyaml>=3.0,<=4.2',
+ 'pyyaml>=3.0,<4.0',
'theblues>=0.3.8,<1.0',
- 'websockets>=7.0,<8.0',
- 'paramiko>=2.4.0,<3.0.0',
- 'pyasn1>=0.4.4',
+ 'websockets>=4.0,<5.0',
+ 'paramiko',
],
include_package_data=True,
- maintainer='Juju Ecosystem Engineering',
- maintainer_email='juju@lists.ubuntu.com',
- description=('Python library for Juju'),
- long_description=long_description,
- url='https://github.com/juju/python-libjuju',
+ maintainer='Adam Israel',
+ maintainer_email='adam.israel@canonical.com',
+ description=(''),
+ url='',
license='Apache 2',
- classifiers=[
- "Development Status :: 3 - Alpha",
- "Intended Audience :: Developers",
- "Programming Language :: Python",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- ],
entry_points={
'console_scripts': [
],
diff --git a/stdeb.cfg b/stdeb.cfg
new file mode 100644
index 0000000..c07507b
--- /dev/null
+++ b/stdeb.cfg
@@ -0,0 +1,5 @@
+[DEFAULT]
+Suite: xenial
+XS-Python-Version: >= 3.5
+Maintainer: Adam Israel <adam.israel@canonical.com>
+Depends: python3-pip
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..56380a4
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,51 @@
+# N2VC Testing
+
+
+# Preparation
+## Environment variables
+
+The test currently requires some environment variables to be set in order to run, but these will be deprecated as soon as possible.
+
+## LXD
+
+LXD should be installed via snap.
+
+The connection to the LXD API server uses the self-signed SSL certificate that lxc generates the first time a client command (`lxc list`, et al.) is run.
+
+## Juju
+
+Juju is expected to be installed via snap and bootstrapped.
+
+Run `juju status -m controller` and capture the IP address of machine 0. This is the Juju controller, specified in VCA_HOST
+
+export VCA_HOST=1.2.3.4
+export VCA_USER=admin
+export VCA_SECRET=admin
+
+
+# Running tests
+
+Tests are written with pytest, driven by tox. All tests are run from the root directory of the repository.
+
+## Run one test
+
+To run a single integration test, we tell tox which environment we need, and then the path to the test.
+
+```bash
+tox -e integration -- tests/integration/test_non-string_parameter.py
+```
+
+## Running all tests
+
+`make test` will invoke tox to run all unit tests. Alternatively, you can limit this to a specific type of test by invoking tox manually:
+```bash
+tox -e integration -- tests/integration/
+```
+
+# TODO
+- Update CI environment to have Juju and LXD available via snap
+- Investigate running via Docker
+- Remove the requirement for setting environment variables
+- Integrate into Jenkins so that tests run against every commit
+- Add global timeout to abort tests that are hung
+- Only build a charm once per test run, i.e., if two or more tests use the same charm, we should only call `charm build` once.
diff --git a/tests/base.py b/tests/base.py
index 600372c..3ae5f4f 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -1,32 +1,37 @@
-import inspect
+#!/usr/bin/env python3
+import asyncio
+import datetime
+import logging
+import n2vc.vnf
+import pylxd
+import pytest
+import os
+import shlex
import subprocess
+import time
import uuid
-from contextlib import contextmanager
-from pathlib import Path
+import yaml
-import mock
-from juju.client.jujudata import FileJujuData
from juju.controller import Controller
-import pytest
+# Disable InsecureRequestWarning w/LXD
+import urllib3
+urllib3.disable_warnings()
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+
+here = os.path.dirname(os.path.realpath(__file__))
def is_bootstrapped():
- try:
- result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
- return (
- result.returncode == 0 and
- len(result.stdout.decode().strip()) > 0)
- except FileNotFoundError:
- return False
+ result = subprocess.run(['juju', 'switch'], stdout=subprocess.PIPE)
+ return (
+ result.returncode == 0 and len(result.stdout.decode().strip()) > 0)
bootstrapped = pytest.mark.skipif(
not is_bootstrapped(),
reason='bootstrapped Juju environment required')
-test_run_nonce = uuid.uuid4().hex[-4:]
-
class CleanController():
"""
@@ -48,101 +53,1081 @@
await self._controller.disconnect()
-class CleanModel():
+def debug(msg):
+ """Format debug messages in a consistent way."""
+ now = datetime.datetime.now()
+
+ # TODO: Decide on the best way to log. Output from `logging.debug` shows up
+ # when a test fails, but print() will always show up when running tox with
+ # `-s`, which is really useful for debugging single tests without having to
+ # insert a False assert to see the log.
+ logging.debug(
+ "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
+ )
+ print(
+ "[{}] {}".format(now.strftime('%Y-%m-%dT%H:%M:%S'), msg)
+ )
+
+
+def get_charm_path():
+ return "{}/charms".format(here)
+
+
+def get_layer_path():
+ return "{}/charms/layers".format(here)
+
+
+def collect_metrics(application):
+ """Invoke Juju's metrics collector.
+
+ Caveat: this shells out to the `juju collect-metrics` command, rather than
+ making an API call. At the time of writing, that API is not exposed through
+ the client library.
"""
- Context manager that automatically connects to the currently active
- controller, adds a fresh model, returns the connection to that model,
- and automatically disconnects and cleans up the model.
- The new model is also set as the current default for the controller
- connection.
- """
- def __init__(self, bakery_client=None):
- self._controller = None
- self._model = None
- self._model_uuid = None
- self._bakery_client = bakery_client
-
- async def __aenter__(self):
- model_nonce = uuid.uuid4().hex[-4:]
- frame = inspect.stack()[1]
- test_name = frame.function.replace('_', '-')
- jujudata = TestJujuData()
- self._controller = Controller(
- jujudata=jujudata,
- bakery_client=self._bakery_client,
- )
- controller_name = jujudata.current_controller()
- user_name = jujudata.accounts()[controller_name]['user']
- await self._controller.connect(controller_name)
-
- model_name = 'test-{}-{}-{}'.format(
- test_run_nonce,
- test_name,
- model_nonce,
- )
- self._model = await self._controller.add_model(model_name)
-
- # Change the JujuData instance so that it will return the new
- # model as the current model name, so that we'll connect
- # to it by default.
- jujudata.set_model(
- controller_name,
- user_name + "/" + model_name,
- self._model.info.uuid,
- )
-
- # save the model UUID in case test closes model
- self._model_uuid = self._model.info.uuid
-
- return self._model
-
- async def __aexit__(self, exc_type, exc, tb):
- await self._model.disconnect()
- await self._controller.destroy_model(self._model_uuid)
- await self._controller.disconnect()
-
-
-class TestJujuData(FileJujuData):
- def __init__(self):
- self.__controller_name = None
- self.__model_name = None
- self.__model_uuid = None
- super().__init__()
-
- def set_model(self, controller_name, model_name, model_uuid):
- self.__controller_name = controller_name
- self.__model_name = model_name
- self.__model_uuid = model_uuid
-
- def current_model(self, *args, **kwargs):
- return self.__model_name or super().current_model(*args, **kwargs)
-
- def models(self):
- all_models = super().models()
- if self.__model_name is None:
- return all_models
- all_models.setdefault(self.__controller_name, {})
- all_models[self.__controller_name].setdefault('models', {})
- cmodels = all_models[self.__controller_name]['models']
- cmodels[self.__model_name] = {'uuid': self.__model_uuid}
- return all_models
-
-
-class AsyncMock(mock.MagicMock):
- async def __call__(self, *args, **kwargs):
- return super().__call__(*args, **kwargs)
-
-
-@contextmanager
-def patch_file(filename):
- """
- "Patch" a file so that its current contents are automatically restored
- when the context is exited.
- """
- filepath = Path(filename).expanduser()
- data = filepath.read_bytes()
try:
- yield
- finally:
- filepath.write_bytes(data)
+ subprocess.check_call(['juju', 'collect-metrics', application])
+ except subprocess.CalledProcessError as e:
+ raise Exception("Unable to collect metrics: {}".format(e))
+
+
+def has_metrics(charm):
+ """Check if a charm has metrics defined."""
+ metricsyaml = "{}/{}/metrics.yaml".format(
+ get_layer_path(),
+ charm,
+ )
+ if os.path.exists(metricsyaml):
+ return True
+ return False
+
+
+def get_descriptor(descriptor):
+ desc = None
+ try:
+ tmp = yaml.load(descriptor)
+
+ # Remove the envelope
+ root = list(tmp.keys())[0]
+ if root == "nsd:nsd-catalog":
+ desc = tmp['nsd:nsd-catalog']['nsd'][0]
+ elif root == "vnfd:vnfd-catalog":
+ desc = tmp['vnfd:vnfd-catalog']['vnfd'][0]
+ except ValueError:
+ assert False
+ return desc
+
+
+def get_n2vc(loop=None):
+ """Return an instance of N2VC.VNF."""
+ log = logging.getLogger()
+ log.level = logging.DEBUG
+
+ # Extract parameters from the environment in order to run our test
+ vca_host = os.getenv('VCA_HOST', '127.0.0.1')
+ vca_port = os.getenv('VCA_PORT', 17070)
+ vca_user = os.getenv('VCA_USER', 'admin')
+ vca_charms = os.getenv('VCA_CHARMS', None)
+ vca_secret = os.getenv('VCA_SECRET', None)
+
+ client = n2vc.vnf.N2VC(
+ log=log,
+ server=vca_host,
+ port=vca_port,
+ user=vca_user,
+ secret=vca_secret,
+ artifacts=vca_charms,
+ loop=loop
+ )
+ return client
+
+
+def create_lxd_container(public_key=None, name="test_name"):
+ """
+ Returns a container object
+
+ If public_key isn't set, we'll use the Juju ssh key
+
+ :param public_key: The public key to inject into the container
+ :param name: The name of the test being run
+ """
+ container = None
+
+ # Format name so it's valid
+ name = name.replace("_", "-").replace(".", "")
+
+ client = get_lxd_client()
+ test_machine = "test-{}-{}".format(
+ uuid.uuid4().hex[-4:],
+ name,
+ )
+
+ private_key_path, public_key_path = find_n2vc_ssh_keys()
+
+ try:
+ # create profile w/cloud-init and juju ssh key
+ if not public_key:
+ public_key = ""
+ with open(public_key_path, "r") as f:
+ public_key = f.readline()
+
+ client.profiles.create(
+ test_machine,
+ config={
+ 'user.user-data': '#cloud-config\nssh_authorized_keys:\n- {}'.format(public_key)},
+ devices={
+ 'root': {'path': '/', 'pool': 'default', 'type': 'disk'},
+ 'eth0': {
+ 'nictype': 'bridged',
+ 'parent': 'lxdbr0',
+ 'type': 'nic'
+ }
+ }
+ )
+ except Exception as ex:
+ debug("Error creating lxd profile {}: {}".format(test_machine, ex))
+ raise ex
+
+ try:
+ # create lxc machine
+ config = {
+ 'name': test_machine,
+ 'source': {
+ 'type': 'image',
+ 'alias': 'xenial',
+ 'mode': 'pull',
+ 'protocol': 'simplestreams',
+ 'server': 'https://cloud-images.ubuntu.com/releases',
+ },
+ 'profiles': [test_machine],
+ }
+ container = client.containers.create(config, wait=True)
+ container.start(wait=True)
+ except Exception as ex:
+ debug("Error creating lxd container {}: {}".format(test_machine, ex))
+ # This is a test-ending failure.
+ raise ex
+
+ def wait_for_network(container, timeout=30):
+ """Wait for eth0 to have an ipv4 address."""
+ starttime = time.time()
+ while(time.time() < starttime + timeout):
+ time.sleep(1)
+ if 'eth0' in container.state().network:
+ addresses = container.state().network['eth0']['addresses']
+ if len(addresses) > 0:
+ if addresses[0]['family'] == 'inet':
+ return addresses[0]
+ return None
+
+ try:
+ wait_for_network(container)
+ except Exception as ex:
+ debug(
+ "Error waiting for container {} network: {}".format(
+ test_machine,
+ ex,
+ )
+ )
+
+ # HACK: We need to give sshd a chance to bind to the interface,
+ # and pylxd's container.execute seems to be broken and fails and/or
+ # hangs trying to properly check if the service is up.
+ (exit_code, stdout, stderr) = container.execute([
+ 'ping',
+ '-c', '5', # Wait for 5 ECHO_REPLY
+ '8.8.8.8', # Ping Google's public DNS
+ '-W', '15', # Set a 15 second deadline
+ ])
+ if exit_code > 0:
+ # The network failed
+ raise Exception("Unable to verify container network")
+
+ return container
+
+
+def destroy_lxd_container(container):
+ """Stop and delete a LXD container.
+
+ Sometimes we see errors talking to LXD -- ephemeral issues like
+ load or a bug that's killed the API. We'll do our best to clean
+ up here, and we should run a cleanup after all tests are finished
+ to remove any extra containers and profiles belonging to us.
+ """
+
+ if type(container) is bool:
+ return
+
+ name = container.name
+ debug("Destroying container {}".format(name))
+
+ client = get_lxd_client()
+
+ def wait_for_stop(timeout=30):
+ """Wait for the container to stop."""
+ starttime = time.time()
+ while(time.time() < starttime + timeout):
+ time.sleep(1)
+ if container.state == "Stopped":
+ return
+
+ def wait_for_delete(timeout=30):
+ starttime = time.time()
+ while(time.time() < starttime + timeout):
+ time.sleep(1)
+ if client.containers.exists(name) is False:
+ return
+
+ try:
+ container.stop(wait=False)
+ wait_for_stop()
+ except Exception as ex:
+ debug(
+ "Error stopping container {}: {}".format(
+ name,
+ ex,
+ )
+ )
+
+ try:
+ container.delete(wait=False)
+ wait_for_delete()
+ except Exception as ex:
+ debug(
+ "Error deleting container {}: {}".format(
+ name,
+ ex,
+ )
+ )
+
+ try:
+ # Delete the profile created for this container
+ profile = client.profiles.get(name)
+ if profile:
+ profile.delete()
+ except Exception as ex:
+ debug(
+ "Error deleting profile {}: {}".format(
+ name,
+ ex,
+ )
+ )
+
+
+def find_lxd_config():
+ """Find the LXD configuration directory."""
+ paths = []
+ paths.append(os.path.expanduser("~/.config/lxc"))
+ paths.append(os.path.expanduser("~/snap/lxd/current/.config/lxc"))
+
+ for path in paths:
+ if os.path.exists(path):
+ crt = os.path.expanduser("{}/client.crt".format(path))
+ key = os.path.expanduser("{}/client.key".format(path))
+ if os.path.exists(crt) and os.path.exists(key):
+ return (crt, key)
+ return (None, None)
+
+
+def find_n2vc_ssh_keys():
+ """Find the N2VC ssh keys."""
+
+ paths = []
+ paths.append(os.path.expanduser("~/.ssh/"))
+
+ for path in paths:
+ if os.path.exists(path):
+ private = os.path.expanduser("{}/id_n2vc_rsa".format(path))
+ public = os.path.expanduser("{}/id_n2vc_rsa.pub".format(path))
+ if os.path.exists(private) and os.path.exists(public):
+ return (private, public)
+ return (None, None)
+
+
+def find_juju_ssh_keys():
+ """Find the Juju ssh keys."""
+
+ paths = []
+ paths.append(os.path.expanduser("~/.local/share/juju/ssh/"))
+
+ for path in paths:
+ if os.path.exists(path):
+ private = os.path.expanduser("{}/juju_id_rsa".format(path))
+ public = os.path.expanduser("{}/juju_id_rsa.pub".format(path))
+ if os.path.exists(private) and os.path.exists(public):
+ return (private, public)
+ return (None, None)
+
+
+def get_juju_private_key():
+ keys = find_juju_ssh_keys()
+ return keys[0]
+
+
+def get_lxd_client(host="127.0.0.1", port="8443", verify=False):
+ """ Get the LXD client."""
+ client = None
+ (crt, key) = find_lxd_config()
+
+ if crt and key:
+ client = pylxd.Client(
+ endpoint="https://{}:{}".format(host, port),
+ cert=(crt, key),
+ verify=verify,
+ )
+
+ return client
+
+
+# TODO: This is marked serial but can be run in parallel with work, including:
+# - Fixing an event loop issue; seems that all tests stop when one test stops?
+
+
+@pytest.mark.serial
+class TestN2VC(object):
+ """TODO:
+ 1. Validator Validation
+
+ Automatically validate the descriptors we're using here, unless the test author explicitly wants to skip them. Useful to make sure tests aren't being run against invalid descriptors, validating functionality that may fail against a properly written descriptor.
+
+ We need to have a flag (instance variable) that controls this behavior. It may be necessary to skip validation and run against a descriptor implementing features that have not yet been released in the Information Model.
+ """
+
+ """
+ The six phases of integration testing, for the test itself and each charm?:
+
+ setup/teardown_class:
+ 1. Prepare - Verify the environment and create a new model
+ 2. Deploy - Mark the test as ready to execute
+ 3. Configure - Configuration to reach Active state
+ 4. Test - Execute primitive(s) to verify success
+ 5. Collect - Collect any useful artifacts for debugging (charm, logs)
+ 6. Destroy - Destroy the model
+
+
+ 1. Prepare - Building of charm
+ 2. Deploy - Deploying charm
+ 3. Configure - Configuration to reach Active state
+ 4. Test - Execute primitive(s) to verify success
+ 5. Collect - Collect any useful artifacts for debugging (charm, logs)
+ 6. Destroy - Destroy the charm
+
+ """
+ @classmethod
+ def setup_class(self):
+ """ setup any state specific to the execution of the given class (which
+ usually contains tests).
+ """
+ # Initialize instance variable(s)
+ self.n2vc = None
+
+ # Track internal state for each test run
+ self.state = {}
+
+ # Parse the test's descriptors
+ self.nsd = get_descriptor(self.NSD_YAML)
+ self.vnfd = get_descriptor(self.VNFD_YAML)
+
+ self.ns_name = self.nsd['name']
+ self.vnf_name = self.vnfd['name']
+
+ self.charms = {}
+ self.parse_vnf_descriptor()
+ assert self.charms is not {}
+
+ # Track artifacts, like compiled charms, that will need to be removed
+ self.artifacts = {}
+
+ # Build the charm(s) needed for this test
+ for charm in self.get_charm_names():
+ self.get_charm(charm)
+
+ # A bit of a hack, in order to allow the N2VC callback to run parallel
+ # to pytest. Test(s) should wait for this flag to change to False
+ # before returning.
+ self._running = True
+ self._stopping = False
+
+ @classmethod
+ def teardown_class(self):
+ """ teardown any state that was previously setup with a call to
+ setup_class.
+ """
+ debug("Running teardown_class...")
+ try:
+
+ debug("Destroying LXD containers...")
+ for application in self.state:
+ if self.state[application]['container']:
+ destroy_lxd_container(self.state[application]['container'])
+ debug("Destroying LXD containers...done.")
+
+ # Logout of N2VC
+ if self.n2vc:
+ debug("teardown_class(): Logging out of N2VC...")
+ yield from self.n2vc.logout()
+ debug("teardown_class(): Logging out of N2VC...done.")
+
+ debug("Running teardown_class...done.")
+ except Exception as ex:
+ debug("Exception in teardown_class: {}".format(ex))
+
+ @classmethod
+ def all_charms_active(self):
+ """Determine if the all deployed charms are active."""
+ active = 0
+
+ for application in self.state:
+ if 'status' in self.state[application]:
+ debug("status of {} is '{}'".format(
+ application,
+ self.state[application]['status'],
+ ))
+ if self.state[application]['status'] == 'active':
+ active += 1
+
+ debug("Active charms: {}/{}".format(
+ active,
+ len(self.charms),
+ ))
+
+ if active == len(self.charms):
+ return True
+
+ return False
+
+ @classmethod
+ def are_tests_finished(self):
+ appcount = len(self.state)
+
+ # If we don't have state yet, keep running.
+ if appcount == 0:
+ debug("No applications")
+ return False
+
+ if self._stopping:
+ debug("_stopping is True")
+ return True
+
+ appdone = 0
+ for application in self.state:
+ if self.state[application]['done']:
+ appdone += 1
+
+ debug("{}/{} charms tested".format(appdone, appcount))
+
+ if appcount == appdone:
+ return True
+
+ return False
+
+ @classmethod
+ async def running(self, timeout=600):
+ """Returns if the test is still running.
+
+ @param timeout The time, in seconds, to wait for the test to complete.
+ """
+ if self.are_tests_finished():
+ await self.stop()
+ return False
+
+ await asyncio.sleep(30)
+
+ return self._running
+
+ @classmethod
+ def get_charm(self, charm):
+ """Build and return the path to the test charm.
+
+ Builds one of the charms in tests/charms/layers and returns the path
+ to the compiled charm. The charm will automatically be removed
+ when the test is complete.
+
+ Returns: The path to the built charm or None if `charm build` failed.
+ """
+
+ # Make sure the charm snap is installed
+ try:
+ subprocess.check_call(['which', 'charm'])
+ except subprocess.CalledProcessError:
+ raise Exception("charm snap not installed.")
+
+ if charm not in self.artifacts:
+ try:
+ # Note: This builds the charm under N2VC/tests/charms/builds/
+ # Currently, the snap-installed command only has write access
+ # to the $HOME (changing in an upcoming release) so writing to
+ # /tmp isn't possible at the moment.
+ builds = get_charm_path()
+
+ if not os.path.exists("{}/builds/{}".format(builds, charm)):
+ cmd = "charm build --no-local-layers {}/{} -o {}/".format(
+ get_layer_path(),
+ charm,
+ builds,
+ )
+ subprocess.check_call(shlex.split(cmd))
+
+ except subprocess.CalledProcessError as e:
+ # charm build will return error code 100 if the charm fails
+ # the auto-run of charm proof, which we can safely ignore for
+ # our CI charms.
+ if e.returncode != 100:
+ raise Exception("charm build failed: {}.".format(e))
+
+ self.artifacts[charm] = {
+ 'tmpdir': builds,
+ 'charm': "{}/builds/{}".format(builds, charm),
+ }
+
+ return self.artifacts[charm]['charm']
+
+    @classmethod
+    async def deploy(self, vnf_index, charm, params, loop):
+        """Deploy a charm (from either a vdu or vnf) to the NS model.
+
+        Lazily opens the N2VC connection, creates the Network Service model,
+        registers the application's initial test state, provisions an LXD
+        container for native (non-proxy) charms, and starts the deployment.
+
+        :param vnf_index: member index of the VNF within the NS.
+        :param charm: name of the (already built) charm to deploy.
+        :param params: deployment parameters passed through to DeployCharms.
+        :param loop: asyncio event loop used for the N2VC connection.
+        """
+
+        # Lazily initialize the shared N2VC connection on first use.
+        if not self.n2vc:
+            self.n2vc = get_n2vc(loop=loop)
+
+        debug("Creating model for Network Service {}".format(self.ns_name))
+        await self.n2vc.CreateNetworkService(self.ns_name)
+
+        application = self.n2vc.FormatApplicationName(
+            self.ns_name,
+            self.vnf_name,
+            str(vnf_index),
+        )
+
+        # Initialize the state of the application
+        self.state[application] = {
+            'status': None,  # Juju status
+            'container': None,  # lxd container, for proxy charms
+            'actions': {},  # Actions we've executed
+            'done': False,  # Are we done testing this charm?
+            'phase': "deploy",  # What phase is this application in?
+        }
+
+        debug("Deploying charm at {}".format(self.artifacts[charm]))
+
+        # If this is a native charm, we need to provision the underlying
+        # machine ala an LXC container.
+        machine_spec = {}
+
+        if not self.isproxy(application):
+            debug("Creating container for native charm")
+            # args = ("default", application, None, None)
+            self.state[application]['container'] = create_lxd_container(
+                name=os.path.basename(__file__)
+            )
+
+            hostname = self.get_container_ip(
+                self.state[application]['container'],
+            )
+
+            # Native charms are installed directly on the container via ssh.
+            machine_spec = {
+                'host': hostname,
+                'user': 'ubuntu',
+            }
+
+        await self.n2vc.DeployCharms(
+            self.ns_name,
+            application,
+            self.vnfd,
+            self.get_charm(charm),
+            params,
+            machine_spec,
+            self.n2vc_callback,
+        )
+
+    @classmethod
+    def parse_vnf_descriptor(self):
+        """Parse the VNF descriptor to make running tests easier.
+
+        Parse the charm information in the descriptor to make it easy to
+        write tests to run against it.
+
+        Each charm becomes a dictionary in a list:
+        [
+            'is-proxy': True,
+            'vnf-member-index': 1,
+            'vnf-name': '',
+            'charm-name': '',
+            'initial-config-primitive': {},
+            'config-primitive': {}
+        ]
+        - charm name
+        - is this a proxy charm?
+        - what are the initial-config-primitives (day 1)?
+        - what are the config primitives (day 2)?
+
+        The result is stored in self.charms, keyed by application name.
+        """
+        charms = {}
+
+        # You'd think this would be explicit, but it's just an incremental
+        # value that should be consistent.
+        vnf_member_index = 0
+
+        """Get all vdu and/or vdu config in a descriptor."""
+        config = self.get_config()
+        for cfg in config:
+            if 'juju' in cfg:
+
+                # Get the name to be used for the deployed application
+                application_name = n2vc.vnf.N2VC().FormatApplicationName(
+                    self.ns_name,
+                    self.vnf_name,
+                    str(vnf_member_index),
+                )
+
+                # Defaults: assume a proxy charm unless the descriptor's
+                # juju section says otherwise.
+                charm = {
+                    'application-name': application_name,
+                    'proxy': True,
+                    'vnf-member-index': vnf_member_index,
+                    'vnf-name': self.vnf_name,
+                    'name': None,
+                    'initial-config-primitive': {},
+                    'config-primitive': {},
+                }
+
+                juju = cfg['juju']
+                charm['name'] = juju['charm']
+
+                if 'proxy' in juju:
+                    charm['proxy'] = juju['proxy']
+
+                if 'initial-config-primitive' in cfg:
+                    charm['initial-config-primitive'] = \
+                        cfg['initial-config-primitive']
+
+                if 'config-primitive' in cfg:
+                    charm['config-primitive'] = cfg['config-primitive']
+
+                charms[application_name] = charm
+
+                # Increment the vnf-member-index
+                vnf_member_index += 1
+
+        self.charms = charms
+
+    @classmethod
+    def isproxy(self, application_name):
+        """Return True if the named application is a proxy charm.
+
+        Relies on parse_vnf_descriptor() having populated self.charms.
+        """
+        assert application_name in self.charms
+        assert 'proxy' in self.charms[application_name]
+        assert type(self.charms[application_name]['proxy']) is bool
+
+        # debug(self.charms[application_name])
+        return self.charms[application_name]['proxy']
+
+    @classmethod
+    def get_config(self):
+        """Return an iterable list of config items (vdu and vnf).
+
+        As far as N2VC is concerned, the config section for vdu and vnf are
+        identical. This joins them together so tests only need to iterate
+        through one list.
+        """
+        configs = []
+
+        """Get all vdu and/or vdu config in a descriptor."""
+        # NOTE(review): assumes a 'juju' key is present whenever a
+        # vnf-configuration/vdu-configuration section exists; a descriptor
+        # without it would raise KeyError here — confirm against descriptors.
+        vnf_config = self.vnfd.get("vnf-configuration")
+        if vnf_config:
+            juju = vnf_config['juju']
+            if juju:
+                configs.append(vnf_config)
+
+        for vdu in self.vnfd['vdu']:
+            vdu_config = vdu.get('vdu-configuration')
+            if vdu_config:
+                juju = vdu_config['juju']
+                if juju:
+                    configs.append(vdu_config)
+
+        return configs
+
+    @classmethod
+    def get_charm_names(self):
+        """Return a list of charms used by the test descriptor."""
+
+        # A dict is used to de-duplicate charm names shared by multiple
+        # vdu/vnf configuration sections.
+        charms = {}
+
+        # Check if the VDUs in this VNF have a charm
+        for config in self.get_config():
+            juju = config['juju']
+
+            name = juju['charm']
+            if name not in charms:
+                charms[name] = 1
+
+        return charms.keys()
+
+    @classmethod
+    def get_phase(self, application):
+        # Current test phase for this application: "deploy", "configure"
+        # or "test" (see configure_proxy_charm/execute_charm_tests).
+        return self.state[application]['phase']
+
+    @classmethod
+    def set_phase(self, application, phase):
+        # Record the application's current test phase in the shared state.
+        self.state[application]['phase'] = phase
+
+    @classmethod
+    async def configure_proxy_charm(self, *args):
+        """Configure a container for use via ssh.
+
+        Creates the backing LXD container for a proxy charm and points the
+        charm at it (ssh-hostname/ssh-username). Only acts while the
+        application is in the "deploy" phase, moving it to "configure".
+        Returns True on success, False otherwise.
+        """
+        (model, application, _, _) = args
+
+        try:
+            if self.get_phase(application) == "deploy":
+                self.set_phase(application, "configure")
+
+                debug("Start CreateContainer for {}".format(application))
+                self.state[application]['container'] = \
+                    await self.CreateContainer(*args)
+                debug("Done CreateContainer for {}".format(application))
+
+                if self.state[application]['container']:
+                    debug("Configure {} for container".format(application))
+                    if await self.configure_ssh_proxy(application):
+                        await asyncio.sleep(0.1)
+                        return True
+                    else:
+                        debug("Failed to configure container for {}".format(application))
+            else:
+                # Already past the deploy phase; nothing to do.
+                debug("skipping CreateContainer for {}: {}".format(
+                    application,
+                    self.get_phase(application),
+                ))
+
+        except Exception as ex:
+            debug("configure_proxy_charm exception: {}".format(ex))
+        finally:
+            await asyncio.sleep(0.1)
+
+        return False
+
+    @classmethod
+    async def execute_charm_tests(self, *args):
+        """Run the initial config primitives (and metrics) for a charm.
+
+        Entered from n2vc_callback when the application reports "active".
+        Marks the application done on success. Returns False only when the
+        charm was already finished, True otherwise.
+        """
+        (model, application, _, _) = args
+
+        debug("Executing charm test(s) for {}".format(application))
+
+        if self.state[application]['done']:
+            debug("Trying to execute tests against finished charm...aborting")
+            return False
+
+        try:
+            phase = self.get_phase(application)
+            # We enter the test phase when after deploy (for native charms) or
+            # configure, for proxy charms.
+            if phase in ["deploy", "configure"]:
+                self.set_phase(application, "test")
+                if self.are_tests_finished():
+                    raise Exception("Trying to execute init-config on finished test")
+
+                if await self.execute_initial_config_primitives(application):
+                    # check for metrics
+                    await self.check_metrics(application)
+
+                    debug("Done testing {}".format(application))
+                    self.state[application]['done'] = True
+
+        except Exception as ex:
+            debug("Exception in execute_charm_tests: {}".format(ex))
+        finally:
+            await asyncio.sleep(0.1)
+
+        return True
+
+    @classmethod
+    async def CreateContainer(self, *args):
+        """Create a LXD container for use with a proxy charm.
+
+        1. Get the public key from the charm via `get-ssh-public-key` action
+        2. Create container with said key injected for the ubuntu user
+
+        Returns a Container object, or None if creation failed.
+        """
+        # Create and configure a LXD container for use with a proxy charm.
+        (model, application, _, _) = args
+
+        debug("[CreateContainer] {}".format(args))
+        container = None
+
+        try:
+            # Execute 'get-ssh-public-key' primitive and get returned value
+            uuid = await self.n2vc.ExecutePrimitive(
+                model,
+                application,
+                "get-ssh-public-key",
+                None,
+            )
+
+            result = await self.n2vc.GetPrimitiveOutput(model, uuid)
+            pubkey = result['pubkey']
+
+            container = create_lxd_container(
+                public_key=pubkey,
+                name=os.path.basename(__file__)
+            )
+
+            return container
+        except Exception as ex:
+            # Best-effort: the caller handles a None container.
+            debug("Error creating container: {}".format(ex))
+            pass
+
+        return None
+
+    @classmethod
+    async def stop(self):
+        """Stop the test.
+
+        - Remove charms
+        - Stop and delete containers
+        - Logout of N2VC
+
+        Guarded by _running/_stopping so it only executes once.
+
+        TODO: Clean up duplicate code between teardown_class() and stop()
+        """
+        debug("stop() called")
+
+        if self.n2vc and self._running and not self._stopping:
+            self._running = False
+            self._stopping = True
+
+            for application in self.charms:
+                try:
+                    await self.n2vc.RemoveCharms(self.ns_name, application)
+
+                    # NOTE(review): DestroyNetworkService is awaited twice in
+                    # this loop body (here and after the wait below) — confirm
+                    # whether the first call is intentional.
+                    await self.n2vc.DestroyNetworkService(self.ns_name)
+
+                    while True:
+                        # Wait for the application to be removed
+                        await asyncio.sleep(10)
+                        if not await self.n2vc.HasApplication(
+                            self.ns_name,
+                            application,
+                        ):
+                            break
+                    await self.n2vc.DestroyNetworkService(self.ns_name)
+
+                    # Need to wait for the charm to finish, because native charms
+                    if self.state[application]['container']:
+                        debug("Deleting LXD container...")
+                        destroy_lxd_container(
+                            self.state[application]['container']
+                        )
+                        self.state[application]['container'] = None
+                        debug("Deleting LXD container...done.")
+                    else:
+                        debug("No container found for {}".format(application))
+                except Exception as e:
+                    debug("Error while deleting container: {}".format(e))
+
+            # Logout of N2VC
+            try:
+                debug("stop(): Logging out of N2VC...")
+                await self.n2vc.logout()
+                self.n2vc = None
+                debug("stop(): Logging out of N2VC...Done.")
+            except Exception as ex:
+                debug(ex)
+
+            # Let the test know we're finished.
+            debug("Marking test as finished.")
+            # self._running = False
+        else:
+            debug("Skipping stop()")
+
+    @classmethod
+    def get_container_ip(self, container):
+        """Return the IPv4 address of container's eth0 interface.
+
+        Returns None when container is falsy.
+        """
+        ipaddr = None
+        if container:
+            addresses = container.state().network['eth0']['addresses']
+            # The interface may have more than one address, but we only need
+            # the first one for testing purposes.
+            ipaddr = addresses[0]['address']
+
+        return ipaddr
+
+    @classmethod
+    async def configure_ssh_proxy(self, application, task=None):
+        """Configure the proxy charm to use the lxd container.
+
+        Configure the charm to use a LXD container as its VNF, by setting
+        ssh-hostname/ssh-username via the charm's "config" primitive.
+        Always returns True. (`task` is currently unused.)
+        """
+        debug("Configuring ssh proxy for {}".format(application))
+
+        mgmtaddr = self.get_container_ip(
+            self.state[application]['container'],
+        )
+
+        debug(
+            "Setting ssh-hostname for {} to {}".format(
+                application,
+                mgmtaddr,
+            )
+        )
+
+        await self.n2vc.ExecutePrimitive(
+            self.ns_name,
+            application,
+            "config",
+            None,
+            params={
+                'ssh-hostname': mgmtaddr,
+                'ssh-username': 'ubuntu',
+            }
+        )
+
+        return True
+
+    @classmethod
+    async def execute_initial_config_primitives(self, application, task=None):
+        """Run the charm's initial-config-primitive actions and wait.
+
+        Returns True when all queued primitives have been waited on,
+        False on error. (`task` is currently unused.)
+        """
+        debug("Executing initial_config_primitives for {}".format(application))
+        try:
+            init_config = self.charms[application]
+
+            """
+            The initial-config-primitive is run during deploy but may fail
+            on some steps because proxy charm access isn't configured.
+
+            Re-run those actions so we can inspect the status.
+            """
+            uuids = await self.n2vc.ExecuteInitialPrimitives(
+                self.ns_name,
+                application,
+                init_config,
+            )
+
+            """
+            ExecutePrimitives will return a list of uuids. We need to check the
+            status of each. The test continues if all Actions succeed, and
+            fails if any of them fail.
+            """
+            await self.wait_for_uuids(application, uuids)
+            debug("Primitives for {} finished.".format(application))
+
+            return True
+        except Exception as ex:
+            debug("execute_initial_config_primitives exception: {}".format(ex))
+
+        return False
+
+    @classmethod
+    async def check_metrics(self, application, task=None):
+        """Check and run metrics, if present.
+
+        Checks to see if metrics are specified by the charm. If so, collects
+        the metrics and returns the result of verify_metrics(). When the
+        charm defines no metrics, returns None (implicitly).
+        """
+        if has_metrics(self.charms[application]['name']):
+            debug("Collecting metrics for {}".format(application))
+
+            metrics = await self.n2vc.GetMetrics(
+                self.ns_name,
+                application,
+            )
+
+            return await self.verify_metrics(application, metrics)
+
+    @classmethod
+    async def verify_metrics(self, application, metrics):
+        """Verify the charm's metrics.
+
+        Returns True as soon as any metrics have been collected; otherwise
+        sleeps and re-polls via check_metrics() (mutual recursion).
+        """
+        debug("Verifying metrics for {}: {}".format(application, metrics))
+
+        if len(metrics):
+            return True
+
+        else:
+            # TODO: Ran into a case where it took 9 attempts before metrics
+            # were available; the controller is slow sometimes.
+            await asyncio.sleep(30)
+            return await self.check_metrics(application)
+
+ @classmethod
+ async def wait_for_uuids(self, application, uuids):
+ """Wait for primitives to execute.
+
+ The task will provide a list of uuids representing primitives that are
+ queued to run.
+ """
+ debug("Waiting for uuids for {}: {}".format(application, uuids))
+ waitfor = len(uuids)
+ finished = 0
+
+ while waitfor > finished:
+ for uid in uuids:
+ await asyncio.sleep(10)
+
+ if uuid not in self.state[application]['actions']:
+ self.state[application]['actions'][uid] = "pending"
+
+ status = self.state[application]['actions'][uid]
+
+ # Have we already marked this as done?
+ if status in ["pending", "running"]:
+
+ debug("Getting status of {} ({})...".format(uid, status))
+ status = await self.n2vc.GetPrimitiveStatus(
+ self.ns_name,
+ uid,
+ )
+ debug("...state of {} is {}".format(uid, status))
+ self.state[application]['actions'][uid] = status
+
+ if status in ['completed', 'failed']:
+ finished += 1
+
+ debug("{}/{} actions complete".format(finished, waitfor))
+
+ # Wait for the primitive to finish and try again
+ if waitfor > finished:
+ debug("Waiting 10s for action to finish...")
+ await asyncio.sleep(10)
+
+    @classmethod
+    def n2vc_callback(self, *args, **kwargs):
+        """Dispatch N2VC status updates for an application.
+
+        args is (model, application, status, message). Records the Juju
+        status in self.state and then:
+        - "blocked" + proxy charm in deploy phase: schedule container
+          creation/configuration (configure_proxy_charm)
+        - "active": schedule the charm tests (execute_charm_tests)
+        - waiting/maintenance/unknown: ignored.
+        """
+        (model, application, status, message) = args
+        # debug("callback: {}".format(args))
+
+        if application not in self.state:
+            # Initialize the state of the application
+            self.state[application] = {
+                'status': None,  # Juju status
+                'container': None,  # lxd container, for proxy charms
+                'actions': {},  # Actions we've executed
+                'done': False,  # Are we done testing this charm?
+                'phase': "deploy",  # What phase is this application in?
+            }
+
+        self.state[application]['status'] = status
+
+        if status in ['waiting', 'maintenance', 'unknown']:
+            # Nothing to do for these
+            return
+
+        debug("callback: {}".format(args))
+
+        if self.state[application]['done']:
+            debug("{} is done".format(application))
+            return
+
+        if status in ["blocked"] and self.isproxy(application):
+            if self.state[application]['phase'] == "deploy":
+                debug("Configuring proxy charm for {}".format(application))
+                asyncio.ensure_future(self.configure_proxy_charm(*args))
+
+        elif status in ["active"]:
+            """When a charm is active, we can assume that it has been properly
+            configured (not blocked), regardless of if it's a proxy or not.
+
+            All primitives should be complete by init_config_primitive
+            """
+            asyncio.ensure_future(self.execute_charm_tests(*args))
diff --git a/tests/charms/layers/broken/README.md b/tests/charms/layers/broken/README.md
new file mode 100644
index 0000000..9234e57
--- /dev/null
+++ b/tests/charms/layers/broken/README.md
@@ -0,0 +1,3 @@
+# Overview
+
+This charm is intended to install and break, requiring it to be removed.
diff --git a/tests/charms/layers/broken/actions.yaml b/tests/charms/layers/broken/actions.yaml
new file mode 100644
index 0000000..6cd6f8c
--- /dev/null
+++ b/tests/charms/layers/broken/actions.yaml
@@ -0,0 +1,9 @@
+touch:
+ description: "Touch a file on the VNF."
+ params:
+ filename:
+ description: "The name of the file to touch."
+ type: string
+ default: ""
+ required:
+ - filename
diff --git a/tests/charms/layers/broken/actions/touch b/tests/charms/layers/broken/actions/touch
new file mode 100755
index 0000000..7e30af4
--- /dev/null
+++ b/tests/charms/layers/broken/actions/touch
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+import sys
+sys.path.append('lib')
+
+from charms.reactive import main, set_flag
+from charmhelpers.core.hookenv import action_fail, action_name
+
+"""
+`set_state` only works here because it's flushed to disk inside the `main()`
+loop. remove_state will need to be called inside the action method.
+"""
+# Flag this action by name (e.g. "actions.touch") so the matching reactive
+# handler fires when main() runs.
+set_flag('actions.{}'.format(action_name()))
+
+try:
+    main()
+except Exception as e:
+    # Report any handler failure back through the Juju action framework.
+    action_fail(repr(e))
diff --git a/tests/charms/layers/broken/config.yaml b/tests/charms/layers/broken/config.yaml
new file mode 100644
index 0000000..51f2ce4
--- /dev/null
+++ b/tests/charms/layers/broken/config.yaml
@@ -0,0 +1,14 @@
+options:
+ string-option:
+ type: string
+ default: "Default Value"
+ description: "A short description of the configuration option"
+ boolean-option:
+ type: boolean
+ default: False
+ description: "A short description of the configuration option"
+ int-option:
+ type: int
+ default: 9001
+ description: "A short description of the configuration option"
+
diff --git a/tests/charms/layers/broken/icon.svg b/tests/charms/layers/broken/icon.svg
new file mode 100644
index 0000000..e092eef
--- /dev/null
+++ b/tests/charms/layers/broken/icon.svg
@@ -0,0 +1,279 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="96"
+ height="96"
+ id="svg6517"
+ version="1.1"
+ inkscape:version="0.48+devel r12274"
+ sodipodi:docname="Juju_charm_icon_template.svg">
+ <defs
+ id="defs6519">
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#Background"
+ id="linearGradient6461"
+ gradientUnits="userSpaceOnUse"
+ x1="0"
+ y1="970.29498"
+ x2="144"
+ y2="970.29498"
+ gradientTransform="matrix(0,-0.66666669,0.6660448,0,-866.25992,731.29077)" />
+ <linearGradient
+ id="Background">
+ <stop
+ id="stop4178"
+ offset="0"
+ style="stop-color:#b8b8b8;stop-opacity:1" />
+ <stop
+ id="stop4180"
+ offset="1"
+ style="stop-color:#c9c9c9;stop-opacity:1" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Inner Shadow"
+ id="filter1121">
+ <feFlood
+ flood-opacity="0.59999999999999998"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood1123" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="out"
+ result="composite1"
+ id="feComposite1125" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur1127" />
+ <feOffset
+ dx="0"
+ dy="2"
+ result="offset"
+ id="feOffset1129" />
+ <feComposite
+ in="offset"
+ in2="SourceGraphic"
+ operator="atop"
+ result="composite2"
+ id="feComposite1131" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Drop Shadow"
+ id="filter950">
+ <feFlood
+ flood-opacity="0.25"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood952" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="in"
+ result="composite1"
+ id="feComposite954" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur956" />
+ <feOffset
+ dx="0"
+ dy="1"
+ result="offset"
+ id="feOffset958" />
+ <feComposite
+ in="SourceGraphic"
+ in2="offset"
+ operator="over"
+ result="composite2"
+ id="feComposite960" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath873">
+ <g
+ transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
+ id="g875"
+ inkscape:label="Layer 1"
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
+ <path
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
+ d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter891"
+ inkscape:label="Badge Shadow">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.71999962"
+ id="feGaussianBlur893" />
+ </filter>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="4.0745362"
+ inkscape:cx="18.514671"
+ inkscape:cy="49.018169"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1920"
+ inkscape:window-height="1029"
+ inkscape:window-x="0"
+ inkscape:window-y="24"
+ inkscape:window-maximized="1"
+ showborder="true"
+ showguides="true"
+ inkscape:guide-bbox="true"
+ inkscape:showpageshadow="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid821" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="16,48"
+ id="guide823" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,80"
+ id="guide825" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="80,40"
+ id="guide827" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,16"
+ id="guide829" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata6522">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="BACKGROUND"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(268,-635.29076)"
+ style="display:inline">
+ <path
+ style="fill:url(#linearGradient6461);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
+ d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
+ id="path6455"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="PLACE YOUR PICTOGRAM HERE"
+ style="display:inline" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="BADGE"
+ style="display:none"
+ sodipodi:insensitive="true">
+ <g
+ style="display:inline"
+ transform="translate(-340.00001,-581)"
+ id="g4394"
+ clip-path="none">
+ <g
+ id="g855">
+ <g
+ inkscape:groupmode="maskhelper"
+ id="g870"
+ clip-path="url(#clipPath873)"
+ style="opacity:0.6;filter:url(#filter891)">
+ <path
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path844"
+ style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ </g>
+ <g
+ id="g862">
+ <path
+ sodipodi:type="arc"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4398"
+ sodipodi:cx="252"
+ sodipodi:cy="552.36218"
+ sodipodi:rx="12"
+ sodipodi:ry="12"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
+ <path
+ transform="matrix(1.25,0,0,1.25,33,-100.45273)"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path4400"
+ style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ <path
+ sodipodi:type="star"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4459"
+ sodipodi:sides="5"
+ sodipodi:cx="666.19574"
+ sodipodi:cy="589.50385"
+ sodipodi:r1="7.2431178"
+ sodipodi:r2="4.3458705"
+ sodipodi:arg1="1.0471976"
+ sodipodi:arg2="1.6755161"
+ inkscape:flatsided="false"
+ inkscape:rounded="0.1"
+ inkscape:randomized="0"
+ d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 C 669.9821,591.68426 670.20862,595.55064 669.8173,595.77657 Z"
+ transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/tests/charms/layers/broken/layer.yaml b/tests/charms/layers/broken/layer.yaml
new file mode 100644
index 0000000..3fed5e2
--- /dev/null
+++ b/tests/charms/layers/broken/layer.yaml
@@ -0,0 +1,4 @@
+includes: ['layer:basic', 'layer:vnfproxy']
+options:
+ basic:
+ use_venv: false
diff --git a/tests/charms/layers/broken/metadata.yaml b/tests/charms/layers/broken/metadata.yaml
new file mode 100644
index 0000000..1780d3f
--- /dev/null
+++ b/tests/charms/layers/broken/metadata.yaml
@@ -0,0 +1,5 @@
+name: broken
+summary: A (broken) simple VNF proxy charm
+maintainer: Adam Israel <adam.israel@canonical.com>
+subordinate: false
+series: ['xenial']
diff --git a/tests/charms/layers/broken/metrics.yaml b/tests/charms/layers/broken/metrics.yaml
new file mode 100644
index 0000000..6ebb605
--- /dev/null
+++ b/tests/charms/layers/broken/metrics.yaml
@@ -0,0 +1,5 @@
+metrics:
+ uptime:
+ type: gauge
+ description: "Uptime of the VNF"
+ command: awk '{print $1}' /proc/uptime
diff --git a/tests/charms/layers/broken/reactive/simple.py b/tests/charms/layers/broken/reactive/simple.py
new file mode 100644
index 0000000..1529eee
--- /dev/null
+++ b/tests/charms/layers/broken/reactive/simple.py
@@ -0,0 +1,45 @@
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_fail,
+ action_set,
+ status_set,
+)
+from charms.reactive import (
+ clear_flag,
+ set_flag,
+ when,
+ when_not,
+)
+import charms.sshproxy
+
+
+@when('sshproxy.configured')
+@when_not('simple.installed')
+def install_simple_proxy_charm():
+    """Post-install actions.
+
+    This function will run when two conditions are met:
+    1. The 'sshproxy.configured' state is set
+    2. The 'simple.installed' state is not set
+
+    This ensures that the workload status is set to active only when the SSH
+    proxy is properly configured.
+    """
+    # Setting the flag makes this handler one-shot.
+    set_flag('simple.installed')
+    status_set('active', 'Ready!')
+
+
+@when('actions.touch')
+def touch():
+    # Intentional failure: this charm is designed to install and break (see
+    # its README) so that removal of a failed charm can be exercised.
+    # Everything below the raise is unreachable by design.
+    raise Exception("I am broken.")
+    err = ''
+    try:
+        filename = action_get('filename')
+        cmd = ['touch {}'.format(filename)]
+        result, err = charms.sshproxy._run(cmd)
+    except Exception:
+        action_fail('command failed:' + err)
+    else:
+        action_set({'output': result})
+    finally:
+        clear_flag('actions.touch')
diff --git a/tests/charms/layers/broken/tests/00-setup b/tests/charms/layers/broken/tests/00-setup
new file mode 100644
index 0000000..f0616a5
--- /dev/null
+++ b/tests/charms/layers/broken/tests/00-setup
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+# Install amulet (the Juju charm test harness) and python-requests from
+# the juju stable PPA.
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
diff --git a/tests/charms/layers/broken/tests/10-deploy b/tests/charms/layers/broken/tests/10-deploy
new file mode 100644
index 0000000..9a26117
--- /dev/null
+++ b/tests/charms/layers/broken/tests/10-deploy
@@ -0,0 +1,35 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
+class TestCharm(unittest.TestCase):
+    """Amulet-based deployment test for the 'simple' charm."""
+
+    def setUp(self):
+        # Deploy and expose a single 'simple' unit, then wait for the
+        # environment to settle before each test.
+        self.d = amulet.Deployment()
+
+        self.d.add('simple')
+        self.d.expose('simple')
+
+        self.d.setup(timeout=900)
+        self.d.sentry.wait()
+
+        self.unit = self.d.sentry['simple'][0]
+
+    def test_service(self):
+        # test we can access over http
+        page = requests.get('http://{}'.format(self.unit.info['public-address']))
+        self.assertEqual(page.status_code, 200)
+        # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
+        # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
+        # - .info - An array of the information of that unit from Juju
+        # - .file(PATH) - Get the details of a file on that unit
+        # - .file_contents(PATH) - Get plain text output of PATH file from that unit
+        # - .directory(PATH) - Get details of directory
+        # - .directory_contents(PATH) - List files and folders in PATH on that unit
+        # - .relation(relation, service:rel) - Get relation data from return service
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/charms/layers/metrics-ci/README.ex b/tests/charms/layers/metrics-ci/README.ex
new file mode 100755
index 0000000..b6816b2
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/README.ex
@@ -0,0 +1,65 @@
+# Overview
+
+Describe the intended usage of this charm and anything unique about how this
+charm relates to others here.
+
+This README will be displayed in the Charm Store, it should be either Markdown
+or RST. Ideal READMEs include instructions on how to use the charm, expected
+usage, and charm features that your audience might be interested in. For an
+example of a well written README check out Hadoop:
+http://jujucharms.com/charms/precise/hadoop
+
+Use this as a Markdown reference if you need help with the formatting of this
+README: http://askubuntu.com/editing-help
+
+This charm provides [service][]. Add a description here of what the service
+itself actually does.
+
+Also remember to check the [icon guidelines][] so that your charm looks good
+in the Juju GUI.
+
+# Usage
+
+Step by step instructions on using the charm:
+
+juju deploy servicename
+
+and so on. If you're providing a web service or something that the end user
+needs to go to, tell them here, especially if you're deploying a service that
+might listen to a non-default port.
+
+You can then browse to http://ip-address to configure the service.
+
+## Scale out Usage
+
+If the charm has any recommendations for running at scale, outline them in
+examples here. For example if you have a memcached relation that improves
+performance, mention it here.
+
+## Known Limitations and Issues
+
+This not only helps users but gives people a place to start if they want to help
+you add features to your charm.
+
+# Configuration
+
+The configuration options will be listed on the charm store, however If you're
+making assumptions or opinionated decisions in the charm (like setting a default
+administrator password), you should detail that here so the user knows how to
+change it immediately, etc.
+
+# Contact Information
+
+Though this will be listed in the charm store itself don't assume a user will
+know that, so include that information here:
+
+## Upstream Project Name
+
+ - Upstream website
+ - Upstream bug tracker
+ - Upstream mailing list or contact information
+ - Feel free to add things if it's useful for users
+
+
+[service]: http://example.com
+[icon guidelines]: https://jujucharms.com/docs/stable/authors-charm-icon
diff --git a/tests/charms/layers/metrics-ci/config.yaml b/tests/charms/layers/metrics-ci/config.yaml
new file mode 100755
index 0000000..51f2ce4
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/config.yaml
@@ -0,0 +1,14 @@
+options:
+ string-option:
+ type: string
+ default: "Default Value"
+ description: "A short description of the configuration option"
+ boolean-option:
+ type: boolean
+ default: False
+ description: "A short description of the configuration option"
+ int-option:
+ type: int
+ default: 9001
+ description: "A short description of the configuration option"
+
diff --git a/tests/charms/layers/metrics-ci/layer.yaml b/tests/charms/layers/metrics-ci/layer.yaml
new file mode 100755
index 0000000..bd3a2b9
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/layer.yaml
@@ -0,0 +1 @@
+includes: ['layer:basic', 'layer:metrics'] # if you use any interfaces, add them here
diff --git a/tests/charms/layers/metrics-ci/metadata.yaml b/tests/charms/layers/metrics-ci/metadata.yaml
new file mode 100755
index 0000000..060274d
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/metadata.yaml
@@ -0,0 +1,12 @@
+name: metrics-ci
+summary: <Fill in summary here>
+maintainer: Adam Israel <Adam.Israel@ronin>
+description: |
+ <Multi-line description here>
+tags:
+ # Replace "misc" with one or more whitelisted tags from this list:
+ # https://jujucharms.com/docs/stable/authors-charm-metadata
+ - misc
+subordinate: false
+series:
+ - xenial
diff --git a/tests/charms/layers/metrics-ci/metrics.yaml b/tests/charms/layers/metrics-ci/metrics.yaml
new file mode 100755
index 0000000..dae092f
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/metrics.yaml
@@ -0,0 +1,9 @@
+metrics:
+ users:
+ type: gauge
+ description: "# of users"
+ command: who|wc -l
+ load:
+ type: gauge
+ description: "5 minute load average"
+ command: cat /proc/loadavg |awk '{print $1}'
diff --git a/tests/charms/layers/metrics-ci/reactive/metrics_ci.py b/tests/charms/layers/metrics-ci/reactive/metrics_ci.py
new file mode 100755
index 0000000..9217be4
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/reactive/metrics_ci.py
@@ -0,0 +1,13 @@
+from charmhelpers.core.hookenv import (
+ status_set,
+)
+from charms.reactive import (
+ set_flag,
+ when_not,
+)
+
+
+@when_not('metrics-ci.installed')
+def install_metrics_ci():
+ status_set('active', "Ready!")
+ set_flag('metrics-ci.installed')
diff --git a/tests/charms/layers/metrics-ci/tests/00-setup b/tests/charms/layers/metrics-ci/tests/00-setup
new file mode 100755
index 0000000..f0616a5
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/tests/00-setup
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
diff --git a/tests/charms/layers/metrics-ci/tests/10-deploy b/tests/charms/layers/metrics-ci/tests/10-deploy
new file mode 100755
index 0000000..7595ecf
--- /dev/null
+++ b/tests/charms/layers/metrics-ci/tests/10-deploy
@@ -0,0 +1,35 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
+class TestCharm(unittest.TestCase):
+ def setUp(self):
+ self.d = amulet.Deployment()
+
+ self.d.add('metrics-demo')
+ self.d.expose('metrics-demo')
+
+ self.d.setup(timeout=900)
+ self.d.sentry.wait()
+
+ self.unit = self.d.sentry['metrics-demo'][0]
+
+ def test_service(self):
+ # test we can access over http
+ page = requests.get('http://{}'.format(self.unit.info['public-address']))
+ self.assertEqual(page.status_code, 200)
+ # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
+ # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
+ # - .info - An array of the information of that unit from Juju
+ # - .file(PATH) - Get the details of a file on that unit
+ # - .file_contents(PATH) - Get plain text output of PATH file from that unit
+ # - .directory(PATH) - Get details of directory
+ # - .directory_contents(PATH) - List files and folders in PATH on that unit
+ # - .relation(relation, service:rel) - Get relation data from return service
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/charms/layers/metrics-proxy-ci/README.ex b/tests/charms/layers/metrics-proxy-ci/README.ex
new file mode 100644
index 0000000..b6816b2
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/README.ex
@@ -0,0 +1,65 @@
+# Overview
+
+Describe the intended usage of this charm and anything unique about how this
+charm relates to others here.
+
+This README will be displayed in the Charm Store, it should be either Markdown
+or RST. Ideal READMEs include instructions on how to use the charm, expected
+usage, and charm features that your audience might be interested in. For an
+example of a well written README check out Hadoop:
+http://jujucharms.com/charms/precise/hadoop
+
+Use this as a Markdown reference if you need help with the formatting of this
+README: http://askubuntu.com/editing-help
+
+This charm provides [service][]. Add a description here of what the service
+itself actually does.
+
+Also remember to check the [icon guidelines][] so that your charm looks good
+in the Juju GUI.
+
+# Usage
+
+Step by step instructions on using the charm:
+
+juju deploy servicename
+
+and so on. If you're providing a web service or something that the end user
+needs to go to, tell them here, especially if you're deploying a service that
+might listen to a non-default port.
+
+You can then browse to http://ip-address to configure the service.
+
+## Scale out Usage
+
+If the charm has any recommendations for running at scale, outline them in
+examples here. For example if you have a memcached relation that improves
+performance, mention it here.
+
+## Known Limitations and Issues
+
+This not only helps users but gives people a place to start if they want to help
+you add features to your charm.
+
+# Configuration
+
+The configuration options will be listed on the charm store; however, if you're
+making assumptions or opinionated decisions in the charm (like setting a default
+administrator password), you should detail that here so the user knows how to
+change it immediately, etc.
+
+# Contact Information
+
+Though this will be listed in the charm store itself, don't assume a user will
+know that, so include that information here:
+
+## Upstream Project Name
+
+ - Upstream website
+ - Upstream bug tracker
+ - Upstream mailing list or contact information
+ - Feel free to add things if it's useful for users
+
+
+[service]: http://example.com
+[icon guidelines]: https://jujucharms.com/docs/stable/authors-charm-icon
diff --git a/tests/charms/layers/metrics-proxy-ci/config.yaml b/tests/charms/layers/metrics-proxy-ci/config.yaml
new file mode 100644
index 0000000..51f2ce4
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/config.yaml
@@ -0,0 +1,14 @@
+options:
+ string-option:
+ type: string
+ default: "Default Value"
+ description: "A short description of the configuration option"
+ boolean-option:
+ type: boolean
+ default: False
+ description: "A short description of the configuration option"
+ int-option:
+ type: int
+ default: 9001
+ description: "A short description of the configuration option"
+
diff --git a/tests/charms/layers/metrics-proxy-ci/layer.yaml b/tests/charms/layers/metrics-proxy-ci/layer.yaml
new file mode 100644
index 0000000..790dee6
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/layer.yaml
@@ -0,0 +1,4 @@
+includes:
+ - 'layer:basic'
+ - 'layer:vnfproxy'
+ - 'layer:sshproxy'
diff --git a/tests/charms/layers/metrics-proxy-ci/metadata.yaml b/tests/charms/layers/metrics-proxy-ci/metadata.yaml
new file mode 100644
index 0000000..ae42434
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/metadata.yaml
@@ -0,0 +1,12 @@
+name: metrics-proxy-ci
+summary: <Fill in summary here>
+maintainer: Adam Israel <Adam.Israel@ronin>
+description: |
+ <Multi-line description here>
+tags:
+ # Replace "misc" with one or more whitelisted tags from this list:
+ # https://jujucharms.com/docs/stable/authors-charm-metadata
+ - misc
+subordinate: false
+series:
+ - xenial
diff --git a/tests/charms/layers/metrics-proxy-ci/metrics.yaml b/tests/charms/layers/metrics-proxy-ci/metrics.yaml
new file mode 100644
index 0000000..dae092f
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/metrics.yaml
@@ -0,0 +1,9 @@
+metrics:
+ users:
+ type: gauge
+ description: "# of users"
+ command: who|wc -l
+ load:
+ type: gauge
+ description: "5 minute load average"
+ command: cat /proc/loadavg |awk '{print $1}'
diff --git a/tests/charms/layers/metrics-proxy-ci/reactive/metrics_ci.py b/tests/charms/layers/metrics-proxy-ci/reactive/metrics_ci.py
new file mode 100644
index 0000000..51ce49e
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/reactive/metrics_ci.py
@@ -0,0 +1,13 @@
+from charmhelpers.core.hookenv import (
+ status_set,
+)
+from charms.reactive import (
+ set_flag,
+ when_not,
+)
+
+
+@when_not('metrics-ci.installed')
+def install_metrics_ci():
+ status_set('blocked', "Waiting for SSH credentials.")
+ set_flag('metrics-ci.installed')
diff --git a/tests/charms/layers/metrics-proxy-ci/tests/00-setup b/tests/charms/layers/metrics-proxy-ci/tests/00-setup
new file mode 100644
index 0000000..f0616a5
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/tests/00-setup
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
diff --git a/tests/charms/layers/metrics-proxy-ci/tests/10-deploy b/tests/charms/layers/metrics-proxy-ci/tests/10-deploy
new file mode 100644
index 0000000..7595ecf
--- /dev/null
+++ b/tests/charms/layers/metrics-proxy-ci/tests/10-deploy
@@ -0,0 +1,35 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
+class TestCharm(unittest.TestCase):
+ def setUp(self):
+ self.d = amulet.Deployment()
+
+ self.d.add('metrics-demo')
+ self.d.expose('metrics-demo')
+
+ self.d.setup(timeout=900)
+ self.d.sentry.wait()
+
+ self.unit = self.d.sentry['metrics-demo'][0]
+
+ def test_service(self):
+ # test we can access over http
+ page = requests.get('http://{}'.format(self.unit.info['public-address']))
+ self.assertEqual(page.status_code, 200)
+ # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
+ # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
+ # - .info - An array of the information of that unit from Juju
+ # - .file(PATH) - Get the details of a file on that unit
+ # - .file_contents(PATH) - Get plain text output of PATH file from that unit
+ # - .directory(PATH) - Get details of directory
+ # - .directory_contents(PATH) - List files and folders in PATH on that unit
+ # - .relation(relation, service:rel) - Get relation data from return service
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/charms/layers/native-ci/README.md b/tests/charms/layers/native-ci/README.md
new file mode 100644
index 0000000..d58b762
--- /dev/null
+++ b/tests/charms/layers/native-ci/README.md
@@ -0,0 +1,3 @@
+# Overview
+
+A native charm.
diff --git a/tests/charms/layers/native-ci/actions.yaml b/tests/charms/layers/native-ci/actions.yaml
new file mode 100644
index 0000000..6adcba7
--- /dev/null
+++ b/tests/charms/layers/native-ci/actions.yaml
@@ -0,0 +1,8 @@
+test:
+ description: "Verify that the action can run."
+testint:
+ description: "Test a primitive with a non-string parameter"
+ params:
+ intval:
+ type: integer
+ default: 0
diff --git a/tests/charms/layers/native-ci/actions/test b/tests/charms/layers/native-ci/actions/test
new file mode 100755
index 0000000..7e30af4
--- /dev/null
+++ b/tests/charms/layers/native-ci/actions/test
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+import sys
+sys.path.append('lib')
+
+from charms.reactive import main, set_flag
+from charmhelpers.core.hookenv import action_fail, action_name
+
+"""
+`set_state` only works here because it's flushed to disk inside the `main()`
+loop. remove_state will need to be called inside the action method.
+"""
+set_flag('actions.{}'.format(action_name()))
+
+try:
+ main()
+except Exception as e:
+ action_fail(repr(e))
diff --git a/tests/charms/layers/native-ci/actions/testint b/tests/charms/layers/native-ci/actions/testint
new file mode 100755
index 0000000..7e30af4
--- /dev/null
+++ b/tests/charms/layers/native-ci/actions/testint
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+import sys
+sys.path.append('lib')
+
+from charms.reactive import main, set_flag
+from charmhelpers.core.hookenv import action_fail, action_name
+
+"""
+`set_state` only works here because it's flushed to disk inside the `main()`
+loop. remove_state will need to be called inside the action method.
+"""
+set_flag('actions.{}'.format(action_name()))
+
+try:
+ main()
+except Exception as e:
+ action_fail(repr(e))
diff --git a/tests/charms/layers/native-ci/layer.yaml b/tests/charms/layers/native-ci/layer.yaml
new file mode 100644
index 0000000..138d9d3
--- /dev/null
+++ b/tests/charms/layers/native-ci/layer.yaml
@@ -0,0 +1,7 @@
+includes:
+ - 'layer:basic'
+ - 'interface:mysql'
+
+options:
+ basic:
+ use_venv: false
diff --git a/tests/charms/layers/native-ci/metadata.yaml b/tests/charms/layers/native-ci/metadata.yaml
new file mode 100644
index 0000000..0460e48
--- /dev/null
+++ b/tests/charms/layers/native-ci/metadata.yaml
@@ -0,0 +1,12 @@
+name: native-ci
+summary: A native VNF charm
+description: A native VNF charm
+maintainer: Adam Israel <adam.israel@canonical.com>
+subordinate: false
+series: ['xenial']
+provides:
+ db:
+ interface: mysql
+requires:
+ app:
+ interface: mysql
diff --git a/tests/charms/layers/native-ci/reactive/native-ci.py b/tests/charms/layers/native-ci/reactive/native-ci.py
new file mode 100644
index 0000000..9e5fe67
--- /dev/null
+++ b/tests/charms/layers/native-ci/reactive/native-ci.py
@@ -0,0 +1,62 @@
+from charmhelpers.core.hookenv import (
+ action_fail,
+ action_set,
+ action_get,
+ status_set,
+)
+from charms.reactive import (
+ clear_flag,
+ set_flag,
+ when,
+ when_not,
+)
+
+
+@when_not('native-ci.installed')
+def install_native_ci_charm():
+ set_flag('native-ci.installed')
+ status_set('active', 'Ready!')
+
+
+@when('actions.test', 'native-ci.installed')
+def test():
+ try:
+ result = True
+ except Exception as e:
+ action_fail('command failed: {}'.format(e))
+ else:
+ action_set({'output': result})
+ finally:
+ clear_flag('actions.test')
+
+
+@when('actions.testint', 'native-ci.installed')
+def testint():
+ try:
+        # Test the value is an int by performing a mathematical operation on it.
+ intval = action_get('intval')
+ intval = intval + 1
+ except Exception as e:
+ action_fail('command failed: {}'.format(e))
+ else:
+ action_set({'output': intval})
+ finally:
+ clear_flag('actions.testint')
+
+
+@when('db.joined')
+def provides_db(db):
+ """Simulate providing database credentials."""
+ db.configure(
+ database="mydb",
+ user="myuser",
+ password="mypassword",
+ host="myhost",
+ slave="myslave",
+ )
+
+
+@when('db.available')
+def requires_db(db):
+ """Simulate receiving database credentials."""
+ pass
diff --git a/tests/charms/layers/proxy-ci/README.md b/tests/charms/layers/proxy-ci/README.md
new file mode 100644
index 0000000..c16d9d8
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/README.md
@@ -0,0 +1,3 @@
+# Overview
+
+A `charm layer` to test the functionality of proxy charms.
diff --git a/tests/charms/layers/proxy-ci/actions.yaml b/tests/charms/layers/proxy-ci/actions.yaml
new file mode 100644
index 0000000..5af8591
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/actions.yaml
@@ -0,0 +1,2 @@
+test:
+ description: "Verify that the action can run."
diff --git a/tests/charms/layers/proxy-ci/actions/test b/tests/charms/layers/proxy-ci/actions/test
new file mode 100755
index 0000000..7e30af4
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/actions/test
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+import sys
+sys.path.append('lib')
+
+from charms.reactive import main, set_flag
+from charmhelpers.core.hookenv import action_fail, action_name
+
+"""
+`set_state` only works here because it's flushed to disk inside the `main()`
+loop. remove_state will need to be called inside the action method.
+"""
+set_flag('actions.{}'.format(action_name()))
+
+try:
+ main()
+except Exception as e:
+ action_fail(repr(e))
diff --git a/tests/charms/layers/proxy-ci/layer.yaml b/tests/charms/layers/proxy-ci/layer.yaml
new file mode 100644
index 0000000..790dee6
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/layer.yaml
@@ -0,0 +1,4 @@
+includes:
+ - 'layer:basic'
+ - 'layer:vnfproxy'
+ - 'layer:sshproxy'
diff --git a/tests/charms/layers/proxy-ci/metadata.yaml b/tests/charms/layers/proxy-ci/metadata.yaml
new file mode 100644
index 0000000..bb00a03
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/metadata.yaml
@@ -0,0 +1,18 @@
+name: proxy-ci
+summary: <Fill in summary here>
+maintainer: Adam Israel <Adam.Israel@ronin>
+description: |
+ <Multi-line description here>
+tags:
+ # Replace "misc" with one or more whitelisted tags from this list:
+ # https://jujucharms.com/docs/stable/authors-charm-metadata
+ - misc
+subordinate: false
+series:
+ - xenial
+provides:
+ db:
+ interface: mysql
+requires:
+ app:
+ interface: mysql
diff --git a/tests/charms/layers/proxy-ci/reactive/proxy_ci.py b/tests/charms/layers/proxy-ci/reactive/proxy_ci.py
new file mode 100644
index 0000000..9c0136e
--- /dev/null
+++ b/tests/charms/layers/proxy-ci/reactive/proxy_ci.py
@@ -0,0 +1,52 @@
+from charmhelpers.core.hookenv import (
+ action_fail,
+ action_set,
+ status_set,
+)
+from charms.reactive import (
+ set_flag,
+ clear_flag,
+ when_not,
+ when,
+)
+import charms.sshproxy
+
+
+@when_not('proxy-ci.installed')
+def install_proxy_ci():
+ status_set('blocked', "Waiting for SSH credentials.")
+ set_flag('proxy-ci.installed')
+
+
+@when('actions.test', 'proxy-ci.installed')
+def test():
+ err = ''
+ try:
+ cmd = ['hostname']
+ result, err = charms.sshproxy._run(cmd)
+ if len(result) == 0:
+ raise Exception("Proxy failed")
+ except Exception as e:
+ action_fail('command failed: {}'.format(e))
+ else:
+ action_set({'output': result})
+ finally:
+ clear_flag('actions.test')
+
+
+@when('db.joined')
+def provides_db(db):
+ """Simulate providing database credentials."""
+ db.configure(
+ database="mydb",
+ user="myuser",
+ password="mypassword",
+ host="myhost",
+ slave="myslave",
+ )
+
+
+@when('db.available')
+def requires_db(db):
+ """Simulate receiving database credentials."""
+ pass
diff --git a/tests/charms/layers/simple/README.md b/tests/charms/layers/simple/README.md
new file mode 100644
index 0000000..f9d6eed
--- /dev/null
+++ b/tests/charms/layers/simple/README.md
@@ -0,0 +1,53 @@
+# Overview
+
+This is an example charm as demonstrated in the OSM [Hackfest](https://osm.etsi.org/wikipub/index.php/OSM_workshops_and_events) series.
+
+This is intended to provide a well-documented example of the proxy charm written by Hackfest participants.
+
+# Prerequisites
+
+There are two ways that you can exercise this charm: install the latest stable release of OSM or use Juju directly.
+
+The workshop materials and tutorials cover using charms as part of OSM. You can follow that approach, but this README will focus on using Juju directly. We highly recommend that vendors and charm developers use this approach for the initial development of the charm.
+
+## Ubuntu 16.04 or higher
+
+We recommend using Ubuntu 16.04 or higher for the development and testing of charms. It is assumed that you have installed Ubuntu either on physical hardware or in a Virtual Machine.
+
+## Install LXD and Juju
+
+We will be installing the required software via snap. Snaps are containerised software packages, preferred because they are easy to create and install, will automatically update to the latest stable version, and contain bundled dependencies.
+
+```
+snap install lxd
+snap install juju
+snap install charm
+```
+
+# Usage
+
+
+## Known Limitations and Issues
+
+This not only helps users but gives people a place to start if they want to help
+you add features to your charm.
+
+# Configuration
+
+The configuration options will be listed on the charm store; however, if you're
+making assumptions or opinionated decisions in the charm (like setting a default
+administrator password), you should detail that here so the user knows how to
+change it immediately, etc.
+
+# Contact Information
+
+## Upstream Project Name
+
+ - Upstream website
+ - Upstream bug tracker
+ - Upstream mailing list or contact information
+ - Feel free to add things if it's useful for users
+
+
+[service]: http://example.com
+[icon guidelines]: https://jujucharms.com/docs/stable/authors-charm-icon
diff --git a/tests/charms/layers/simple/actions.yaml b/tests/charms/layers/simple/actions.yaml
new file mode 100644
index 0000000..6cd6f8c
--- /dev/null
+++ b/tests/charms/layers/simple/actions.yaml
@@ -0,0 +1,9 @@
+touch:
+ description: "Touch a file on the VNF."
+ params:
+ filename:
+ description: "The name of the file to touch."
+ type: string
+ default: ""
+ required:
+ - filename
diff --git a/tests/charms/layers/simple/actions/touch b/tests/charms/layers/simple/actions/touch
new file mode 100755
index 0000000..7e30af4
--- /dev/null
+++ b/tests/charms/layers/simple/actions/touch
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+##
+# Copyright 2016 Canonical Ltd.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+##
+import sys
+sys.path.append('lib')
+
+from charms.reactive import main, set_flag
+from charmhelpers.core.hookenv import action_fail, action_name
+
+"""
+`set_state` only works here because it's flushed to disk inside the `main()`
+loop. remove_state will need to be called inside the action method.
+"""
+set_flag('actions.{}'.format(action_name()))
+
+try:
+ main()
+except Exception as e:
+ action_fail(repr(e))
diff --git a/tests/charms/layers/simple/config.yaml b/tests/charms/layers/simple/config.yaml
new file mode 100644
index 0000000..51f2ce4
--- /dev/null
+++ b/tests/charms/layers/simple/config.yaml
@@ -0,0 +1,14 @@
+options:
+ string-option:
+ type: string
+ default: "Default Value"
+ description: "A short description of the configuration option"
+ boolean-option:
+ type: boolean
+ default: False
+ description: "A short description of the configuration option"
+ int-option:
+ type: int
+ default: 9001
+ description: "A short description of the configuration option"
+
diff --git a/tests/charms/layers/simple/icon.svg b/tests/charms/layers/simple/icon.svg
new file mode 100644
index 0000000..e092eef
--- /dev/null
+++ b/tests/charms/layers/simple/icon.svg
@@ -0,0 +1,279 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:xlink="http://www.w3.org/1999/xlink"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="96"
+ height="96"
+ id="svg6517"
+ version="1.1"
+ inkscape:version="0.48+devel r12274"
+ sodipodi:docname="Juju_charm_icon_template.svg">
+ <defs
+ id="defs6519">
+ <linearGradient
+ inkscape:collect="always"
+ xlink:href="#Background"
+ id="linearGradient6461"
+ gradientUnits="userSpaceOnUse"
+ x1="0"
+ y1="970.29498"
+ x2="144"
+ y2="970.29498"
+ gradientTransform="matrix(0,-0.66666669,0.6660448,0,-866.25992,731.29077)" />
+ <linearGradient
+ id="Background">
+ <stop
+ id="stop4178"
+ offset="0"
+ style="stop-color:#b8b8b8;stop-opacity:1" />
+ <stop
+ id="stop4180"
+ offset="1"
+ style="stop-color:#c9c9c9;stop-opacity:1" />
+ </linearGradient>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Inner Shadow"
+ id="filter1121">
+ <feFlood
+ flood-opacity="0.59999999999999998"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood1123" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="out"
+ result="composite1"
+ id="feComposite1125" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur1127" />
+ <feOffset
+ dx="0"
+ dy="2"
+ result="offset"
+ id="feOffset1129" />
+ <feComposite
+ in="offset"
+ in2="SourceGraphic"
+ operator="atop"
+ result="composite2"
+ id="feComposite1131" />
+ </filter>
+ <filter
+ style="color-interpolation-filters:sRGB;"
+ inkscape:label="Drop Shadow"
+ id="filter950">
+ <feFlood
+ flood-opacity="0.25"
+ flood-color="rgb(0,0,0)"
+ result="flood"
+ id="feFlood952" />
+ <feComposite
+ in="flood"
+ in2="SourceGraphic"
+ operator="in"
+ result="composite1"
+ id="feComposite954" />
+ <feGaussianBlur
+ in="composite1"
+ stdDeviation="1"
+ result="blur"
+ id="feGaussianBlur956" />
+ <feOffset
+ dx="0"
+ dy="1"
+ result="offset"
+ id="feOffset958" />
+ <feComposite
+ in="SourceGraphic"
+ in2="offset"
+ operator="over"
+ result="composite2"
+ id="feComposite960" />
+ </filter>
+ <clipPath
+ clipPathUnits="userSpaceOnUse"
+ id="clipPath873">
+ <g
+ transform="matrix(0,-0.66666667,0.66604479,0,-258.25992,677.00001)"
+ id="g875"
+ inkscape:label="Layer 1"
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline">
+ <path
+ style="fill:#ff00ff;fill-opacity:1;stroke:none;display:inline"
+ d="m 46.702703,898.22775 50.594594,0 C 138.16216,898.22775 144,904.06497 144,944.92583 l 0,50.73846 c 0,40.86071 -5.83784,46.69791 -46.702703,46.69791 l -50.594594,0 C 5.8378378,1042.3622 0,1036.525 0,995.66429 L 0,944.92583 C 0,904.06497 5.8378378,898.22775 46.702703,898.22775 Z"
+ id="path877"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ </clipPath>
+ <filter
+ inkscape:collect="always"
+ id="filter891"
+ inkscape:label="Badge Shadow">
+ <feGaussianBlur
+ inkscape:collect="always"
+ stdDeviation="0.71999962"
+ id="feGaussianBlur893" />
+ </filter>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="4.0745362"
+ inkscape:cx="18.514671"
+ inkscape:cy="49.018169"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="true"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0"
+ inkscape:window-width="1920"
+ inkscape:window-height="1029"
+ inkscape:window-x="0"
+ inkscape:window-y="24"
+ inkscape:window-maximized="1"
+ showborder="true"
+ showguides="true"
+ inkscape:guide-bbox="true"
+ inkscape:showpageshadow="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid821" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="16,48"
+ id="guide823" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,80"
+ id="guide825" />
+ <sodipodi:guide
+ orientation="1,0"
+ position="80,40"
+ id="guide827" />
+ <sodipodi:guide
+ orientation="0,1"
+ position="64,16"
+ id="guide829" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata6522">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="BACKGROUND"
+ inkscape:groupmode="layer"
+ id="layer1"
+ transform="translate(268,-635.29076)"
+ style="display:inline">
+ <path
+ style="fill:url(#linearGradient6461);fill-opacity:1;stroke:none;display:inline;filter:url(#filter1121)"
+ d="m -268,700.15563 0,-33.72973 c 0,-27.24324 3.88785,-31.13513 31.10302,-31.13513 l 33.79408,0 c 27.21507,0 31.1029,3.89189 31.1029,31.13513 l 0,33.72973 c 0,27.24325 -3.88783,31.13514 -31.1029,31.13514 l -33.79408,0 C -264.11215,731.29077 -268,727.39888 -268,700.15563 Z"
+ id="path6455"
+ inkscape:connector-curvature="0"
+ sodipodi:nodetypes="sssssssss" />
+ </g>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="PLACE YOUR PICTOGRAM HERE"
+ style="display:inline" />
+ <g
+ inkscape:groupmode="layer"
+ id="layer2"
+ inkscape:label="BADGE"
+ style="display:none"
+ sodipodi:insensitive="true">
+ <g
+ style="display:inline"
+ transform="translate(-340.00001,-581)"
+ id="g4394"
+ clip-path="none">
+ <g
+ id="g855">
+ <g
+ inkscape:groupmode="maskhelper"
+ id="g870"
+ clip-path="url(#clipPath873)"
+ style="opacity:0.6;filter:url(#filter891)">
+ <path
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-237.54282)"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path844"
+ style="color:#000000;fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ </g>
+ <g
+ id="g862">
+ <path
+ sodipodi:type="arc"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4398"
+ sodipodi:cx="252"
+ sodipodi:cy="552.36218"
+ sodipodi:rx="12"
+ sodipodi:ry="12"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ transform="matrix(1.4999992,0,0,1.4999992,-29.999795,-238.54282)" />
+ <path
+ transform="matrix(1.25,0,0,1.25,33,-100.45273)"
+ d="m 264,552.36218 a 12,12 0 1 1 -24,0 A 12,12 0 1 1 264,552.36218 Z"
+ sodipodi:ry="12"
+ sodipodi:rx="12"
+ sodipodi:cy="552.36218"
+ sodipodi:cx="252"
+ id="path4400"
+ style="color:#000000;fill:#dd4814;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:4;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ sodipodi:type="arc" />
+ <path
+ sodipodi:type="star"
+ style="color:#000000;fill:#f5f5f5;fill-opacity:1;fill-rule:nonzero;stroke:none;stroke-width:3;marker:none;visibility:visible;display:inline;overflow:visible;enable-background:accumulate"
+ id="path4459"
+ sodipodi:sides="5"
+ sodipodi:cx="666.19574"
+ sodipodi:cy="589.50385"
+ sodipodi:r1="7.2431178"
+ sodipodi:r2="4.3458705"
+ sodipodi:arg1="1.0471976"
+ sodipodi:arg2="1.6755161"
+ inkscape:flatsided="false"
+ inkscape:rounded="0.1"
+ inkscape:randomized="0"
+ d="m 669.8173,595.77657 c -0.39132,0.22593 -3.62645,-1.90343 -4.07583,-1.95066 -0.44938,-0.0472 -4.05653,1.36297 -4.39232,1.06062 -0.3358,-0.30235 0.68963,-4.03715 0.59569,-4.47913 -0.0939,-0.44198 -2.5498,-3.43681 -2.36602,-3.8496 0.18379,-0.41279 4.05267,-0.59166 4.44398,-0.81759 0.39132,-0.22593 2.48067,-3.48704 2.93005,-3.4398 0.44938,0.0472 1.81505,3.67147 2.15084,3.97382 0.3358,0.30236 4.08294,1.2817 4.17689,1.72369 0.0939,0.44198 -2.9309,2.86076 -3.11469,3.27355 C 669.9821,591.68426 670.20862,595.55064 669.8173,595.77657 Z"
+ transform="matrix(1.511423,-0.16366377,0.16366377,1.511423,-755.37346,-191.93651)" />
+ </g>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/tests/charms/layers/simple/layer.yaml b/tests/charms/layers/simple/layer.yaml
new file mode 100644
index 0000000..3fed5e2
--- /dev/null
+++ b/tests/charms/layers/simple/layer.yaml
@@ -0,0 +1,4 @@
+includes: ['layer:basic', 'layer:vnfproxy']
+options:
+ basic:
+ use_venv: false
diff --git a/tests/charms/layers/simple/metadata.yaml b/tests/charms/layers/simple/metadata.yaml
new file mode 100644
index 0000000..fd80d1a
--- /dev/null
+++ b/tests/charms/layers/simple/metadata.yaml
@@ -0,0 +1,5 @@
+name: simple
+summary: A simple VNF proxy charm
+maintainer: Adam Israel <adam.israel@canonical.com>
+subordinate: false
+series: ['xenial']
diff --git a/tests/charms/layers/simple/metrics.yaml b/tests/charms/layers/simple/metrics.yaml
new file mode 100644
index 0000000..6ebb605
--- /dev/null
+++ b/tests/charms/layers/simple/metrics.yaml
@@ -0,0 +1,5 @@
+metrics:
+ uptime:
+ type: gauge
+ description: "Uptime of the VNF"
+ command: awk '{print $1}' /proc/uptime
diff --git a/tests/charms/layers/simple/reactive/simple.py b/tests/charms/layers/simple/reactive/simple.py
new file mode 100644
index 0000000..802d60c
--- /dev/null
+++ b/tests/charms/layers/simple/reactive/simple.py
@@ -0,0 +1,44 @@
+from charmhelpers.core.hookenv import (
+ action_get,
+ action_fail,
+ action_set,
+ status_set,
+)
+from charms.reactive import (
+ clear_flag,
+ set_flag,
+ when,
+ when_not,
+)
+import charms.sshproxy
+
+
+@when('sshproxy.configured')
+@when_not('simple.installed')
+def install_simple_proxy_charm():
+ """Post-install actions.
+
+ This function will run when two conditions are met:
+ 1. The 'sshproxy.configured' state is set
+ 2. The 'simple.installed' state is not set
+
+ This ensures that the workload status is set to active only when the SSH
+ proxy is properly configured.
+ """
+ set_flag('simple.installed')
+ status_set('active', 'Ready!')
+
+
+@when('actions.touch')
+def touch():
+ err = ''
+ try:
+ filename = action_get('filename')
+ cmd = ['touch {}'.format(filename)]
+ result, err = charms.sshproxy._run(cmd)
+ except Exception:
+ action_fail('command failed:' + err)
+ else:
+ action_set({'output': result})
+ finally:
+ clear_flag('actions.touch')
diff --git a/tests/charms/layers/simple/tests/00-setup b/tests/charms/layers/simple/tests/00-setup
new file mode 100755
index 0000000..f0616a5
--- /dev/null
+++ b/tests/charms/layers/simple/tests/00-setup
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+sudo add-apt-repository ppa:juju/stable -y
+sudo apt-get update
+sudo apt-get install amulet python-requests -y
diff --git a/tests/charms/layers/simple/tests/10-deploy b/tests/charms/layers/simple/tests/10-deploy
new file mode 100755
index 0000000..9a26117
--- /dev/null
+++ b/tests/charms/layers/simple/tests/10-deploy
@@ -0,0 +1,35 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
+class TestCharm(unittest.TestCase):
+ def setUp(self):
+ self.d = amulet.Deployment()
+
+ self.d.add('simple')
+ self.d.expose('simple')
+
+ self.d.setup(timeout=900)
+ self.d.sentry.wait()
+
+ self.unit = self.d.sentry['simple'][0]
+
+ def test_service(self):
+ # test we can access over http
+ page = requests.get('http://{}'.format(self.unit.info['public-address']))
+ self.assertEqual(page.status_code, 200)
+ # Now you can use self.d.sentry[SERVICE][UNIT] to address each of the units and perform
+ # more in-depth steps. Each self.d.sentry[SERVICE][UNIT] has the following methods:
+ # - .info - An array of the information of that unit from Juju
+ # - .file(PATH) - Get the details of a file on that unit
+ # - .file_contents(PATH) - Get plain text output of PATH file from that unit
+ # - .directory(PATH) - Get details of directory
+ # - .directory_contents(PATH) - List files and folders in PATH on that unit
+ # - .relation(relation, service:rel) - Get relation data from return service
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/integration/test_broken_charm.py b/tests/integration/test_broken_charm.py
new file mode 100644
index 0000000..296096f
--- /dev/null
+++ b/tests/integration/test_broken_charm.py
@@ -0,0 +1,177 @@
+"""
+Test a charm that breaks post-deployment
+"""
+
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: brokencharm-ns
+ name: brokencharm-ns
+ short-name: brokencharm-ns
+ description: NS with 1 VNF connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: hackfest-simplecharm-vnf
+ name: hackfest-simplecharm-vnf
+ short-name: hackfest-simplecharm-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: hackfest3-mgmt
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ - id: dataVM
+ name: dataVM
+ image: hackfest3-mgmt
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: dataVM-eth0
+ position: '1'
+ type: INTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ internal-connection-point-ref: dataVM-internal
+ - name: dataVM-xe0
+ position: '2'
+ type: EXTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ external-connection-point-ref: vnf-data
+ internal-connection-point:
+ - id: dataVM-internal
+ name: dataVM-internal
+ short-name: dataVM-internal
+ type: VPORT
+ vnf-configuration:
+ juju:
+ charm: broken
+ proxy: true
+ initial-config-primitive:
+ - seq: '1'
+ name: touch
+ parameter:
+ - name: filename
+ value: '/home/ubuntu/first-touch'
+ config-primitive:
+ - name: touch
+ parameter:
+ - name: filename
+ data-type: STRING
+ default-value: '/home/ubuntu/touched'
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_proxy(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+ logging.debug("test_charm_proxy stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_charm_native.py b/tests/integration/test_charm_native.py
new file mode 100644
index 0000000..85a282e
--- /dev/null
+++ b/tests/integration/test_charm_native.py
@@ -0,0 +1,141 @@
+"""
+Deploy a native charm (to LXD) and execute a primitive
+"""
+
+import asyncio
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: charmnative-ns
+ name: charmnative-ns
+ short-name: charmnative-ns
+ description: NS with 1 VNFs charmnative-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: charmnative-vnf
+ name: charmnative-vnf
+ short-name: charmnative-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: native-ci
+ proxy: false
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+ """
+
+ @pytest.mark.asyncio
+ async def test_charm_native(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ loop=event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+
+ print("test_charm_native stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_charm_proxy.py b/tests/integration/test_charm_proxy.py
new file mode 100644
index 0000000..a05df5f
--- /dev/null
+++ b/tests/integration/test_charm_proxy.py
@@ -0,0 +1,142 @@
+"""
+Deploy a VNF with a proxy charm, executing an initial-config-primitive
+"""
+
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: charmproxy-ns
+ name: charmproxy-ns
+ short-name: charmproxy-ns
+ description: NS with 1 VNF connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: charmproxy-vnf
+ name: charmproxy-vnf
+ short-name: charmproxy-vnf
+ version: '1.0'
+ description: A VNF consisting of 1 VDUs w/proxy charm
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: proxy-ci
+ proxy: true
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_proxy(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+ logging.debug("test_charm_proxy stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_metrics_native.py b/tests/integration/test_metrics_native.py
new file mode 100644
index 0000000..4288915
--- /dev/null
+++ b/tests/integration/test_metrics_native.py
@@ -0,0 +1,145 @@
+"""
+Deploy a VNF w/native charm that collects metrics
+"""
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: metricsnative-ns
+ name: metricsnative-ns
+ short-name: metricsnative-ns
+ description: NS with 1 VNFs metricsnative-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: metricsnative-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: metricsnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: metricsnative-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: metricsnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: metricsnative-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: metricsnative-vnf
+ name: metricsnative-vnf
+ short-name: metricsnative-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vnf-configuration:
+ juju:
+ charm: metrics-ci
+ proxy: false
+ config-primitive:
+ - name: touch
+ parameter:
+ - name: filename
+ data-type: STRING
+ default-value: '/home/ubuntu/touched'
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_metrics_native(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+
+ logging.debug("test_metrics_native stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_metrics_proxy.py b/tests/integration/test_metrics_proxy.py
new file mode 100644
index 0000000..e7fa920
--- /dev/null
+++ b/tests/integration/test_metrics_proxy.py
@@ -0,0 +1,139 @@
+"""
+Deploy a VNF w/proxy charm that collects metrics
+"""
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: metricsproxy-ns
+ name: metricsproxy-ns
+ short-name: metricsproxy-ns
+ description: NS with 1 VNFs metricsproxy-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: metricsproxy-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: metricsproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: metricsproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: metricsproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: metricsproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: metricsproxy-vnf
+ name: metricsproxy-vnf
+ short-name: metricsproxy-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vnf-configuration:
+ juju:
+ charm: metrics-proxy-ci
+ proxy: true
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_metrics_proxy(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+
+ logging.debug("test_metrics_proxy stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_multivdu_multicharm.py b/tests/integration/test_multivdu_multicharm.py
new file mode 100644
index 0000000..b879373
--- /dev/null
+++ b/tests/integration/test_multivdu_multicharm.py
@@ -0,0 +1,184 @@
+"""
+Deploy a multi-vdu, multi-charm VNF
+"""
+
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: multivdumulticharm-ns
+ name: multivdumulticharm-ns
+ short-name: multivdumulticharm-ns
+ description: NS with 1 VNF connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: multivdumulticharm-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: multivdumulticharm-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: multivdumulticharm-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: multivdumulticharm-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: multivdumulticharm-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: multivdumulticharm-vnf
+ name: multivdumulticharm-vnf
+ short-name: multivdumulticharm-vnf
+ version: '1.0'
+ description: A VNF consisting of 1 VDUs w/proxy charm
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: proxy-ci
+ proxy: true
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+ - id: dataVM
+ name: dataVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: dataVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: dataVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: dataVM-internal
+ internal-connection-point:
+ - id: dataVM-internal
+ name: dataVM-internal
+ short-name: dataVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: proxy-ci
+ proxy: true
+ # Relation needs to map to the vdu providing or
+ # requiring, so that we can map to the deployed app.
+ relation:
+ - provides: dataVM:db
+ requires: mgmtVM:app
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_multivdu_multicharm(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+ vnf_index += 1
+
+ while await self.running():
+ logging.debug("Waiting for test to finish...")
+ await asyncio.sleep(15)
+ # assert False
+ logging.debug("test_multivdu_multicharm stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_no_initial_config_primitive.py b/tests/integration/test_no_initial_config_primitive.py
new file mode 100644
index 0000000..0d90205
--- /dev/null
+++ b/tests/integration/test_no_initial_config_primitive.py
@@ -0,0 +1,142 @@
+"""
+Test N2VC when the VNF descriptor does not contain an initial-config-primitive.
+"""
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: noinitconfig-ns
+ name: noinitconfig-ns
+ short-name: noinitconfig-ns
+ description: NS with 1 VNFs noinitconfig-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: noinitconfig-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: noinitconfig-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: noinitconfig-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: noinitconfig-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: noinitconfig-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: noinitconfig-vnf
+ name: noinitconfig-vnf
+ short-name: noinitconfig-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: native-ci
+ proxy: false
+ config-primitive:
+ - name: test
+
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_no_initial_config_primitive(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+
+ logging.debug("test_charm_no_initial_config_primitive stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_no_parameter.py b/tests/integration/test_no_parameter.py
new file mode 100644
index 0000000..55c2c3a
--- /dev/null
+++ b/tests/integration/test_no_parameter.py
@@ -0,0 +1,140 @@
+"""
+Describe what this test is meant to do.
+"""
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: noparam-ns
+ name: noparam-ns
+ short-name: noparam-ns
+ description: NS with 1 VNFs noparam-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: noparam-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: noparam-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: noparam-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: noparam-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: noparam-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: noparam-vnf
+ name: noparam-vnf
+ short-name: noparam-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: native-ci
+ proxy: false
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_no_parameter(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+ logging.warning("event_loop: {}".format(event_loop))
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+
+ return 'ok'
diff --git a/tests/integration/test_non_string_parameter.py b/tests/integration/test_non_string_parameter.py
new file mode 100644
index 0000000..b93dfed
--- /dev/null
+++ b/tests/integration/test_non_string_parameter.py
@@ -0,0 +1,147 @@
+"""
+Deploy a VNF with a non-string parameter passed to a primitive
+"""
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: charmnative-ns
+ name: charmnative-ns
+ short-name: charmnative-ns
+ description: NS with 1 VNF charmnative-vnf connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '2'  # NOTE(review): constituent-vnfd above only declares member-vnf-index '1' — index '2' is undeclared, confirm
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: charmnative-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: charmnative-vnf
+ name: charmnative-vnf
+ short-name: charmnative-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs w/charms connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal  # NOTE(review): no dataVM VDU is defined in this VNFD — dangling reference, confirm or remove
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: xenial
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: VIRTIO
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: VIRTIO
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ vdu-configuration:
+ juju:
+ charm: native-ci
+ proxy: false
+ initial-config-primitive:
+ - seq: '1'
+ name: test
+ - seq: '2'
+ name: testint
+ parameter:
+ - name: intval
+ data-type: INTEGER
+ value: 1
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_non_string_parameter(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+ logging.debug("test_charm_non_string_parameter stopped")
+
+ return 'ok'
diff --git a/tests/integration/test_simplecharm.py b/tests/integration/test_simplecharm.py
new file mode 100644
index 0000000..7f4cafd
--- /dev/null
+++ b/tests/integration/test_simplecharm.py
@@ -0,0 +1,178 @@
+"""
+Exercise the simplecharm hackfest example:
+https://osm-download.etsi.org/ftp/osm-4.0-four/4th-hackfest/packages/hackfest_simplecharm_vnf.tar.gz
+"""
+
+import asyncio
+import logging
+import pytest
+from .. import base
+
+
+# @pytest.mark.serial
+class TestCharm(base.TestN2VC):
+
+ NSD_YAML = """
+ nsd:nsd-catalog:
+ nsd:
+ - id: charmproxy-ns
+ name: charmproxy-ns
+ short-name: charmproxy-ns
+ description: NS with 1 VNF connected by datanet and mgmtnet VLs
+ version: '1.0'
+ logo: osm.png
+ constituent-vnfd:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index: '1'
+ vld:
+ - id: mgmtnet
+ name: mgmtnet
+ short-name: mgmtnet
+ type: ELAN
+ mgmt-network: 'true'
+ vim-network-name: mgmt
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-mgmt
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'  # NOTE(review): constituent-vnfd above only declares member-vnf-index '1' — index '2' is undeclared, confirm
+ vnfd-connection-point-ref: vnf-mgmt
+ - id: datanet
+ name: datanet
+ short-name: datanet
+ type: ELAN
+ vnfd-connection-point-ref:
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '1'
+ vnfd-connection-point-ref: vnf-data
+ - vnfd-id-ref: charmproxy-vnf
+ member-vnf-index-ref: '2'
+ vnfd-connection-point-ref: vnf-data
+ """
+
+ VNFD_YAML = """
+ vnfd:vnfd-catalog:
+ vnfd:
+ - id: hackfest-simplecharm-vnf  # NOTE(review): NSD in this file references 'charmproxy-vnf' — vnfd-id-ref will not resolve; ids must match, confirm
+ name: hackfest-simplecharm-vnf
+ short-name: hackfest-simplecharm-vnf
+ version: '1.0'
+ description: A VNF consisting of 2 VDUs connected to an internal VL, and one VDU with cloud-init
+ logo: osm.png
+ connection-point:
+ - id: vnf-mgmt
+ name: vnf-mgmt
+ short-name: vnf-mgmt
+ type: VPORT
+ - id: vnf-data
+ name: vnf-data
+ short-name: vnf-data
+ type: VPORT
+ mgmt-interface:
+ cp: vnf-mgmt
+ internal-vld:
+ - id: internal
+ name: internal
+ short-name: internal
+ type: ELAN
+ internal-connection-point:
+ - id-ref: mgmtVM-internal
+ - id-ref: dataVM-internal
+ vdu:
+ - id: mgmtVM
+ name: mgmtVM
+ image: hackfest3-mgmt
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: mgmtVM-eth0
+ position: '1'
+ type: EXTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ external-connection-point-ref: vnf-mgmt
+ - name: mgmtVM-eth1
+ position: '2'
+ type: INTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ internal-connection-point-ref: mgmtVM-internal
+ internal-connection-point:
+ - id: mgmtVM-internal
+ name: mgmtVM-internal
+ short-name: mgmtVM-internal
+ type: VPORT
+ cloud-init-file: cloud-config.txt
+ - id: dataVM
+ name: dataVM
+ image: hackfest3-mgmt
+ count: '1'
+ vm-flavor:
+ vcpu-count: '1'
+ memory-mb: '1024'
+ storage-gb: '10'
+ interface:
+ - name: dataVM-eth0
+ position: '1'
+ type: INTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ internal-connection-point-ref: dataVM-internal
+ - name: dataVM-xe0
+ position: '2'
+ type: EXTERNAL
+ virtual-interface:
+ type: PARAVIRT
+ external-connection-point-ref: vnf-data
+ internal-connection-point:
+ - id: dataVM-internal
+ name: dataVM-internal
+ short-name: dataVM-internal
+ type: VPORT
+ vnf-configuration:
+ juju:
+ charm: simple
+ proxy: true
+ initial-config-primitive:
+ - seq: '1'
+ name: touch
+ parameter:
+ - name: filename
+ value: '/home/ubuntu/first-touch'
+ config-primitive:
+ - name: touch
+ parameter:
+ - name: filename
+ data-type: STRING
+ default-value: '/home/ubuntu/touched'
+ """
+
+ # @pytest.mark.serial
+ @pytest.mark.asyncio
+ async def test_charm_proxy(self, event_loop):
+ """Deploy and execute the initial-config-primitive of a VNF."""
+
+ if self.nsd and self.vnfd:
+ vnf_index = 0
+
+ for config in self.get_config():
+ juju = config['juju']
+ charm = juju['charm']
+
+ await self.deploy(
+ vnf_index,
+ charm,
+ config,
+ event_loop,
+ )
+
+ while await self.running():
+ print("Waiting for test to finish...")
+ await asyncio.sleep(15)
+ logging.debug("test_charm_proxy stopped")
+
+ return 'ok'
diff --git a/tests/test_libjuju.py b/tests/test_libjuju.py
new file mode 100644
index 0000000..8adc202
--- /dev/null
+++ b/tests/test_libjuju.py
@@ -0,0 +1,18 @@
+# A simple test to verify we're using the right libjuju module
+from n2vc.vnf import N2VC # noqa: F401
+import sys
+
+
+def test_libjuju():
+ """Test the module import for our vendored version of libjuju.
+
+ Test and verify that the version of libjuju being imported by N2VC is our
+ vendored version, not one installed externally.
+ """
+ for name in sys.modules:
+ if name.startswith("juju"):
+ module = sys.modules[name]
+ if getattr(module, "__file__", None):
+ assert module.__file__.find("N2VC/modules/libjuju/juju") != -1
+
+ return
diff --git a/tests/test_lxd.py b/tests/test_lxd.py
new file mode 100644
index 0000000..f68fa3a
--- /dev/null
+++ b/tests/test_lxd.py
@@ -0,0 +1,96 @@
+"""
+This test exercises LXD, to make sure that we can:
+1. Create a container profile
+2. Launch a container with a profile
+3. Stop a container
+4. Destroy a container
+5. Delete a container profile
+
+"""
+import logging
+# import os
+import pytest
+from . import base
+import subprocess
+import shlex
+import tempfile
+
+
+@pytest.mark.asyncio
+async def test_lxd():
+
+ container = base.create_lxd_container(name="test-lxd")  # NOTE(review): same name as in test_lxd_ssh — may collide under pytest-xdist -n auto, confirm
+ assert container is not None
+
+ # Get the hostname of the container
+ hostname = container.name
+
+ # Delete the container
+ base.destroy_lxd_container(container)
+
+ # Verify the container is deleted
+ client = base.get_lxd_client()
+ assert client.containers.exists(hostname) is False
+
+
+@pytest.mark.asyncio
+async def test_lxd_ssh():
+
+ with tempfile.TemporaryDirectory() as tmp:
+ try:
+ # Create a temporary keypair
+ cmd = shlex.split(
+ "ssh-keygen -t rsa -b 4096 -N '' -f {}/id_lxd_rsa".format(
+ tmp,
+ )
+ )
+ subprocess.check_call(cmd)
+ except subprocess.CalledProcessError as e:
+ logging.debug(e)
+ assert False
+
+ # Slurp the public key
+ public_key = None
+ with open("{}/id_lxd_rsa.pub".format(tmp), "r") as f:
+ public_key = f.read()
+
+ assert public_key is not None
+
+ # Create the container with the keypair injected via profile
+ container = base.create_lxd_container(
+ public_key=public_key,
+ name="test-lxd"
+ )
+ assert container is not None
+
+ # Get the hostname of the container
+ hostname = container.name
+
+ addresses = container.state().network['eth0']['addresses']
+ # The interface may have more than one address, but we only need
+ # the first one for testing purposes.
+ ipaddr = addresses[0]['address']
+
+ # Verify we can SSH into container
+ try:
+ cmd = shlex.split(
+ "ssh -i {}/id_lxd_rsa {} root@{} hostname".format(
+ tmp,
+ "-oStrictHostKeyChecking=no",
+ ipaddr,
+ )
+ )
+ subprocess.check_call(cmd)
+ except subprocess.CalledProcessError as e:
+ logging.debug(e)
+ assert False
+
+ # Delete the container
+ base.destroy_lxd_container(container)
+
+ # Verify the container is deleted
+ client = base.get_lxd_client()
+ assert client.containers.exists(hostname) is False
+
+ # Verify the container profile is deleted
+ assert client.profiles.exists(hostname) is False
diff --git a/tests/test_model.py b/tests/test_model.py
new file mode 100644
index 0000000..ff164fa
--- /dev/null
+++ b/tests/test_model.py
@@ -0,0 +1,55 @@
+"""
+Test N2VC's ssh key generation
+"""
+import n2vc
+import pytest
+from . import base
+import uuid
+
+
+@pytest.mark.asyncio
+async def test_model_create():
+ """Test the creation of a new model."""
+ client = base.get_n2vc()
+
+ model_name = "test-{}".format(
+ uuid.uuid4().hex[-4:],
+ )
+
+ pytest.assume(await client.CreateNetworkService(model_name))
+ pytest.assume(await client.DestroyNetworkService(model_name))
+ pytest.assume(await client.logout())
+
+
+@pytest.mark.asyncio
+async def test_destroy_non_existing_network_service():
+ """Destroy a model that doesn't exist."""
+
+ client = base.get_n2vc()
+
+ model_name = "test-{}".format(
+ uuid.uuid4().hex[-4:],
+ )
+
+ with pytest.raises(n2vc.vnf.NetworkServiceDoesNotExist):
+ pytest.assume(await client.DestroyNetworkService(model_name))
+
+ pytest.assume(await client.logout())
+
+
+@pytest.mark.asyncio
+async def test_model_create_duplicate():
+ """Create a new model, and try to create the same model."""
+ client = base.get_n2vc()
+
+ model_name = "test-{}".format(
+ uuid.uuid4().hex[-4:],
+ )
+
+ # Try to recreate bug 628
+ for x in range(0, 1000):
+ model = await client.get_model(model_name)
+ pytest.assume(model)
+
+ pytest.assume(await client.DestroyNetworkService(model_name))
+ pytest.assume(await client.logout())
diff --git a/tests/test_ssh_keygen.py b/tests/test_ssh_keygen.py
new file mode 100644
index 0000000..3a129a3
--- /dev/null
+++ b/tests/test_ssh_keygen.py
@@ -0,0 +1,18 @@
+"""
+Test N2VC's ssh key generation
+"""
+import os
+import pytest
+from . import base
+import tempfile
+
+
+@pytest.mark.asyncio
+async def test_ssh_keygen(monkeypatch):
+ with tempfile.TemporaryDirectory() as tmpdirname:
+ monkeypatch.setitem(os.environ, "HOME", tmpdirname)
+
+ client = base.get_n2vc()
+
+ public_key = await client.GetPublicKey()
+ assert len(public_key)
diff --git a/tox.ini b/tox.ini
index 350a1fc..b353ce8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
# and then run "tox" from this directory.
[tox]
-envlist = lint,py3
+envlist = py3,lint,integration
skipsdist=True
[pytest]
@@ -15,52 +15,44 @@
basepython=python3
usedevelop=True
# for testing with other python versions
-commands = py.test --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
+commands = py.test --ignore modules/ --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
passenv =
HOME
- TEST_AGENTS
+ VCA_HOST
+ VCA_PORT
+ VCA_USER
+ VCA_SECRET
+ # These are needed so executing `charm build` succeeds
+ TERM
+ TERMINFO
deps =
- asynctest
- ipdb
mock
+ pyyaml
pytest
pytest-asyncio
pytest-xdist
- Twine
- # use fork to pick up fix for https://github.com/aaugustin/websockets/pull/528
- git+https://github.com/johnsca/websockets@bug/client-redirects#egg=websockets
+ pytest-assume
+ paramiko
+ pylxd
[testenv:py3]
-# default tox env excludes integration and serial tests
+# default tox env, excludes integration and serial tests
commands =
- # These need to be installed in a specific order
- pip install urllib3==1.22
- pip install pylxd
- py.test --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
+ pytest --ignore modules/ --tb native -ra -v -s -n auto -k 'not integration' -m 'not serial' {posargs}
[testenv:lint]
envdir = {toxworkdir}/py3
commands =
- flake8 --ignore E501,W504 {posargs} juju tests
+ flake8 --ignore E501,E402 --exclude tests/charms/builds,tests/charms/deps {posargs} n2vc tests
deps =
flake8
[testenv:integration]
envdir = {toxworkdir}/py3
-commands =
- # These need to be installed in a specific order
- pip install urllib3==1.22
- pip install pylxd
- py.test --tb native -ra -v -s -n auto -k 'integration' -m 'not serial' {posargs}
+commands = py.test --ignore modules/ --tb native -ra -v -s -n 1 -k 'integration' -m 'serial' {posargs}
-[testenv:serial]
-# tests that can't be run in parallel
-envdir = {toxworkdir}/py3
-commands = py.test --tb native -ra -v -s {posargs:-m 'serial'}
-
-[testenv:example]
-envdir = {toxworkdir}/py3
-commands = python {posargs}
-
-[flake8]
-exclude = juju/client/_*
+[testenv:build]
+deps =
+ stdeb
+ setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb