diff --git a/hackfest_virtual-pc_ns/README.md b/hackfest_virtual-pc_ns/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c2c2af5bfc0cadb01d9b28eaf85fc90c213d023
--- /dev/null
+++ b/hackfest_virtual-pc_ns/README.md
@@ -0,0 +1,3 @@
+# Descriptor created by the OSM descriptor package generator
+
+**Created on 02/18/2021, 05:51:56**
\ No newline at end of file
diff --git a/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..124625188e463ad98cfd4341b4e5365a1738e308
--- /dev/null
+++ b/hackfest_virtual-pc_ns/hackfest_virtual-pc_nsd.yaml
@@ -0,0 +1,37 @@
+# Copyright 2019 ETSI OSM
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+nsd:
+ nsd:
+  - description: Virtual Desktop Computer with MATE Desktop and RDP
+ designer: OSM
+ df:
+ - id: default-df
+ vnf-profile:
+ - id: '1'
+ virtual-link-connectivity:
+ - constituent-cpd-id:
+ - constituent-base-element-id: '1'
+ constituent-cpd-id: virtual-pc-mgmt-ext
+ virtual-link-profile-id: mgmtnet
+ vnfd-id: virtual-pc_vnfd
+ id: hackfest_virtual-pc_nsd
+ name: hackfest_virtual-pc_nsd
+ version: '1.0'
+ virtual-link-desc:
+ - id: mgmtnet
+ mgmt-network: 'true'
+ vnfd-id:
+ - virtual-pc_vnfd
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/.jujuignore b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/.jujuignore
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd559eabeae93e4d23215fa450130fa9b37ace
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/.jujuignore
@@ -0,0 +1,3 @@
+/venv
+*.py[cod]
+*.charm
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/LICENSE b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/README.md b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..834c2b5b5b8b4b0a49a855bfb0c8af9d41b590f7
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/README.md
@@ -0,0 +1,55 @@
+# virtual-pc
+
+## Description
+
+Charm that sets up a virtual desktop PC: it installs the MATE desktop
+environment and an XRDP server so the deployed machine can be accessed over
+RDP.
+
+## Usage
+
+### Prepare the environment
+
+```bash
+sudo snap install juju --classic --channel 2.8/stable
+sudo snap install lxd
+lxd init
+juju bootstrap lxd
+juju add-model test-virtual-pc
+```
+
+### Deploy (from the Store)
+
+```bash
+juju deploy cs:~charmed-osm/virtual-pc --channel edge
+```
+
+### Deploy (locally)
+
+Build the charm:
+
+```bash
+virtualenv -p python3 venv
+source venv/bin/activate
+pip install -r requirements-dev.txt
+charmcraft build
+```
+
+Deploy:
+
+```bash
+juju deploy ./virtual-pc.charm
+```
+
+## Developing
+
+Create and activate a virtualenv with the development requirements:
+
+ virtualenv -p python3 venv
+ source venv/bin/activate
+ pip install -r requirements-dev.txt
+
+## Testing
+
+The Python operator framework includes a harness for testing operator
+behaviour without a full deployment. Just run `./run_tests`:
+
+ ./run_tests
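+
+As a minimal sketch (assuming `PYTHONPATH=src`, as `run_tests` sets, and the
+`VirtualPCCharm` class from `src/charm.py`), a harness-based test looks like:
+
+    import unittest
+    from ops.testing import Harness
+    from charm import VirtualPCCharm
+
+    class TestCharm(unittest.TestCase):
+        def test_begin(self):
+            harness = Harness(VirtualPCCharm)
+            self.addCleanup(harness.cleanup)
+            harness.begin()
+            self.assertFalse(harness.charm._stored.installed)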
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a14be4d84b051a438fa574c9be45de53902e828
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/actions.yaml
@@ -0,0 +1,6 @@
+# Copyright 2020 ETSI OSM Contributors
+# See LICENSE file for licensing details.
+#
+# This charm does not currently define any actions; this file is kept as a
+# placeholder. If you don't need actions, you can remove the file entirely.
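+#
+# A hypothetical example of what an action definition could look like here
+# (illustrative only; there is no matching handler in src/charm.py):
+#
+# restart-xrdp:
+#   description: Restart the XRDP service on the unit.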
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/config.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2c67568178ac63cc757a0a68bb013fb1bfbd404
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/config.yaml
@@ -0,0 +1,6 @@
+# Copyright 2020 ETSI OSM Contributors
+# See LICENSE file for licensing details.
+#
+# This is only an example, and you should edit to suit your needs.
+# If you don't need config, you can remove the file entirely.
+options:
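+# A hypothetical example option (illustrative only; the charm code does not
+# currently read any options):
+#
+#   wm-command:
+#     type: string
+#     default: mate-session
+#     description: Session command that XRDP launches for each login.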
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/metadata.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d30a5b5d2ce0a8d867fe00ac7974a6133e4d50b
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/metadata.yaml
@@ -0,0 +1,9 @@
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+name: virtual-pc
+description: |
+  Charm that turns the machine it runs on into a virtual desktop PC by
+  installing the MATE desktop environment and an XRDP server for remote
+  RDP access.
+summary: |
+  Virtual desktop PC with MATE and XRDP.
+series:
+ - focal
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements-dev.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..34e6ef82473af5ec6fb293e11ef353d30ddcdca6
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements-dev.txt
@@ -0,0 +1,3 @@
+-r requirements.txt
+flake8
+charmcraft
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d81d3bb6fea804d1db7a1549d67244b513aa145
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/requirements.txt
@@ -0,0 +1 @@
+ops
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/run_tests b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..08ec01e05a8965a5009902a6732adf13c027add6
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" ] && [ -d venv/ ]; then
+ . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+ export PYTHONPATH=src
+else
+ export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+flake8
+python3 -m unittest -v "$@"
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..e068304f72a6fcaa2fce64ea711685f5c64c1690
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/charm.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+from apt.progress.base import InstallProgress
+import logging
+
+from jinja2 import Template
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import (
+ MaintenanceStatus,
+ ActiveStatus,
+ # BlockedStatus,
+)
+from utils import (
+ service_stop,
+ service_restart,
+ install_apt,
+ shell,
+)
+
+
+# from typing import Dict, Any
+logger = logging.getLogger(__name__)
+
+APT_REQUIREMENTS = [
+ "firefox",
+ "mate-desktop", # 469 packages
+ "mate-applets",
+ "mate-applet-brisk-menu",
+ "mate-indicator-applet",
+ "mate-session-manager",
+ "mate-terminal",
+ "xrdp",
+]
+SNAP_INSTALLS = [
+ "code --classic",
+]
+POLKIT_TEMPLATE = "./templates/color.pkla"
+POLKIT_PATH = "/etc/polkit-1/localauthority/50-local.d/color.pkla"
+STARTWM_TEMPLATE = "./templates/startwm.sh"
+STARTWM_PATH = "/etc/xrdp/startwm.sh"
+# WM_COMMAND = "startxfce4" # xubuntu-desktop
+# WM_COMMAND = "budgie-desktop" # budgie-desktop-environment
+WM_COMMAND = "mate-session" # mate-desktop
+
+
+class VirtualPCCharm(CharmBase, InstallProgress):
+ _stored = StoredState()
+
+ def __init__(self, *args):
+ super().__init__(*args)
+ InstallProgress.__init__(self)
+
+        self._stored.set_default(installed=False, started=False)
+
+ # Basic hooks
+ self.framework.observe(self.on.install, self._on_install)
+ self.framework.observe(self.on.start, self._on_start)
+ self.framework.observe(self.on.stop, self._on_stop)
+ self.framework.observe(self.on.config_changed, self._on_config_changed)
+ self.framework.observe(self.on.update_status, self._on_update_status)
+
+ # Actions hooks
+
+ # Relations hooks
+
+ # Override InstallProgress to update our status
+ def status_change(self, pkg, percent, status):
+ message = str(int(percent)) + "% " + status
+ self.unit.status = MaintenanceStatus(message)
+
+ # Basic hooks
+ def _on_install(self, _):
+ self.unit.status = MaintenanceStatus("Installing apt packages")
+ install_apt(packages=APT_REQUIREMENTS, update=True, progress=self)
+ service_stop('xrdp')
+
+ self.unit.status = MaintenanceStatus("Installing snaps")
+ for snap in SNAP_INSTALLS:
+ shell("sudo snap install " + snap)
+
+ self.unit.status = MaintenanceStatus("Setting default display manager")
+ shell("echo /usr/sbin/lightdm | sudo tee /etc/X11/default-display-manager")
+
+ self.unit.status = MaintenanceStatus("Adding XRDP to ssl-cert group")
+ shell("sudo adduser xrdp ssl-cert")
+
+ self.unit.status = MaintenanceStatus("Generating Window Manager startup script")
+ with open(STARTWM_TEMPLATE, "r") as template:
+ content = Template(template.read()).render(command=WM_COMMAND)
+ with open(STARTWM_PATH, "w") as startwm:
+ startwm.write(content)
+
+ self.unit.status = MaintenanceStatus("Generating Polkit files")
+ with open(POLKIT_TEMPLATE, "r") as template:
+ content = Template(template.read()).render()
+ with open(POLKIT_PATH, "w") as polkit:
+ polkit.write(content)
+
+ self._stored.installed = True
+
+ def _on_start(self, _):
+ self.unit.status = MaintenanceStatus("Starting XRDP server")
+ service_restart('xrdp')
+ self._stored.started = True
+ self.unit.status = self._get_current_status()
+
+ def _on_stop(self, _):
+ service_stop('xrdp')
+ self._stored.started = False
+ self.unit.status = self._get_current_status()
+
+ def _on_config_changed(self, _):
+ self.unit.status = self._get_current_status()
+
+ def _on_update_status(self, _):
+ self.unit.status = self._get_current_status()
+
+ # Action hooks
+ # Relation hooks
+
+ # Private functions
+    def _get_current_status(self):
+        if not self._stored.installed:
+            return MaintenanceStatus("Waiting for installation to complete")
+        if not self._stored.started:
+            return MaintenanceStatus("XRDP server stopped")
+        return ActiveStatus("Ready")
+
+
+if __name__ == "__main__":
+ main(VirtualPCCharm)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/test.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4cb4f758d048e240819dc2dad33ae5d5519e1bc
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/test.py
@@ -0,0 +1,17 @@
+from apt.progress.base import InstallProgress
+
+from utils import (
+ install_apt,
+)
+
+
+class Progress(InstallProgress):
+
+    def status_change(self, pkg, percent, status):
+        # Report apt install progress to stdout.
+        print(str(int(percent)) + "% " + status)
+
+
+if __name__ == "__main__":
+ install_apt(packages=["mate-backgrounds"], update=True, progress=Progress())
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/utils.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8dce1bf65ca03fc7c4a0c6f4f781f4d155966b4
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/src/utils.py
@@ -0,0 +1,78 @@
+import apt
+import shutil
+import subprocess
+from typing import Dict, List
+
+
+def service_active(service_name: str) -> bool:
+ result = subprocess.run(
+ ["systemctl", "is-active", service_name],
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ )
+ return result.stdout == "active\n"
+
+
+def all_values_set(dictionary: Dict[str, str]) -> bool:
+ return not any(v is None for v in dictionary.values())
+
+
+def install_apt(
+    packages: List[str], update: bool = False, progress=None
+) -> None:
+    cache = apt.cache.Cache()
+    if update:
+        cache.update()
+    cache.open()
+    for package in packages:
+        pkg = cache[package]
+        if not pkg.is_installed:
+            pkg.mark_install()
+    cache.commit(install_progress=progress)
+
+
+def remove_apt(packages: List[str], update: bool = False) -> None:
+    cache = apt.cache.Cache()
+    if update:
+        cache.update()
+    cache.open()
+    for package in packages:
+        pkg = cache[package]
+        if pkg.is_installed:
+            pkg.mark_delete()
+    cache.commit()
+
+
+def shell(command: str) -> None:
+    subprocess.run(command, shell=True).check_returncode()
+
+
+def copy_files(origin: Dict[str, str], destination: Dict[str, str]) -> None:
+    for config, origin_path in origin.items():
+        destination_path = destination[config]
+        shutil.copy(origin_path, destination_path)
+
+
+# Service functions
+def _systemctl(action: str, service_name: str) -> None:
+    subprocess.run(["systemctl", action, service_name]).check_returncode()
+
+
+def service_start(service_name: str) -> None:
+    _systemctl("start", service_name)
+
+
+def service_restart(service_name: str) -> None:
+    _systemctl("restart", service_name)
+
+
+def service_stop(service_name: str) -> None:
+    _systemctl("stop", service_name)
+
+
+def service_enable(service_name: str) -> None:
+    _systemctl("enable", service_name)
+
+
+def systemctl_daemon_reload() -> None:
+    subprocess.run(["systemctl", "daemon-reload"]).check_returncode()
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/color.pkla b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/color.pkla
new file mode 100644
index 0000000000000000000000000000000000000000..c8b94041bdd63ede347565cbdbd091c8be1d89ec
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/color.pkla
@@ -0,0 +1,6 @@
+[Allow colord for all users]
+Identity=unix-user:*
+Action=org.freedesktop.color-manager.create-device;org.freedesktop.color-manager.create-profile;org.freedesktop.color-manager.delete-device;org.freedesktop.color-manager.delete-profile;org.freedesktop.color-manager.modify-device;org.freedesktop.color-manager.modify-profile
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/startwm.sh b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/startwm.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0f3bdb211420745ce7b4413895c23c621087541a
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/templates/startwm.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# xrdp X session start script (c) 2015, 2017 mirabilos
+# published under The MirOS Licence
+
+if test -r /etc/profile; then
+ . /etc/profile
+fi
+
+if test -r /etc/default/locale; then
+ . /etc/default/locale
+ test -z "${LANG+x}" || export LANG
+ test -z "${LANGUAGE+x}" || export LANGUAGE
+ test -z "${LC_ADDRESS+x}" || export LC_ADDRESS
+ test -z "${LC_ALL+x}" || export LC_ALL
+ test -z "${LC_COLLATE+x}" || export LC_COLLATE
+ test -z "${LC_CTYPE+x}" || export LC_CTYPE
+ test -z "${LC_IDENTIFICATION+x}" || export LC_IDENTIFICATION
+ test -z "${LC_MEASUREMENT+x}" || export LC_MEASUREMENT
+ test -z "${LC_MESSAGES+x}" || export LC_MESSAGES
+ test -z "${LC_MONETARY+x}" || export LC_MONETARY
+ test -z "${LC_NAME+x}" || export LC_NAME
+ test -z "${LC_NUMERIC+x}" || export LC_NUMERIC
+ test -z "${LC_PAPER+x}" || export LC_PAPER
+ test -z "${LC_TELEPHONE+x}" || export LC_TELEPHONE
+ test -z "${LC_TIME+x}" || export LC_TIME
+ test -z "${LOCPATH+x}" || export LOCPATH
+fi
+
+if test -r /etc/profile; then
+ . /etc/profile
+fi
+
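+# {{ command }} below is replaced by the charm (src/charm.py) with the
+# session command of the selected desktop environment (mate-session by
+# default).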
+{{ command }}
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/tests/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/tests/test_charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d456ec7fe3ff4b8033ad910c4aa4f4c57f8c873
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc-src/tests/test_charm.py
@@ -0,0 +1,35 @@
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+import unittest
+
+from ops.model import MaintenanceStatus
+from ops.testing import Harness
+
+from charm import VirtualPCCharm
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(VirtualPCCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_initial_stored_state(self):
+        # The stored flags stay False until the install/start hooks run.
+        self.assertFalse(self.harness.charm._stored.installed)
+        self.assertFalse(self.harness.charm._stored.started)
+
+    def test_config_changed_refreshes_status(self):
+        # config-changed recomputes the unit status from the stored state;
+        # before the install hook has run, that is a MaintenanceStatus.
+        self.harness.update_config({})
+        status = self.harness.charm.unit.status
+        self.assertIsInstance(status, MaintenanceStatus)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/LICENSE b/hackfest_virtual-pc_vnfd/charms/virtual-pc/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/README.md b/hackfest_virtual-pc_vnfd/charms/virtual-pc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..834c2b5b5b8b4b0a49a855bfb0c8af9d41b590f7
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/README.md
@@ -0,0 +1,55 @@
+# virtual-pc
+
+## Description
+
+Charm that turns an Ubuntu machine into a virtual desktop PC: it installs
+the MATE desktop environment, a few applications (Firefox, VS Code) and an
+XRDP server so the unit can be reached over RDP.
+
+## Usage
+
+### Prepare the environment
+
+```bash
+sudo snap install juju --classic --channel 2.8/stable
+sudo snap install lxd
+lxd init
+juju bootstrap lxd
+juju add-model test-virtual-pc
+```
+
+### Deploy (from the Store)
+
+```bash
+juju deploy cs:~charmed-osm/virtual-pc --channel edge
+```
+
+### Deploy (locally)
+
+Build the charm:
+
+```bash
+virtualenv -p python3 venv
+source venv/bin/activate
+pip install -r requirements-dev.txt
+pip install charmcraft
+./venv/bin/charmcraft build
+```
+
+Deploy:
+
+```bash
+juju deploy ./virtual-pc.charm
+```
+
+## Developing
+
+Create and activate a virtualenv with the development requirements:
+
+ virtualenv -p python3 venv
+ source venv/bin/activate
+ pip install -r requirements-dev.txt
+
+## Testing
+
+The Python operator framework includes a very nice harness for testing
+operator behaviour without full deployment. Just `run_tests`:
+
+ ./run_tests
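+
+A minimal harness-based test might look like this (a sketch, assuming the
+charm reports `ActiveStatus("Ready")` once installation has finished):
+
+```python
+from ops.model import ActiveStatus
+from ops.testing import Harness
+
+from charm import VirtualPCCharm
+
+harness = Harness(VirtualPCCharm)
+harness.begin()
+harness.charm._stored.installed = True
+harness.charm.on.update_status.emit()
+assert harness.charm.unit.status == ActiveStatus("Ready")
+```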
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a14be4d84b051a438fa574c9be45de53902e828
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/actions.yaml
@@ -0,0 +1,6 @@
+# Copyright 2020 ETSI OSM Contributors
+# See LICENSE file for licensing details.
+#
+# This is only an example, and you should edit to suit your needs.
+# If you don't need actions, you can remove the file entirely.
+# This charm does not currently define any actions.
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/config.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2c67568178ac63cc757a0a68bb013fb1bfbd404
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/config.yaml
@@ -0,0 +1,6 @@
+# Copyright 2020 ETSI OSM Contributors
+# See LICENSE file for licensing details.
+#
+# This is only an example, and you should edit to suit your needs.
+# If you don't need config, you can remove the file entirely.
+options:
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/dispatch b/hackfest_virtual-pc_vnfd/charms/virtual-pc/dispatch
new file mode 100755
index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/dispatch
@@ -0,0 +1,3 @@
+#!/bin/sh
+
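+# Run the charm entry point with the operator framework on PYTHONPATH;
+# Juju sets JUJU_DISPATCH_PATH to the hook or action being dispatched.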
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/install b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/install
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/install
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/start b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/start
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/start
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/upgrade-charm b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/upgrade-charm
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/hooks/upgrade-charm
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/metadata.yaml b/hackfest_virtual-pc_vnfd/charms/virtual-pc/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d30a5b5d2ce0a8d867fe00ac7974a6133e4d50b
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/metadata.yaml
@@ -0,0 +1,9 @@
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+name: virtual-pc
+description: |
+  Installs the MATE desktop environment and an XRDP server on an Ubuntu
+  machine, turning it into a remotely accessible virtual PC.
+summary: |
+  Virtual desktop PC accessible over RDP.
+series:
+ - focal
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements-dev.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..34e6ef82473af5ec6fb293e11ef353d30ddcdca6
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements-dev.txt
@@ -0,0 +1,3 @@
+-r requirements.txt
+flake8
+charmcraft
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d81d3bb6fea804d1db7a1549d67244b513aa145
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/requirements.txt
@@ -0,0 +1 @@
+ops
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/run_tests b/hackfest_virtual-pc_vnfd/charms/virtual-pc/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..08ec01e05a8965a5009902a6732adf13c027add6
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+ . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+ export PYTHONPATH=src
+else
+ export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+flake8
+python3 -m unittest -v "$@"
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..1d72001264ceaa7b563b4ac82650d79669157e45
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/charm.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+from apt.progress.base import InstallProgress
+import logging
+
+from jinja2 import Template
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import (
+ MaintenanceStatus,
+ ActiveStatus,
+ # BlockedStatus,
+)
+from utils import (
+ service_stop,
+ service_restart,
+ install_apt,
+ shell,
+)
+
+
+# from typing import Dict, Any
+logger = logging.getLogger(__name__)
+
+APT_REQUIREMENTS = [
+ "firefox",
+ "mate-desktop", # 469 packages
+ "mate-applets",
+ "mate-applet-brisk-menu",
+ "mate-indicator-applet",
+ "mate-session-manager",
+ "indicator-applet-session",
+ "mate-terminal",
+ "xrdp",
+]
+SNAP_INSTALLS = [
+ "code --classic",
+]
+POLKIT_TEMPLATE = "./templates/color.pkla"
+POLKIT_PATH = "/etc/polkit-1/localauthority/50-local.d/color.pkla"
+STARTWM_TEMPLATE = "./templates/startwm.sh"
+STARTWM_PATH = "/etc/xrdp/startwm.sh"
+# WM_COMMAND = "startxfce4" # xubuntu-desktop
+# WM_COMMAND = "budgie-desktop" # budgie-desktop-environment
+WM_COMMAND = "mate-session" # mate-desktop
+
+
+class VirtualPCCharm(CharmBase, InstallProgress):
+ _stored = StoredState()
+
+ def __init__(self, *args):
+ super().__init__(*args)
+ InstallProgress.__init__(self)
+
+        # Defaults let the status be computed before the install hook runs.
+        self._stored.set_default(installed=False, started=False)
+
+ # Basic hooks
+ self.framework.observe(self.on.install, self._on_install)
+ self.framework.observe(self.on.start, self._on_start)
+ self.framework.observe(self.on.stop, self._on_stop)
+ self.framework.observe(self.on.config_changed, self._on_config_changed)
+ self.framework.observe(self.on.update_status, self._on_update_status)
+
+ # Actions hooks
+
+ # Relations hooks
+
+ # Override InstallProgress to update our status
+ def status_change(self, pkg, percent, status):
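+        # apt invokes this callback for every package progress update;
+        # surface it as the unit's maintenance status message.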
+ message = str(int(percent)) + "% " + status
+ self.unit.status = MaintenanceStatus(message)
+
+ # Basic hooks
+ def _on_install(self, _):
+ self.unit.status = MaintenanceStatus("Installing apt packages")
+ install_apt(packages=APT_REQUIREMENTS, update=True, progress=self)
+ service_stop('xrdp')
+
+ self.unit.status = MaintenanceStatus("Installing snaps")
+ for snap in SNAP_INSTALLS:
+ shell("sudo snap install " + snap)
+
+ self.unit.status = MaintenanceStatus("Setting default display manager")
+ shell("echo /usr/sbin/lightdm | sudo tee /etc/X11/default-display-manager")
+
+ self.unit.status = MaintenanceStatus("Adding XRDP to ssl-cert group")
+ shell("sudo adduser xrdp ssl-cert")
+
+ self.unit.status = MaintenanceStatus("Generating Window Manager startup script")
+ with open(STARTWM_TEMPLATE, "r") as template:
+ content = Template(template.read()).render(command=WM_COMMAND)
+ with open(STARTWM_PATH, "w") as startwm:
+ startwm.write(content)
+
+ self.unit.status = MaintenanceStatus("Generating Polkit files")
+ with open(POLKIT_TEMPLATE, "r") as template:
+ content = Template(template.read()).render()
+ with open(POLKIT_PATH, "w") as polkit:
+ polkit.write(content)
+
+ self._stored.installed = True
+
+ def _on_start(self, _):
+ self.unit.status = MaintenanceStatus("Starting XRDP server")
+ service_restart('xrdp')
+ self._stored.started = True
+ self.unit.status = self._get_current_status()
+
+ def _on_stop(self, _):
+ service_stop('xrdp')
+ self._stored.started = False
+ self.unit.status = self._get_current_status()
+
+ def _on_config_changed(self, _):
+ self.unit.status = self._get_current_status()
+
+ def _on_update_status(self, _):
+ self.unit.status = self._get_current_status()
+
+ # Action hooks
+ # Relation hooks
+
+ # Private functions
+ def _get_current_status(self):
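+        # The unit is always "active"; the message distinguishes a unit
+        # that has finished installing ("Ready") from one still setting up.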
+ status_type = ActiveStatus
+ status_msg = ""
+ if self._stored.installed:
+ status_msg = "Ready"
+ return status_type(status_msg)
+
+
+if __name__ == "__main__":
+ main(VirtualPCCharm)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/test.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4cb4f758d048e240819dc2dad33ae5d5519e1bc
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/test.py
@@ -0,0 +1,17 @@
+from apt.progress.base import InstallProgress
+
+from utils import (
+ install_apt,
+)
+
+
+class Progress(InstallProgress):
+
+    def status_change(self, pkg, percent, status):
+        # Report apt progress as "NN% <status>".
+        print(str(int(percent)) + "% " + status)
+
+
+if __name__ == "__main__":
+ install_apt(packages=["mate-backgrounds"], update=True, progress=Progress())
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/utils.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8dce1bf65ca03fc7c4a0c6f4f781f4d155966b4
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/src/utils.py
@@ -0,0 +1,78 @@
+import apt
+import shutil
+import subprocess
+from typing import Dict, List
+
+
+def service_active(service_name: str) -> bool:
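+    # `systemctl is-active` prints the unit state (e.g. "active") on
+    # stdout; compare it to decide whether the service is running.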
+ result = subprocess.run(
+ ["systemctl", "is-active", service_name],
+ stdout=subprocess.PIPE,
+ encoding="utf-8",
+ )
+ return result.stdout == "active\n"
+
+
+def all_values_set(dictionary: Dict[str, str]) -> bool:
+ return not any(v is None for v in dictionary.values())
+
+
+def install_apt(packages: List[str], update: bool = False, progress=None) -> None:
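+    # Optionally refresh the package lists, then mark every package that
+    # is not yet installed and commit, reporting progress through the
+    # supplied apt InstallProgress instance.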
+ cache = apt.cache.Cache()
+ if update:
+ cache.update()
+ cache.open()
+ for package in packages:
+ pkg = cache[package]
+ if not pkg.is_installed:
+ pkg.mark_install()
+ cache.commit(install_progress=progress)
+
+
+def remove_apt(packages: List[str], update: bool = False) -> None:
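+    # Mark each package that is currently installed for removal and
+    # commit the change.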
+ cache = apt.cache.Cache()
+ if update:
+ cache.update()
+ cache.open()
+ for package in packages:
+ pkg = cache[package]
+        if pkg.is_installed:
+ pkg.mark_delete()
+ cache.commit()
+
+
+def shell(command: str) -> None:
+ subprocess.run(command, shell=True).check_returncode()
+
+
+def copy_files(origin: Dict[str, str], destination: Dict[str, str]) -> None:
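+    # Copy every file from its origin path to the destination registered
+    # under the same key.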
+ for config, origin_path in origin.items():
+ destination_path = destination[config]
+ shutil.copy(origin_path, destination_path)
+
+
+# Service functions
+def _systemctl(action: str, service_name: str) -> None:
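+    # Thin wrapper around `systemctl <action> <service>`; raises
+    # CalledProcessError if the command fails.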
+ subprocess.run(["systemctl", action, service_name]).check_returncode()
+
+
+def service_start(service_name: str) -> None:
+ _systemctl("start", service_name)
+
+
+def service_restart(service_name: str) -> None:
+ _systemctl("restart", service_name)
+
+
+def service_stop(service_name: str) -> None:
+ _systemctl("stop", service_name)
+
+
+def service_enable(service_name: str) -> None:
+ _systemctl("enable", service_name)
+
+
+def systemctl_daemon_reload() -> None:
+ subprocess.run(["systemctl", "daemon-reload"]).check_returncode()
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/color.pkla b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/color.pkla
new file mode 100644
index 0000000000000000000000000000000000000000..c8b94041bdd63ede347565cbdbd091c8be1d89ec
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/color.pkla
@@ -0,0 +1,6 @@
+[Allow colord for all users]
+Identity=unix-user:*
+Action=org.freedesktop.color-manager.create-device;org.freedesktop.color-manager.create-profile;org.freedesktop.color-manager.delete-device;org.freedesktop.color-manager.delete-profile;org.freedesktop.color-manager.modify-device;org.freedesktop.color-manager.modify-profile
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
\ No newline at end of file
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/startwm.sh b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/startwm.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0f3bdb211420745ce7b4413895c23c621087541a
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/templates/startwm.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# xrdp X session start script (c) 2015, 2017 mirabilos
+# published under The MirOS Licence
+
+if test -r /etc/profile; then
+ . /etc/profile
+fi
+
+if test -r /etc/default/locale; then
+ . /etc/default/locale
+ test -z "${LANG+x}" || export LANG
+ test -z "${LANGUAGE+x}" || export LANGUAGE
+ test -z "${LC_ADDRESS+x}" || export LC_ADDRESS
+ test -z "${LC_ALL+x}" || export LC_ALL
+ test -z "${LC_COLLATE+x}" || export LC_COLLATE
+ test -z "${LC_CTYPE+x}" || export LC_CTYPE
+ test -z "${LC_IDENTIFICATION+x}" || export LC_IDENTIFICATION
+ test -z "${LC_MEASUREMENT+x}" || export LC_MEASUREMENT
+ test -z "${LC_MESSAGES+x}" || export LC_MESSAGES
+ test -z "${LC_MONETARY+x}" || export LC_MONETARY
+ test -z "${LC_NAME+x}" || export LC_NAME
+ test -z "${LC_NUMERIC+x}" || export LC_NUMERIC
+ test -z "${LC_PAPER+x}" || export LC_PAPER
+ test -z "${LC_TELEPHONE+x}" || export LC_TELEPHONE
+ test -z "${LC_TIME+x}" || export LC_TIME
+ test -z "${LOCPATH+x}" || export LOCPATH
+fi
+
+if test -r /etc/profile; then
+ . /etc/profile
+fi
+
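+# The placeholder below is filled in with the window manager command
+# (WM_COMMAND in src/charm.py) when the charm renders this Jinja2 template.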
+{{ command }}
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/tests/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/tests/test_charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d456ec7fe3ff4b8033ad910c4aa4f4c57f8c873
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/tests/test_charm.py
@@ -0,0 +1,35 @@
+# Copyright 2020 David Garcia
+# See LICENSE file for licensing details.
+
+import unittest
+
+from ops.model import ActiveStatus
+from ops.testing import Harness
+from charm import VirtualPCCharm
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(VirtualPCCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_status_before_install(self):
+        # Before the install hook has run, the active status carries no
+        # message.
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status, ActiveStatus(""))
+
+    def test_status_after_install(self):
+        # Once installation has completed, update-status reports Ready.
+        self.harness.charm._stored.installed = True
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status,
+                         ActiveStatus("Ready"))
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/INSTALLER b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/LICENSE b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2021 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/METADATA b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..565f05b73714eb85d96beb669a1aa42920c21c3a
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/METADATA
@@ -0,0 +1,46 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 5.4.1
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow representing arbitrary Python objects.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/RECORD b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..68ca4da2c4df950a57ff47f60fbcacbb5256d161
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/RECORD
@@ -0,0 +1,43 @@
+PyYAML-5.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-5.4.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101
+PyYAML-5.4.1.dist-info/METADATA,sha256=XnrM5LY-uS85ica26gKUK0dGG-xmPjmGfDTSLpIHQFk,2087
+PyYAML-5.4.1.dist-info/RECORD,,
+PyYAML-5.4.1.dist-info/WHEEL,sha256=Dh4w5P6PPWbqyqoE6MHlzbFQwZXlM-voWJDf2WUsS2g,108
+PyYAML-5.4.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
+_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402
+_yaml/__pycache__/__init__.cpython-38.pyc,,
+yaml/__init__.py,sha256=gfp2CbRVhzknghkiiJD2l6Z0pI-mv_iZHPSJ4aj0-nY,13170
+yaml/__pycache__/__init__.cpython-38.pyc,sha256=n0YyVkjiOLmcjlR2NXE5TIZf7Z2clZ6sqQ5KlyuTWSI,11845
+yaml/__pycache__/composer.cpython-38.pyc,sha256=OVPzAKAvC2-Tiv4HUwUUG9JHCzG17nvsRQcFTCtY9xs,3563
+yaml/__pycache__/constructor.cpython-38.pyc,sha256=EXPDY7Prtv3F6QbXiJc5F4BvJQyCCGRu83WF4u6X7Vo,20822
+yaml/__pycache__/cyaml.cpython-38.pyc,sha256=wI01UFU-WhUcdnnczL5QpKu0ZNQTttSzXbleIvIfcvM,3411
+yaml/__pycache__/dumper.cpython-38.pyc,sha256=9wIctrlMpF4ksMWuCc5QAyZSenGiRVyrtU-1pAfj54U,1823
+yaml/__pycache__/emitter.cpython-38.pyc,sha256=kd_QGJd0GjpfgQPN9DlG_7HwKfJnJ24JxtdiUOxM9iE,25353
+yaml/__pycache__/error.cpython-38.pyc,sha256=j6mkXgDmzV0y0lo6FeUrvZL2vHN6Vkc52k0_R0oOn6g,2300
+yaml/__pycache__/events.cpython-38.pyc,sha256=NFsoAO36pPL_uxoCO-xRxKndQ3vx47mkStOYjfoQVZ8,3974
+yaml/__pycache__/loader.cpython-38.pyc,sha256=lEMB2brjPrfMjXXTJpCEx6-ct4eI6LYovD4hW5ZuGsw,2164
+yaml/__pycache__/nodes.cpython-38.pyc,sha256=Kkxh_oL04gQg-YFWwnfjpIoYspsXO4GEqKTr3NbxOD8,1725
+yaml/__pycache__/parser.cpython-38.pyc,sha256=0R9Qx0cBMUoOLzMOWeXCyXsC4S4KJ7oPHdmTVPQ4FbQ,11924
+yaml/__pycache__/reader.cpython-38.pyc,sha256=ZpOMJ6rZDc8EWffI4vZR_Fhcu3WmhgT_GAkDrKkEtPo,4537
+yaml/__pycache__/representer.cpython-38.pyc,sha256=tR9wWffCThWXwQe47uYFdHg2bCkqNjBcwmG7RSHmWS4,10069
+yaml/__pycache__/resolver.cpython-38.pyc,sha256=zsLBuCKn8KAJPVGo5J_xZSytifJktdTtkUNnltOt__I,5498
+yaml/__pycache__/scanner.cpython-38.pyc,sha256=N8ubxRd6bZBjoRna6CU8wK1Imb_7TWOsudzPh9JDDkQ,25269
+yaml/__pycache__/serializer.cpython-38.pyc,sha256=9JDH7ONP5zFlep0f2yNWRoOSZr5Y28jL012O1EIbuug,3320
+yaml/__pycache__/tokens.cpython-38.pyc,sha256=haBW6UBDhVFog2xIe63OkrAP_9JRFyNKCROFPRJiyu0,4935
+yaml/_yaml.cpython-38-x86_64-linux-gnu.so,sha256=fxjEXaSdzion1SMwhu9Ikx-JOVNtcl6KvW_pyGBt-cU,2342916
+yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
+yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639
+yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851
+yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
+yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
+yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
+yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
+yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184
+yaml/resolver.py,sha256=Z1W8AOMA6Proy4gIO2OhUO4IPS_bFNAl0Ca3rwChpPg,8999
+yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277
+yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/WHEEL b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..69d594f055a5127401ebe017f8837cef4c76c020
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-manylinux1_x86_64
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/top_level.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/PyYAML-5.4.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/_yaml/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/_yaml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+ from sys import version_info
+
+ exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+ raise exc("No module named '_yaml'")
+else:
+ from yaml._yaml import *
+ import warnings
+ warnings.warn(
+ 'The _yaml extension module is now located at yaml._yaml'
+ ' and its location is subject to change. To use the'
+ ' LibYAML-based parser and emitter, import from `yaml`:'
+ ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+ DeprecationWarning
+ )
+ del warnings
+ # Don't `del yaml` here because yaml is actually an existing
+ # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
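+
+# For example, the supported replacement for importing from this stub is the
+# pattern the warning above recommends (a sketch; falls back to the
+# pure-Python classes when the LibYAML extension is unavailable):
+#
+#     try:
+#         from yaml import CLoader as Loader, CDumper as Dumper
+#     except ImportError:
+#         from yaml import Loader, Dumper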
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/INSTALLER b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/LICENSE.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/METADATA b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..cd45af9374c8209252a546c5a4c5a4221c8fb8af
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/METADATA
@@ -0,0 +1,263 @@
+Metadata-Version: 2.1
+Name: ops
+Version: 1.1.0
+Summary: The Python library behind great charms
+Home-page: https://github.com/canonical/operator
+Author: The Charmcraft team at Canonical Ltd.
+Author-email: charmcraft@lists.launchpad.net
+License: Apache-2.0
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3.5
+Description-Content-Type: text/markdown
+Requires-Dist: PyYAML
+
+# The Operator Framework
+
+This Operator Framework simplifies [Kubernetes
+operator](https://charmhub.io/about) development for
+[model-driven application
+management](https://juju.is/model-driven-operations).
+
+A Kubernetes operator is a container that drives lifecycle management,
+configuration, integration and daily actions for an application.
+Operators simplify software management and operations. They capture
+reusable app domain knowledge from experts in a software component that
+can be shared.
+
+This project extends the operator pattern to enable
+[universal operators](https://juju.is/universal-operators), not just
+for Kubernetes but also operators for traditional Linux or Windows
+application management.
+
+Operators use an [Operator Lifecycle Manager
+(OLM)](https://juju.is/operator-lifecycle-manager) to coordinate their
+work in a cluster. The system uses Golang for concurrent event
+processing under the hood, but enables the operators to be written in
+Python.
+
+## Simple, composable operators
+
+Operators should 'do one thing and do it well'. Each operator drives a
+single microservice and can be [composed with other
+operators](https://juju.is/integration) to deliver a complex application.
+
+It is better to have small, reusable operators that each drive a single
+microservice very well. The operator handles instantiation, scaling,
+configuration, optimisation, networking, service mesh, observability,
+and day-2 operations specific to that microservice.
+
+Operator composition takes place through declarative integration in
+the OLM. Operators declare integration endpoints, and discover lines of
+integration between those endpoints dynamically at runtime.
+
+## Pure Python operators
+
+The framework provides a standard Python library and object model that
+represents the application graph, and an event distribution mechanism for
+distributed system coordination and communication.
+
+The OLM is written in Golang for efficient concurrency in event handling
+and distribution. Operators can be written in any language. We recommend
+this Python framework for ease of design, development and collaboration.
+
+## Better collaboration
+
+Operator developers publish Python libraries that make it easy to integrate
+your operator with their operator. The framework includes standard tools
+to distribute these integration libraries and keep them up to date.
+
+Development collaboration happens at [Charmhub.io](https://charmhub.io/) where
+operators are published along with integration libraries. Design and
+code review discussions are hosted in the
+[Charmhub forum](https://discourse.charmhub.io/). We recommend the
+[Open Operator Manifesto](https://charmhub.io/manifesto) as a guideline for
+high quality operator engineering.
+
+## Event serialization and operator services
+
+Distributed systems can be hard! So this framework exists to make it much
+simpler to reason about operator behaviour, especially in complex deployments.
+The OLM provides [operator services](https://juju.is/operator-services) such
+as provisioning, event delivery, leader election and model management.
+
+Coordination between operators is provided by a cluster-wide event
+distribution system. Events are serialized to avoid race conditions in any
+given container or machine. This greatly simplifies the development of
+operators for high availability, scale-out and integrated applications.
+
+## Model-driven Operator Lifecycle Manager
+
+A key goal of the project is to improve the user experience for admins
+working with multiple different operators.
+
+We embrace [model-driven operations](https://juju.is/model-driven-operations)
+in the Operator Lifecycle Manager. The model encompasses capacity,
+storage, networking, the application graph and administrative access.
+
+Admins describe the application graph of integrated microservices, and
+the OLM then drives instantiation. A change in the model is propagated
+to all affected operators, reducing the duplication of effort and
+repetition normally found in operating a complex topology of services.
+
+Administrative actions, updates, configuration and integration are all
+driven through the OLM.
+
+# Getting started
+
+A package of operator code is called a charm. You will use `charmcraft`
+to register your operator name, and publish it when you are ready.
+
+```
+$ sudo snap install charmcraft --beta
+charmcraft (beta) 0.6.0 from John Lenton (chipaca) installed
+```
+
+Charms written using the operator framework are just Python code. The goal
+is to feel natural for somebody used to coding in Python, and reasonably
+easy to learn for somebody who is not a pythonista.
+
+The dependencies of the operator framework are kept as minimal as possible;
+currently that's Python 3.5 or greater, and `PyYAML` (both are included by
+default in Ubuntu's cloud images from 16.04 on).
+
+# A quick introduction
+
+Make an empty directory `my-charm` and cd into it. Then start a new charm
+with:
+
+```
+$ charmcraft init
+All done.
+There are some notes about things we think you should do.
+These are marked with ‘TODO:’, as is customary. Namely:
+ README.md: fill out the description
+ README.md: explain how to use the charm
+ metadata.yaml: fill out the charm's description
+ metadata.yaml: fill out the charm's summary
+```
+
+Charmed operators are just Python code. The entry point to your charm can
+be any filename, by default this is `src/charm.py` which must be executable
+(and probably have `#!/usr/bin/env python3` on the first line).
+
+You need a `metadata.yaml` to describe your charm, and if you will support
+configuration of your charm then a `config.yaml` file is required too. The
+`requirements.txt` specifies any Python dependencies.
+
+```
+$ tree my-charm/
+my-charm/
+├── actions.yaml
+├── config.yaml
+├── LICENSE
+├── metadata.yaml
+├── README.md
+├── requirements-dev.txt
+├── requirements.txt
+├── run_tests
+├── src
+│ └── charm.py
+├── tests
+│ ├── __init__.py
+│ └── my_charm.py
+```
+
+`src/charm.py` here is the entry point to your charm code. At a minimum, it
+needs to define a subclass of `CharmBase` and pass that into the framework
+`main` function:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+
+class MyCharm(CharmBase):
+ def __init__(self, *args):
+ super().__init__(*args)
+ self.framework.observe(self.on.start, self.on_start)
+
+ def on_start(self, event):
+        # Handle the start event here; `pass` keeps the stub valid Python.
+        pass
+
+if __name__ == "__main__":
+ main(MyCharm)
+```
+
+That should be enough for you to be able to run
+
+```
+$ charmcraft build
+Done, charm left in 'my-charm.charm'
+$ juju deploy ./my-charm.charm
+```
+
+> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
+> also be found on its [github page](https://github.com/canonical/charmcraft).
+
+Happy charming!
+
+# Testing your charms
+
+The operator framework provides a testing harness, so you can check your
+charm does the right thing in different scenarios, without having to create
+a full deployment. `pydoc3 ops.testing` has the details, including this
+example:
+
+```python
+harness = Harness(MyCharm)
+# Do initial setup here
+relation_id = harness.add_relation('db', 'postgresql')
+# Now instantiate the charm to see events as the model changes
+harness.begin()
+harness.add_relation_unit(relation_id, 'postgresql/0')
+harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+# Check that charm has properly handled the relation_joined event for postgresql/0
+self.assertEqual(harness.charm. ...)
+```
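+
+As a fuller sketch, the same calls can live in a standard `unittest` test
+case. Everything here besides the `ops` APIs (the `MyCharm` placeholder,
+the `db` relation and its metadata, and the `key`/`val` data) is
+illustrative:
+
+```python
+import unittest
+
+from ops.charm import CharmBase
+from ops.testing import Harness
+
+
+class MyCharm(CharmBase):
+    """Placeholder charm; a real charm would observe events here."""
+
+
+class TestMyCharm(unittest.TestCase):
+    def test_db_relation_data(self):
+        # Declare minimal metadata so the harness knows about the 'db' relation.
+        harness = Harness(MyCharm, meta='''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+        ''')
+        relation_id = harness.add_relation('db', 'postgresql')
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # The remote unit's data is now visible through the charm's model.
+        relation = harness.charm.model.get_relation('db')
+        unit = harness.charm.model.get_unit('postgresql/0')
+        self.assertEqual(relation.data[unit]['key'], 'val')
+
+
+if __name__ == '__main__':
+    unittest.main()
+```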
+
+## Talk to us
+
+If you need help, have ideas, or would just like to chat with us, reach out on
+IRC: we're in [#smooth-operator] on freenode (or try the [webchat]).
+
+We also pay attention to [Charmhub discourse](https://discourse.charmhub.io/).
+
+You can also deep dive into the [API docs] if that's your thing.
+
+[webchat]: https://webchat.freenode.net/#smooth-operator
+[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator
+[discourse]: https://discourse.juju.is/c/charming
+[API docs]: https://ops.rtfd.io/
+
+## Operator Framework development
+
+To work in the framework itself you will need Python >= 3.5 and the
+dependencies in `requirements-dev.txt` installed in your system, or a
+virtualenv:
+
+ virtualenv --python=python3 env
+ source env/bin/activate
+ pip install -r requirements-dev.txt
+
+Then you can try `./run_tests`; it should all go green.
+
+For improved performance on the tests, ensure that you have PyYAML
+installed with the correct extensions:
+
+ apt-get install libyaml-dev
+ pip install --force-reinstall --no-cache-dir pyyaml
+
+If you want to build the documentation you'll need the requirements from
+`docs/requirements.txt`, or in your virtualenv:
+
+ pip install -r docs/requirements.txt
+
+and then you can run `./build_docs`.
+
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/RECORD b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..35eb15f5359c1b967188c151b20fdcbc2d251c27
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/RECORD
@@ -0,0 +1,28 @@
+ops-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ops-1.1.0.dist-info/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+ops-1.1.0.dist-info/METADATA,sha256=ffVuqPnEob6-iBYjEf3lPShSbToJL17obFFufoW2F4g,9485
+ops-1.1.0.dist-info/RECORD,,
+ops-1.1.0.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
+ops-1.1.0.dist-info/top_level.txt,sha256=enC05wWafSg8iDKIvj3gvtAtEP2kYCyN5Gmd689q-_I,4
+ops/__init__.py,sha256=WaHb0dfp1KEe6jFV8Pm_mcdJ3ModiWujnQ6xLjNzPNQ,819
+ops/__pycache__/__init__.cpython-38.pyc,,
+ops/__pycache__/charm.cpython-38.pyc,,
+ops/__pycache__/framework.cpython-38.pyc,,
+ops/__pycache__/jujuversion.cpython-38.pyc,,
+ops/__pycache__/log.cpython-38.pyc,,
+ops/__pycache__/main.cpython-38.pyc,,
+ops/__pycache__/model.cpython-38.pyc,,
+ops/__pycache__/storage.cpython-38.pyc,,
+ops/__pycache__/testing.cpython-38.pyc,,
+ops/__pycache__/version.cpython-38.pyc,,
+ops/charm.py,sha256=7KyaNNA0t_a0h0hrzehSEWm4xU_Y5JIqGWHTg747qfU,32817
+ops/framework.py,sha256=1ByOtFKRR6kRzOEbfWnGEMNevixOYf18U0oZxKq8LsA,43769
+ops/jujuversion.py,sha256=9wMlUmngcAENV9RkgVVLWtZsyRQaf6XNrQQqUeY_fHA,4139
+ops/lib/__init__.py,sha256=QizPpuRWXjqbH5Gv7mnH8CcPR9BX7q2YNFnxyoSsA0g,9213
+ops/lib/__pycache__/__init__.cpython-38.pyc,,
+ops/log.py,sha256=JVpt_Vkf_lWO2cucUcJfXjAWVTattk4xBscSs65Sn3I,2155
+ops/main.py,sha256=BUJZM4soFpsY4bO6zJ1bSHQeWJcm028gq0MhJT3rC8M,15523
+ops/model.py,sha256=yvM1yhidNyGpVdxkG365jPJRhQuE42EiiojBHJ7tL3c,47930
+ops/storage.py,sha256=jEfszzQGYDrl5wa03I6txvea-7lI661Yq6n7sIPa0fU,14192
+ops/testing.py,sha256=sH8PoNzGmfPdVWM1lBjStxHcNfQHsasFjF-WzHfDhFA,34898
+ops/version.py,sha256=UuaLFU_UN-InNFu4I23Y22huxQdbsOgTQ_d_r623fx4,46
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/WHEEL b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..b552003ff90e66227ec90d1b159324f140d46001
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/top_level.txt b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d81d3bb6fea804d1db7a1549d67244b513aa145
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops-1.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ops
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework."""
+
+from .version import version as __version__ # noqa: F401 (imported but unused)
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm # noqa: F401 (imported but unused)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/charm.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..82ed2fdd8b85d423eb8645bddb4b64426ad180e1
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/charm.py
@@ -0,0 +1,823 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base objects for the Charm, events and metadata."""
+
+import enum
+import os
+import pathlib
+import typing
+
+import yaml
+
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+from ops import model
+
+
+def _loadYaml(source):
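+    # Prefer the LibYAML-based CSafeLoader when the C extension is available;
+    # fall back to the pure-Python SafeLoader otherwise.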
+ if yaml.__with_libyaml__:
+ return yaml.load(source, Loader=yaml.CSafeLoader)
+ return yaml.load(source, Loader=yaml.SafeLoader)
+
+
+class HookEvent(EventBase):
+ """Events raised by Juju to progress a charm's lifecycle.
+
+ Hooks are callback methods of a charm class (a subclass of
+ :class:`CharmBase`) that are invoked in response to events raised
+ by Juju. These callback methods are the means by which a charm
+ governs the lifecycle of its application.
+
+ The :class:`HookEvent` class is the base of a type hierarchy of events
+ related to the charm's lifecycle.
+
+ :class:`HookEvent` subtypes are grouped into the following categories
+
+ - Core lifecycle events
+ - Relation events
+ - Storage events
+ - Metric events
+ """
+
+
+class ActionEvent(EventBase):
+ """Events raised by Juju when an administrator invokes a Juju Action.
+
+ This class is the data type of events triggered when an administrator
+ invokes a Juju Action. Callbacks bound to these events may be used
+ for responding to the administrator's Juju Action request.
+
+ To read the parameters for the action, see the instance variable :attr:`params`.
+ To respond with the result of the action, call :meth:`set_results`. To add
+ progress messages that are visible as the action is progressing use
+ :meth:`log`.
+
+ Attributes:
+ params: The parameters passed to the action.
+ """
+
+ def defer(self):
+ """Action events are not deferable like other events.
+
+ This is because an action runs synchronously and the administrator
+ is waiting for the result.
+ """
+ raise RuntimeError('cannot defer action events')
+
+ def restore(self, snapshot: dict) -> None:
+ """Used by the operator framework to record the action.
+
+ Not meant to be called directly by charm code.
+ """
+ env_action_name = os.environ.get('JUJU_ACTION_NAME')
+ event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+ if event_action_name != env_action_name:
+ # This could only happen if the dev manually emits the action, or from a bug.
+ raise RuntimeError('action event kind does not match current action')
+ # Params are loaded at restore rather than __init__ because
+ # the model is not available in __init__.
+ self.params = self.framework.model._backend.action_get()
+
+ def set_results(self, results: typing.Mapping) -> None:
+ """Report the result of the action.
+
+ Args:
+ results: The result of the action as a Dict
+ """
+ self.framework.model._backend.action_set(results)
+
+ def log(self, message: str) -> None:
+ """Send a message that a user will see while the action is running.
+
+ Args:
+ message: The message for the user.
+ """
+ self.framework.model._backend.action_log(message)
+
+ def fail(self, message: str = '') -> None:
+ """Report that this action has failed.
+
+ Args:
+ message: Optional message to record why it has failed.
+ """
+ self.framework.model._backend.action_fail(message)
+
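+# A sketch (not part of ops) of a handler for a hypothetical 'snapshot'
+# action; the action name and its 'target' parameter are illustrative and
+# would have to be declared in the charm's actions.yaml:
+#
+#     class MyCharm(CharmBase):
+#         def __init__(self, *args):
+#             super().__init__(*args)
+#             self.framework.observe(self.on.snapshot_action, self._on_snapshot)
+#
+#         def _on_snapshot(self, event):
+#             target = event.params.get('target', '/var/backups')
+#             event.log('snapshotting to {}'.format(target))
+#             try:
+#                 event.set_results({'path': target})
+#             except Exception as err:
+#                 event.fail('snapshot failed: {}'.format(err))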
+
+class InstallEvent(HookEvent):
+ """Event triggered when a charm is installed.
+
+ This event is triggered at the beginning of a charm's
+ lifecycle. Any associated callback method should be used to
+ perform one-time setup operations, such as installing prerequisite
+ software.
+ """
+
+
+class StartEvent(HookEvent):
+ """Event triggered immediately after first configuation change.
+
+ This event is triggered immediately after the first
+ :class:`ConfigChangedEvent`. Callback methods bound to the event should be
+ used to ensure that the charm’s software is in a running state. Note that
+ the charm’s software should be configured so as to persist in this state
+ through reboots without further intervention on Juju’s part.
+ """
+
+
+class StopEvent(HookEvent):
+ """Event triggered when a charm is shut down.
+
+ This event is triggered when an application's removal is requested
+ by the client. The event fires immediately before the end of the
+ unit’s destruction sequence. Callback methods bound to this event
+ should be used to ensure that the charm’s software is not running,
+ and that it will not start again on reboot.
+ """
+
+
+class RemoveEvent(HookEvent):
+ """Event triggered when a unit is about to be terminated.
+
+ This event fires prior to Juju removing the charm and terminating its unit.
+ """
+
+
+class ConfigChangedEvent(HookEvent):
+ """Event triggered when a configuration change is requested.
+
+ This event fires in several different situations.
+
+    - immediately after the :class:`install <InstallEvent>` event.
+    - after a :class:`relation is created <RelationCreatedEvent>`.
+    - after a :class:`leader is elected <LeaderElectedEvent>`.
+    - after changing charm configuration using the GUI or command line
+      interface
+    - when the charm :class:`starts <StartEvent>`.
+    - when a new unit :class:`joins a relation <RelationJoinedEvent>`.
+    - when there is a :class:`change to an existing relation <RelationChangedEvent>`.
+
+ Any callback method bound to this event cannot assume that the
+ software has already been started; it should not start stopped
+ software, but should (if appropriate) restart running software to
+ take configuration changes into account.
+ """
+
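+# A sketch of a config-changed handler (the 'log-level' option is
+# illustrative and would be declared in the charm's config.yaml):
+#
+#     def _on_config_changed(self, event):
+#         level = self.model.config.get('log-level', 'info')
+#         # (Re)configure or restart the running workload here if it changed.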
+
+class UpdateStatusEvent(HookEvent):
+ """Event triggered by a status update request from Juju.
+
+ This event is periodically triggered by Juju so that it can
+ provide constant feedback to the administrator about the status of
+ the application the charm is modeling. Any callback method bound
+ to this event should determine the "health" of the application and
+ set the status appropriately.
+
+    The interval between :class:`update-status <UpdateStatusEvent>` events can
+ be configured model-wide, e.g. ``juju model-config
+ update-status-hook-interval=1m``.
+ """
+
+
+class UpgradeCharmEvent(HookEvent):
+ """Event triggered by request to upgrade the charm.
+
+ This event will be triggered when an administrator executes ``juju
+ upgrade-charm``. The event fires after Juju has unpacked the upgraded charm
+ code, and so this event will be handled by the callback method bound to the
+ event in the new codebase. The associated callback method is invoked
+ provided there is no existing error state. The callback method should be
+ used to reconcile current state written by an older version of the charm
+ into whatever form that is needed by the current charm version.
+ """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+ """Event triggered to prepare a unit for series upgrade.
+
+ This event triggers when an administrator executes ``juju upgrade-series
+ MACHINE prepare``. The event will fire for each unit that is running on the
+ specified machine. Any callback method bound to this event must prepare the
+ charm for an upgrade to the series. This may include things like exporting
+ database content to a version neutral format, or evacuating running
+ instances to other machines.
+
+ It can be assumed that only after all units on a machine have executed the
+ callback method associated with this event, the administrator will initiate
+ steps to actually upgrade the series. After the upgrade has been completed,
+ the :class:`PostSeriesUpgradeEvent` will fire.
+ """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+ """Event triggered after a series upgrade.
+
+ This event is triggered after the administrator has done a distribution
+ upgrade (or rolled back and kept the same series). It is called in response
+ to ``juju upgrade-series MACHINE complete``. Associated charm callback
+ methods are expected to do whatever steps are necessary to reconfigure their
+ applications for the new series. This may include things like populating the
+ upgraded version of a database. Note however charms are expected to check if
+ the series has actually changed or whether it was rolled back to the
+ original series.
+ """
+
+
+class LeaderElectedEvent(HookEvent):
+ """Event triggered when a new leader has been elected.
+
+ Juju will trigger this event when a new leader unit is chosen for
+ a given application.
+
+ This event fires at least once after Juju selects a leader
+ unit. Callback methods bound to this event may take any action
+ required for the elected unit to assert leadership. Note that only
+ the elected leader unit will receive this event.
+ """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+ """Event triggered when leader changes any settings.
+
+ DEPRECATED NOTICE
+
+ This event has been deprecated in favor of using a Peer relation,
+ and having the leader set a value in the Application data bag for
+ that peer relation. (see :class:`RelationChangedEvent`).
+ """
+
+
+class CollectMetricsEvent(HookEvent):
+ """Event triggered by Juju to collect metrics.
+
+ Juju fires this event every five minutes for the lifetime of the
+ unit. Callback methods bound to this event may use the :meth:`add_metrics`
+ method of this class to send measurements to Juju.
+
+ Note that associated callback methods are currently sandboxed in
+ how they can interact with Juju.
+ """
+
+ def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
+ """Record metrics that have been gathered by the charm for this unit.
+
+ Args:
+ metrics: A collection of {key: float} pairs that contains the
+ metrics that have been gathered
+ labels: {key:value} strings that can be applied to the
+ metrics that are being gathered
+ """
+ self.framework.model._backend.add_metrics(metrics, labels)
+
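+# For example (a sketch; the metric and label names are illustrative and
+# must match what the charm declares in its metrics.yaml):
+#
+#     def _on_collect_metrics(self, event):
+#         event.add_metrics({'users': 42.0}, {'region': 'eu-west'})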
+
+class RelationEvent(HookEvent):
+ """A base class representing the various relation lifecycle events.
+
+ Relation lifecycle events are generated when application units
+ participate in relations. Units can only participate in relations
+ after they have been "started", and before they have been
+ "stopped". Within that time window, the unit may participate in
+ several different relations at a time, including multiple
+ relations with the same name.
+
+ Attributes:
+ relation: The :class:`~ops.model.Relation` involved in this event
+ app: The remote :class:`~ops.model.Application` that has triggered this
+ event
+ unit: The remote unit that has triggered this event. This may be
+ ``None`` if the relation event was triggered as an
+ :class:`~ops.model.Application` level event
+
+ """
+
+ def __init__(self, handle, relation, app=None, unit=None):
+ super().__init__(handle)
+
+ if unit is not None and unit.app != app:
+ raise RuntimeError(
+ 'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
+
+ self.relation = relation
+ self.app = app
+ self.unit = unit
+
+ def snapshot(self) -> dict:
+ """Used by the framework to serialize the event to disk.
+
+ Not meant to be called by charm code.
+ """
+ snapshot = {
+ 'relation_name': self.relation.name,
+ 'relation_id': self.relation.id,
+ }
+ if self.app:
+ snapshot['app_name'] = self.app.name
+ if self.unit:
+ snapshot['unit_name'] = self.unit.name
+ return snapshot
+
+ def restore(self, snapshot: dict) -> None:
+ """Used by the framework to deserialize the event from disk.
+
+ Not meant to be called by charm code.
+ """
+ self.relation = self.framework.model.get_relation(
+ snapshot['relation_name'], snapshot['relation_id'])
+
+ app_name = snapshot.get('app_name')
+ if app_name:
+ self.app = self.framework.model.get_app(app_name)
+ else:
+ self.app = None
+
+ unit_name = snapshot.get('unit_name')
+ if unit_name:
+ self.unit = self.framework.model.get_unit(unit_name)
+ else:
+ self.unit = None
+
+
+class RelationCreatedEvent(RelationEvent):
+ """Event triggered when a new relation is created.
+
+ This is triggered when a new relation to another app is added in Juju. This
+ can occur before units for those applications have started. All existing
+ relations should be established before start.
+ """
+
+
+class RelationJoinedEvent(RelationEvent):
+ """Event triggered when a new unit joins a relation.
+
+ This event is triggered whenever a new unit of a related
+ application joins the relation. The event fires only when that
+ remote unit is first observed by the unit. Callback methods bound
+ to this event may set any local unit settings that can be
+ determined using no more than the name of the joining unit and the
+ remote ``private-address`` setting, which is always available when
+ the relation is created and is by convention not deleted.
+ """
+
+
+class RelationChangedEvent(RelationEvent):
+ """Event triggered when relation data changes.
+
+ This event is triggered whenever there is a change to the data bucket for a
+ related application or unit. Look at ``event.relation.data[event.unit/app]``
+ to see the new information, where ``event`` is the event object passed to
+ the callback method bound to this event.
+
+ This event always fires once, after :class:`RelationJoinedEvent`, and
+ will subsequently fire whenever that remote unit changes its settings for
+ the relation. Callback methods bound to this event should be the only ones
+ that rely on remote relation settings. They should not error if the settings
+ are incomplete, since it can be guaranteed that when the remote unit or
+ application changes its settings, the event will fire again.
+
+ The settings that may be queried, or set, are determined by the relation’s
+ interface.
+ """
+
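+# A sketch of a relation-changed handler reading the remote unit's data
+# bag (the 'db' relation and 'host' key are illustrative):
+#
+#     def _on_db_relation_changed(self, event):
+#         if event.unit is None:
+#             return  # application-level change; no remote unit data here
+#         host = event.relation.data[event.unit].get('host')
+#         if host is None:
+#             return  # settings incomplete; the event will fire again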
+
+class RelationDepartedEvent(RelationEvent):
+ """Event triggered when a unit leaves a relation.
+
+ This is the inverse of the :class:`RelationJoinedEvent`, representing when a
+ unit is leaving the relation (the unit is being removed, the app is being
+ removed, the relation is being removed). It is fired once for each unit that
+ is going away.
+
+ When the remote unit is known to be leaving the relation, this will result
+ in the :class:`RelationChangedEvent` firing at least once, after which the
+ :class:`RelationDepartedEvent` will fire. The :class:`RelationDepartedEvent`
+ will fire once only. Once the :class:`RelationDepartedEvent` has fired no
+ further :class:`RelationChangedEvent` will fire.
+
+ Callback methods bound to this event may be used to remove all
+ references to the departing remote unit, because there’s no
+ guarantee that it’s still part of the system; it’s perfectly
+ probable (although not guaranteed) that the system running that
+ unit has already shut down.
+
+ Once all callback methods bound to this event have been run for such a
+ relation, the unit agent will fire the :class:`RelationBrokenEvent`.
+ """
+
+
+class RelationBrokenEvent(RelationEvent):
+ """Event triggered when a relation is removed.
+
+ If a relation is being removed (``juju remove-relation`` or ``juju
+ remove-application``), once all the units have been removed, this event will
+ fire to signal that the relationship has been fully terminated.
+
+ The event indicates that the current relation is no longer valid, and that
+ the charm’s software must be configured as though the relation had never
+ existed. It will only be called after every callback method bound to
+ :class:`RelationDepartedEvent` has been run. If a callback method
+    bound to this event is being executed, it is guaranteed that no remote units
+ are currently known locally.
+ """
+
+
+class StorageEvent(HookEvent):
+ """Base class representing storage-related events.
+
+    Juju can provide a variety of storage types to a charm. The
+ charms can define several different types of storage that are
+ allocated from Juju. Changes in state of storage trigger sub-types
+ of :class:`StorageEvent`.
+ """
+
+
+class StorageAttachedEvent(StorageEvent):
+ """Event triggered when new storage becomes available.
+
+ This event is triggered when new storage is available for the
+ charm to use.
+
+ Callback methods bound to this event allow the charm to run code
+ when storage has been added. Such methods will be run before the
+ :class:`InstallEvent` fires, so that the installation routine may
+ use the storage. The name prefix of this hook will depend on the
+ storage key defined in the ``metadata.yaml`` file.
+ """
+
+
+class StorageDetachingEvent(StorageEvent):
+ """Event triggered prior to removal of storage.
+
+ This event is triggered when storage a charm has been using is
+ going away.
+
+ Callback methods bound to this event allow the charm to run code
+ before storage is removed. Such methods will be run before storage
+ is detached, and always before the :class:`StopEvent` fires, thereby
+ allowing the charm to gracefully release resources before they are
+ removed and before the unit terminates. The name prefix of the
+ hook will depend on the storage key defined in the ``metadata.yaml``
+ file.
+ """
+
+
+class CharmEvents(ObjectEvents):
+ """Events generated by Juju pertaining to application lifecycle.
+
+ This class is used to create an event descriptor (``self.on``) attribute for
+ a charm class that inherits from :class:`CharmBase`. The event descriptor
+ may be used to set up event handlers for corresponding events.
+
+ By default the following events will be provided through
+ :class:`CharmBase`::
+
+ self.on.install
+ self.on.start
+ self.on.remove
+ self.on.update_status
+ self.on.config_changed
+ self.on.upgrade_charm
+ self.on.pre_series_upgrade
+ self.on.post_series_upgrade
+ self.on.leader_elected
+ self.on.collect_metrics
+
+
+ In addition to these, depending on the charm's metadata (``metadata.yaml``),
+ named relation and storage events may also be defined. These named events
+ are created by :class:`CharmBase` using charm metadata. The named events may be
+    accessed as ``self.on[<name>].<event>``
+ """
+
+ install = EventSource(InstallEvent)
+ start = EventSource(StartEvent)
+ stop = EventSource(StopEvent)
+ remove = EventSource(RemoveEvent)
+ update_status = EventSource(UpdateStatusEvent)
+ config_changed = EventSource(ConfigChangedEvent)
+ upgrade_charm = EventSource(UpgradeCharmEvent)
+ pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
+ post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
+ leader_elected = EventSource(LeaderElectedEvent)
+ leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
+ collect_metrics = EventSource(CollectMetricsEvent)
+
+
+class CharmBase(Object):
+ """Base class that represents the charm overall.
+
+ :class:`CharmBase` is used to create a charm. This is done by inheriting
+ from :class:`CharmBase` and customising the sub class as required. So to
+ create your own charm, say ``MyCharm``, define a charm class and set up the
+ required event handlers (“hooks”) in its constructor::
+
+ import logging
+
+ from ops.charm import CharmBase
+ from ops.main import main
+
+ logger = logging.getLogger(__name__)
+
+    class MyCharm(CharmBase):
+ def __init__(self, *args):
+ logger.debug('Initializing Charm')
+
+ super().__init__(*args)
+
+ self.framework.observe(self.on.config_changed, self._on_config_changed)
+ self.framework.observe(self.on.stop, self._on_stop)
+ # ...
+
+ if __name__ == "__main__":
+ main(MyCharm)
+
+ As shown in the example above, a charm class is instantiated by
+ :func:`~ops.main.main` rather than charm authors directly instantiating a
+ charm.
+
+ Args:
+ framework: The framework responsible for managing the Model and events for this
+ charm.
+        key: Ignored; will be removed after the deprecation period of the signature change.
+
+ """
+
+ # note that without the #: below, sphinx will copy the whole of CharmEvents
+ # docstring inline which is less than ideal.
+ #: Used to set up event handlers; see :class:`CharmEvents`.
+ on = CharmEvents()
+
+ def __init__(self, framework: Framework, key: typing.Optional = None):
+ super().__init__(framework, None)
+
+ for relation_name in self.framework.meta.relations:
+ relation_name = relation_name.replace('-', '_')
+ self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+ self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+ self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+ self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+ self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+ for storage_name in self.framework.meta.storages:
+ storage_name = storage_name.replace('-', '_')
+ self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+ self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+ for action_name in self.framework.meta.actions:
+ action_name = action_name.replace('-', '_')
+ self.on.define_event(action_name + '_action', ActionEvent)
+
+ @property
+ def app(self) -> model.Application:
+ """Application that this unit is part of."""
+ return self.framework.model.app
+
+ @property
+ def unit(self) -> model.Unit:
+ """Unit that this execution is responsible for."""
+ return self.framework.model.unit
+
+ @property
+ def meta(self) -> 'CharmMeta':
+ """Metadata of this charm."""
+ return self.framework.meta
+
+ @property
+ def charm_dir(self) -> pathlib.Path:
+ """Root directory of the charm as it is running."""
+ return self.framework.charm_dir
+
+ @property
+ def config(self) -> model.ConfigData:
+ """A mapping containing the charm's config and current values."""
+ return self.model.config
+
+
+class CharmMeta:
+ """Object containing the metadata for the charm.
+
+ This is read from ``metadata.yaml`` and/or ``actions.yaml``. Generally
+ charms will define this information, rather than reading it at runtime. This
+ class is mostly for the framework to understand what the charm has defined.
+
+ The :attr:`maintainers`, :attr:`tags`, :attr:`terms`, :attr:`series`, and
+ :attr:`extra_bindings` attributes are all lists of strings. The
+ :attr:`requires`, :attr:`provides`, :attr:`peers`, :attr:`relations`,
+ :attr:`storages`, :attr:`resources`, and :attr:`payloads` attributes are all
+ mappings of names to instances of the respective :class:`RelationMeta`,
+ :class:`StorageMeta`, :class:`ResourceMeta`, or :class:`PayloadMeta`.
+
+ The :attr:`relations` attribute is a convenience accessor which includes all
+ of the ``requires``, ``provides``, and ``peers`` :class:`RelationMeta`
+ items. If needed, the role of the relation definition can be obtained from
+    its :attr:`role <RelationMeta.role>` attribute.
+
+ Attributes:
+ name: The name of this charm
+ summary: Short description of what this charm does
+ description: Long description for this charm
+ maintainers: A list of strings of the email addresses of the maintainers
+ of this charm.
+ tags: Charm store tag metadata for categories associated with this charm.
+ terms: Charm store terms that should be agreed to before this charm can
+ be deployed. (Used for things like licensing issues.)
+ series: The list of supported OS series that this charm can support.
+ The first entry in the list is the default series that will be
+ used by deploy if no other series is requested by the user.
+ subordinate: True/False whether this charm is intended to be used as a
+ subordinate charm.
+ min_juju_version: If supplied, indicates this charm needs features that
+ are not available in older versions of Juju.
+ requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+ provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+ peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+ relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+ sections)
+ storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+ resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+ payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+ extra_bindings: A dict of additional named bindings that a charm can use
+ for network configuration.
+ actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
+ Args:
+ raw: a mapping containing the contents of metadata.yaml
+ actions_raw: a mapping containing the contents of actions.yaml
+
+ """
+
+ def __init__(self, raw: dict = {}, actions_raw: dict = {}):
+ self.name = raw.get('name', '')
+ self.summary = raw.get('summary', '')
+ self.description = raw.get('description', '')
+ self.maintainers = []
+ if 'maintainer' in raw:
+ self.maintainers.append(raw['maintainer'])
+ if 'maintainers' in raw:
+ self.maintainers.extend(raw['maintainers'])
+ self.tags = raw.get('tags', [])
+ self.terms = raw.get('terms', [])
+ self.series = raw.get('series', [])
+ self.subordinate = raw.get('subordinate', False)
+ self.min_juju_version = raw.get('min-juju-version')
+ self.requires = {name: RelationMeta(RelationRole.requires, name, rel)
+ for name, rel in raw.get('requires', {}).items()}
+ self.provides = {name: RelationMeta(RelationRole.provides, name, rel)
+ for name, rel in raw.get('provides', {}).items()}
+ self.peers = {name: RelationMeta(RelationRole.peer, name, rel)
+ for name, rel in raw.get('peers', {}).items()}
+ self.relations = {}
+ self.relations.update(self.requires)
+ self.relations.update(self.provides)
+ self.relations.update(self.peers)
+ self.storages = {name: StorageMeta(name, storage)
+ for name, storage in raw.get('storage', {}).items()}
+ self.resources = {name: ResourceMeta(name, res)
+ for name, res in raw.get('resources', {}).items()}
+ self.payloads = {name: PayloadMeta(name, payload)
+ for name, payload in raw.get('payloads', {}).items()}
+ self.extra_bindings = raw.get('extra-bindings', {})
+ self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
+
+ @classmethod
+ def from_yaml(
+ cls, metadata: typing.Union[str, typing.TextIO],
+ actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
+ """Instantiate a CharmMeta from a YAML description of metadata.yaml.
+
+ Args:
+ metadata: A YAML description of charm metadata (name, relations, etc.)
+ This can be a simple string, or a file-like object. (passed to `yaml.safe_load`).
+ actions: YAML description of Actions for this charm (eg actions.yaml)
+ """
+ meta = _loadYaml(metadata)
+ raw_actions = {}
+ if actions is not None:
+ raw_actions = _loadYaml(actions)
+ if raw_actions is None:
+ raw_actions = {}
+ return cls(meta, raw_actions)
+
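+# A sketch of parsing metadata directly (the YAML content is illustrative):
+#
+#     meta = CharmMeta.from_yaml('''
+#     name: my-charm
+#     summary: An example charm
+#     description: A longer description.
+#     requires:
+#       db:
+#         interface: pgsql
+#     ''')
+#     assert meta.requires['db'].interface_name == 'pgsql'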
+
+class RelationRole(enum.Enum):
+ """An annotation for a charm's role in a relation.
+
+ For each relation a charm's role may be
+
+ - A Peer
+ - A service consumer in the relation ('requires')
+ - A service provider in the relation ('provides')
+ """
+ peer = 'peer'
+ requires = 'requires'
+ provides = 'provides'
+
+ def is_peer(self) -> bool:
+ """Return whether the current role is peer.
+
+ A convenience to avoid having to import charm.
+ """
+ return self is RelationRole.peer
+
+
+class RelationMeta:
+ """Object containing metadata about a relation definition.
+
+    Should not be constructed directly by charm code. It is obtained from one of
+ :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
+ or :attr:`CharmMeta.relations`.
+
+ Attributes:
+ role: This is :class:`RelationRole`; one of peer/requires/provides
+ relation_name: Name of this relation from metadata.yaml
+ interface_name: Optional definition of the interface protocol.
+ scope: "global" or "container" scope based on how the relation should be used.
+ """
+
+ def __init__(self, role: RelationRole, relation_name: str, raw: dict):
+ if not isinstance(role, RelationRole):
+ raise TypeError("role should be a Role, not {!r}".format(role))
+ self.role = role
+ self.relation_name = relation_name
+ self.interface_name = raw['interface']
+ self.scope = raw.get('scope')
+
+
+class StorageMeta:
+ """Object containing metadata about a storage definition.
+
+ Attributes:
+ storage_name: Name of storage
+ type: Storage type
+ description: A text description of the storage
+ read_only: Whether or not the storage is read only
+ minimum_size: Minimum size of storage
+ location: Mount point of storage
+ multiple_range: Range of numeric qualifiers when multiple storage units are used
+ """
+
+ def __init__(self, name, raw):
+ self.storage_name = name
+ self.type = raw['type']
+ self.description = raw.get('description', '')
+ self.shared = raw.get('shared', False)
+ self.read_only = raw.get('read-only', False)
+ self.minimum_size = raw.get('minimum-size')
+ self.location = raw.get('location')
+ self.multiple_range = None
+ if 'multiple' in raw:
+ range_spec = raw['multiple']['range']  # e.g. '3', '0-10', or '1-'
+ if '-' not in range_spec:
+ self.multiple_range = (int(range_spec), int(range_spec))
+ else:
+ parts = range_spec.split('-')
+ self.multiple_range = (int(parts[0]), int(parts[1]) if parts[1] else None)
+
+
+class ResourceMeta:
+ """Object containing metadata about a resource definition.
+
+ Attributes:
+ resource_name: Name of resource
+ filename: Name of file
+ description: A text description of resource
+ """
+
+ def __init__(self, name, raw):
+ self.resource_name = name
+ self.type = raw['type']
+ self.filename = raw.get('filename', None)
+ self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+ """Object containing metadata about a payload definition.
+
+ Attributes:
+ payload_name: Name of payload
+ type: Payload type
+ """
+
+ def __init__(self, name, raw):
+ self.payload_name = name
+ self.type = raw['type']
+
+
+class ActionMeta:
+ """Object containing metadata about an action's definition."""
+
+ def __init__(self, name, raw=None):
+ raw = raw or {}
+ self.name = name
+ self.title = raw.get('title', '')
+ self.description = raw.get('description', '')
+ self.parameters = raw.get('params', {})  # {<parameter name>: <parameter metadata>}
+ self.required = raw.get('required', [])  # [<parameter name>, ...]
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/framework.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/framework.py
new file mode 100644
index 0000000000000000000000000000000000000000..d20c0007ebcd58456a0bac90ae5dc0eaacb9a407
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/framework.py
@@ -0,0 +1,1199 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework infrastructure."""
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import logging
+import marshal
+import os
+import pathlib
+import pdb
+import re
+import sys
+import types
+import weakref
+
+from ops import charm
+from ops.storage import (
+ NoSnapshotError,
+ SQLiteStorage,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Handle:
+ """Handle defines a name for an object in the form of a hierarchical path.
+
+ The provided parent is the object (or that object's handle) that this handle
+ sits under, or None if the object identified by this handle stands by itself
+ as the root of its own hierarchy.
+
+ The handle kind is a string that defines a namespace so objects with the
+ same parent and kind will have unique keys.
+
+ The handle key is a string uniquely identifying the object. No other objects
+ under the same parent and kind may have the same key.
+ """
+
+ def __init__(self, parent, kind, key):
+ if parent and not isinstance(parent, Handle):
+ parent = parent.handle
+ self._parent = parent
+ self._kind = kind
+ self._key = key
+ if parent:
+ if key:
+ self._path = "{}/{}[{}]".format(parent, kind, key)
+ else:
+ self._path = "{}/{}".format(parent, kind)
+ else:
+ if key:
+ self._path = "{}[{}]".format(kind, key)
+ else:
+ self._path = "{}".format(kind)
+
+ def nest(self, kind, key):
+ """Create a new handle as child of the current one."""
+ return Handle(self, kind, key)
+
+ def __hash__(self):
+ return hash((self.parent, self.kind, self.key))
+
+ def __eq__(self, other):
+ return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key)
+
+ def __str__(self):
+ return self.path
+
+ @property
+ def parent(self):
+ """Return own parent handle."""
+ return self._parent
+
+ @property
+ def kind(self):
+ """Return the handle's kind."""
+ return self._kind
+
+ @property
+ def key(self):
+ """Return the handle's key."""
+ return self._key
+
+ @property
+ def path(self):
+ """Return the handle's path."""
+ return self._path
+
+ @classmethod
+ def from_path(cls, path):
+ """Build a handle from the indicated path."""
+ handle = None
+ for pair in path.split("/"):
+ pair = pair.split("[")
+ good = False
+ if len(pair) == 1:
+ kind, key = pair[0], None
+ good = True
+ elif len(pair) == 2:
+ kind, key = pair
+ if key and key[-1] == ']':
+ key = key[:-1]
+ good = True
+ if not good:
+ raise RuntimeError("attempted to restore invalid handle path {}".format(path))
+ handle = Handle(handle, kind, key)
+ return handle
+
+
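+def _example_handle_paths():
+ # A minimal sketch (names invented): Handle.from_path() reverses the
+ # string form exposed by Handle.path, including nesting and keys.
+ h = Handle.from_path('MyCharm/on/config_changed[1]')
+ assert h.path == 'MyCharm/on/config_changed[1]'
+ assert h.kind == 'config_changed' and h.key == '1'
+ assert h.parent.kind == 'on' and h.parent.parent.kind == 'MyCharm'
+
+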
+class EventBase:
+ """The base for all the different Events.
+
+ Inherit this and override 'snapshot' and 'restore' methods to build a custom event.
+ """
+
+ def __init__(self, handle):
+ self.handle = handle
+ self.deferred = False
+
+ def __repr__(self):
+ return "<%s via %s>" % (self.__class__.__name__, self.handle)
+
+ def defer(self):
+ """Defer the event to the future.
+
+ Deferring an event from a handler puts that handler into a queue, to be
+ called again the next time the charm is invoked. This invocation may be
+ the result of an action, or any event other than metric events. The
+ queue of events will be dispatched before the new event is processed.
+
+ Much of this follows from the above, but it is important to point out:
+
+ * ``defer()`` does not interrupt the execution of the current event
+ handler. In almost all cases, a call to ``defer()`` should be followed
+ by an explicit ``return`` from the handler;
+
+ * the re-execution of the deferred event handler starts from the top of
+ the handler method (not where defer was called);
+
+ * only the handlers that actually called ``defer()`` are called again
+ (that is: despite talking about “deferring an event” it is actually
+ the handler/event combination that is deferred); and
+
+ * any deferred events get processed before the event (or action) that
+ caused the current invocation of the charm.
+
+ The general reason to call ``defer()`` is that some precondition
+ isn't yet met. However, care should be exercised as to whether it is
+ better to defer this event so that you see it again, or whether it is
+ better to just wait for the event that indicates the precondition has
+ been met.
+
+ For example, if ``config-changed`` is fired and you are waiting for
+ different config, there is no reason to defer the event: there will
+ be a *different* ``config-changed`` event when the config actually
+ changes, and that is better than checking whether the config might
+ have changed before every other event that occurs.
+
+ Similarly, if you need 2 events to occur before you are ready to
+ proceed (say events A and B), then when you see event A you could
+ choose to ``defer()`` it because you haven't seen B yet. However,
+ that leads to:
+
+ 1. event A fires, calls defer()
+
+ 2. event B fires; the deferred handler for A is called first, still
+ hasn't seen B happen, so A is deferred again. Then the handler for B
+ runs, and progresses since it has seen A.
+
+ 3. At some future time, event C happens, which also checks if A can
+ proceed.
+
+ """
+ logger.debug("Deferring %s.", self)
+ self.deferred = True
+
+ def snapshot(self):
+ """Return the snapshot data that should be persisted.
+
+ Subclasses must override to save any custom state.
+ """
+ return None
+
+ def restore(self, snapshot):
+ """Restore the value state from the given snapshot.
+
+ Subclasses must override to restore their custom state.
+ """
+ self.deferred = False
+
+
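+def _example_defer_pattern(event, ready=False):
+ # A minimal sketch (names invented) of the defer-and-return pattern the
+ # EventBase.defer() docstring recommends: when a precondition isn't met
+ # yet, defer the event and return from the handler immediately.
+ if not ready:
+ event.defer()
+ return
+ # ... the real work would happen here, once the precondition holds ...
+
+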
+class EventSource:
+ """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+ It is generally used as:
+
+ class SomethingHappened(EventBase):
+ pass
+
+ class SomeObject(Object):
+ something_happened = EventSource(SomethingHappened)
+
+ With that, instances of that type will offer the someobj.something_happened
+ attribute which is a BoundEvent and may be used to emit and observe the event.
+ """
+
+ def __init__(self, event_type):
+ if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+ raise RuntimeError(
+ 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
+ self.event_type = event_type
+ self.event_kind = None
+ self.emitter_type = None
+
+ def _set_name(self, emitter_type, event_kind):
+ if self.event_kind is not None:
+ raise RuntimeError(
+ 'EventSource({}) reused as {}.{} and {}.{}'.format(
+ self.event_type.__name__,
+ self.emitter_type.__name__,
+ self.event_kind,
+ emitter_type.__name__,
+ event_kind,
+ ))
+ self.event_kind = event_kind
+ self.emitter_type = emitter_type
+
+ def __get__(self, emitter, emitter_type=None):
+ if emitter is None:
+ return self
+ # Framework might not be available if accessed as CharmClass.on.event
+ # rather than charm_instance.on.event, but in that case it couldn't be
+ # emitted anyway, so there's no point in registering it.
+ framework = getattr(emitter, 'framework', None)
+ if framework is not None:
+ framework.register_type(self.event_type, emitter, self.event_kind)
+ return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+ """Event bound to an Object."""
+
+ def __repr__(self):
+ return '<BoundEvent {} bound to {}.{} at {}>'.format(
+ self.event_type.__name__,
+ type(self.emitter).__name__,
+ self.event_kind,
+ hex(id(self)),
+ )
+
+ def __init__(self, emitter, event_type, event_kind):
+ self.emitter = emitter
+ self.event_type = event_type
+ self.event_kind = event_kind
+
+ def emit(self, *args, **kwargs):
+ """Emit event to all registered observers.
+
+ The current storage state is committed before and after each observer is notified.
+ """
+ framework = self.emitter.framework
+ key = framework._next_event_key()
+ event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+ framework._emit(event)
+
+
+class HandleKind:
+ """Helper descriptor to define the Object.handle_kind field.
+
+ The handle_kind for an object defaults to its type name, but it may
+ be explicitly overridden if desired.
+ """
+
+ def __get__(self, obj, obj_type):
+ kind = obj_type.__dict__.get("handle_kind")
+ if kind:
+ return kind
+ return obj_type.__name__
+
+
+class _Metaclass(type):
+ """Helper class to ensure proper instantiation of Object-derived classes.
+
+ This class currently has a single purpose: events derived from EventSource
+ that are class attributes of Object-derived classes need to be told what
+ their name is in that class. For example, in
+
+ class SomeObject(Object):
+ something_happened = EventSource(SomethingHappened)
+
+ the instance of EventSource needs to know it's called 'something_happened'.
+
+ Starting from python 3.6 we could use __set_name__ on EventSource for this,
+ but until then this (meta)class does the equivalent work.
+
+ TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+ EventSource to __set_name__; everything should continue to work.
+
+ """
+
+ def __new__(typ, *a, **kw):
+ k = super().__new__(typ, *a, **kw)
+ # k is now the Object-derived class; loop over its class attributes
+ for n, v in vars(k).items():
+ # we could do duck typing here if we want to support
+ # non-EventSource-derived shenanigans. We don't.
+ if isinstance(v, EventSource):
+ # this is what 3.6+ does automatically for us:
+ v._set_name(k, n)
+ return k
+
+
+class Object(metaclass=_Metaclass):
+ """Base class of all the charm-related objects."""
+
+ handle_kind = HandleKind()
+
+ def __init__(self, parent, key):
+ kind = self.handle_kind
+ if isinstance(parent, Framework):
+ self.framework = parent
+ # Avoid Framework instances having a circular reference to themselves.
+ if self.framework is self:
+ self.framework = weakref.proxy(self.framework)
+ self.handle = Handle(None, kind, key)
+ else:
+ self.framework = parent.framework
+ self.handle = Handle(parent, kind, key)
+ self.framework._track(self)
+
+ # TODO Detect conflicting handles here.
+
+ @property
+ def model(self):
+ """Shortcut for more simple access the model."""
+ return self.framework.model
+
+
+class ObjectEvents(Object):
+ """Convenience type to allow defining .on attributes at class level."""
+
+ handle_kind = "on"
+
+ def __init__(self, parent=None, key=None):
+ if parent is not None:
+ super().__init__(parent, key)
+ else:
+ self._cache = weakref.WeakKeyDictionary()
+
+ def __get__(self, emitter, emitter_type):
+ if emitter is None:
+ return self
+ instance = self._cache.get(emitter)
+ if instance is None:
+ # Same type, different instance, more data. This unusual construct is
+ # deliberate: people can subclass just this one class to have their own 'on'.
+ instance = self._cache[emitter] = type(self)(emitter)
+ return instance
+
+ @classmethod
+ def define_event(cls, event_kind, event_type):
+ """Define an event on this type at runtime.
+
+ cls: a type to define an event on.
+
+ event_kind: an attribute name that will be used to access the
+ event. Must be a valid python identifier, not be a keyword
+ or an existing attribute.
+
+ event_type: a type of the event to define.
+
+ """
+ prefix = 'unable to define an event with event_kind that '
+ if not event_kind.isidentifier():
+ raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
+ elif keyword.iskeyword(event_kind):
+ raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
+ try:
+ getattr(cls, event_kind)
+ raise RuntimeError(
+ prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
+ except AttributeError:
+ pass
+
+ event_descriptor = EventSource(event_type)
+ event_descriptor._set_name(cls, event_kind)
+ setattr(cls, event_kind, event_descriptor)
+
+ def _event_kinds(self):
+ event_kinds = []
+ # We have to iterate over the class rather than the instance: attribute
+ # access on the instance can trigger properties that call this method
+ # (e.g. event views), which would lead to infinite recursion.
+ for attr_name, attr_value in inspect.getmembers(type(self)):
+ if isinstance(attr_value, EventSource):
+ # We actually care about the bound_event, however, since it
+ # provides the most info for users of this method.
+ event_kinds.append(attr_name)
+ return event_kinds
+
+ def events(self):
+ """Return a mapping of event_kinds to bound_events for all available events."""
+ return {event_kind: getattr(self, event_kind) for event_kind in self._event_kinds()}
+
+ def __getitem__(self, key):
+ return PrefixedEvents(self, key)
+
+ def __repr__(self):
+ k = type(self)
+ event_kinds = ', '.join(sorted(self._event_kinds()))
+ return '<{}.{}: {}>'.format(k.__module__, k.__qualname__, event_kinds)
+
+
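+def _example_define_event():
+ # A minimal sketch (all names invented): register a new event kind on a
+ # custom ObjectEvents subclass at runtime with define_event().
+ class DatabaseReady(EventBase):
+ pass
+
+ class MyCharmEvents(ObjectEvents):
+ pass
+
+ MyCharmEvents.define_event('database_ready', DatabaseReady)
+ # The subclass now exposes a 'database_ready' EventSource descriptor.
+ assert isinstance(MyCharmEvents.database_ready, EventSource)
+
+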
+class PrefixedEvents:
+ """Events to be found in all events using a specific prefix."""
+
+ def __init__(self, emitter, key):
+ self._emitter = emitter
+ self._prefix = key.replace("-", "_") + '_'
+
+ def __getattr__(self, name):
+ return getattr(self._emitter, self._prefix + name)
+
+
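+def _example_prefixed_events(charm):
+ # A one-line sketch (relation name invented): ObjectEvents.__getitem__
+ # returns a PrefixedEvents view, so charm.on['db'].relation_joined
+ # resolves to the charm.on.db_relation_joined attribute.
+ return charm.on['db'].relation_joined
+
+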
+class PreCommitEvent(EventBase):
+ """Events that will be emited first on commit."""
+
+
+class CommitEvent(EventBase):
+ """Events that will be emited second on commit."""
+
+
+class FrameworkEvents(ObjectEvents):
+ """Manager of all framework events."""
+ pre_commit = EventSource(PreCommitEvent)
+ commit = EventSource(CommitEvent)
+
+
+class NoTypeError(Exception):
+ """No class to hold it was found when restoring an event."""
+
+ def __init__(self, handle_path):
+ self.handle_path = handle_path
+
+ def __str__(self):
+ return "cannot restore {} since no class was registered for it".format(self.handle_path)
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$'
+
+
+class Framework(Object):
+ """Main interface to from the Charm to the Operator Framework internals."""
+
+ on = FrameworkEvents()
+
+ # Override properties from Object so that we can set them in __init__.
+ model = None
+ meta = None
+ charm_dir = None
+
+ def __init__(self, storage, charm_dir, meta, model):
+
+ super().__init__(self, None)
+
+ self.charm_dir = charm_dir
+ self.meta = meta
+ self.model = model
+ self._observers = [] # [(observer_path, method_name, parent_path, event_key)]
+ self._observer = weakref.WeakValueDictionary() # {observer_path: observer}
+ self._objects = weakref.WeakValueDictionary()
+ self._type_registry = {} # {(parent_path, kind): cls}
+ self._type_known = set() # {cls}
+
+ if isinstance(storage, (str, pathlib.Path)):
+ logger.warning(
+ "deprecated: Framework now takes a Storage not a path")
+ storage = SQLiteStorage(storage)
+ self._storage = storage
+
+ # We can't use the higher-level StoredState because it relies on events.
+ self.register_type(StoredStateData, None, StoredStateData.handle_kind)
+ stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
+ try:
+ self._stored = self.load_snapshot(stored_handle)
+ except NoSnapshotError:
+ self._stored = StoredStateData(self, '_stored')
+ self._stored['event_count'] = 0
+
+ # Flag to indicate that we already presented the welcome message in a debugger breakpoint
+ self._breakpoint_welcomed = False
+
+ # Parse the env var once, which may be used multiple times later
+ debug_at = os.environ.get('JUJU_DEBUG_AT')
+ self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
+ def set_breakpointhook(self):
+ """Hook into sys.breakpointhook so the builtin breakpoint() works as expected.
+
+ This method is called by ``main``, and is not intended to be
+ called by users of the framework itself outside of perhaps
+ some testing scenarios.
+
+ It returns the old value of sys.breakpointhook.
+
+ The breakpoint function is a Python >= 3.7 feature.
+
+ This method was added in ops 1.0; before that, it was done as
+ part of the Framework's __init__.
+ """
+ old_breakpointhook = getattr(sys, 'breakpointhook', None)
+ if old_breakpointhook is not None:
+ # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
+ # breakpoint()
+ sys.breakpointhook = self.breakpoint
+ return old_breakpointhook
+
+ def close(self):
+ """Close the underlying backends."""
+ self._storage.close()
+
+ def _track(self, obj):
+ """Track object and ensure it is the only object created using its handle path."""
+ if obj is self:
+ # Framework objects don't track themselves
+ return
+ if obj.handle.path in self.framework._objects:
+ raise RuntimeError(
+ 'two objects claiming to be {} have been created'.format(obj.handle.path))
+ self._objects[obj.handle.path] = obj
+
+ def _forget(self, obj):
+ """Stop tracking the given object. See also _track."""
+ self._objects.pop(obj.handle.path, None)
+
+ def commit(self):
+ """Save changes to the underlying backends."""
+ # Give a chance for objects to persist data they want to before a commit is made.
+ self.on.pre_commit.emit()
+ # Make sure snapshots are saved by instances of StoredStateData. Any possible state
+ # modifications in on_commit handlers of instances of other classes will not be persisted.
+ self.on.commit.emit()
+ # Save our event count after all events have been emitted.
+ self.save_snapshot(self._stored)
+ self._storage.commit()
+
+ def register_type(self, cls, parent, kind=None):
+ """Register a type to a handle."""
+ if parent and not isinstance(parent, Handle):
+ parent = parent.handle
+ if parent:
+ parent_path = parent.path
+ else:
+ parent_path = None
+ if not kind:
+ kind = cls.handle_kind
+ self._type_registry[(parent_path, kind)] = cls
+ self._type_known.add(cls)
+
+ def save_snapshot(self, value):
+ """Save a persistent snapshot of the provided value.
+
+ The provided value must implement the following interface:
+
+ value.handle = Handle(...)
+ value.snapshot() => {...} # Simple builtin types only.
+ value.restore(snapshot) # Restore custom state from prior snapshot.
+ """
+ if type(value) not in self._type_known:
+ raise RuntimeError(
+ 'cannot save {} values before registering that type'.format(type(value).__name__))
+ data = value.snapshot()
+
+ # Use marshal as a validator, enforcing the use of simple types, since the
+ # information is later really pickled, which is too error-prone for future
+ # evolution of the stored data (e.g. if the developer stores a custom object
+ # and later changes its class name; when unpickling, the original class will
+ # not be there and event data loading will fail).
+ try:
+ marshal.dumps(data)
+ except ValueError:
+ msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+ raise ValueError(msg.format(value.__class__.__name__, data))
+
+ self._storage.save_snapshot(value.handle.path, data)
+
+ def load_snapshot(self, handle):
+ """Load a persistent snapshot."""
+ parent_path = None
+ if handle.parent:
+ parent_path = handle.parent.path
+ cls = self._type_registry.get((parent_path, handle.kind))
+ if not cls:
+ raise NoTypeError(handle.path)
+ data = self._storage.load_snapshot(handle.path)
+ obj = cls.__new__(cls)
+ obj.framework = self
+ obj.handle = handle
+ obj.restore(data)
+ self._track(obj)
+ return obj
+
+ def drop_snapshot(self, handle):
+ """Discard a persistent snapshot."""
+ self._storage.drop_snapshot(handle.path)
+
+ def observe(self, bound_event: BoundEvent, observer: types.MethodType):
+ """Register observer to be called when bound_event is emitted.
+
+ The bound_event is generally provided as an attribute of the object that emits
+ the event, and is created in this style::
+
+ class SomeObject:
+ something_happened = Event(SomethingHappened)
+
+ That event may be observed as::
+
+ framework.observe(someobj.something_happened, self._on_something_happened)
+
+ Raises:
+ RuntimeError: if bound_event or observer are the wrong type.
+ """
+ if not isinstance(bound_event, BoundEvent):
+ raise RuntimeError(
+ 'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+ bound_event))
+ if not isinstance(observer, types.MethodType):
+ # help users of older versions of the framework
+ if isinstance(observer, charm.CharmBase):
+ raise TypeError(
+ 'observer methods must now be explicitly provided;'
+ ' please replace observe(self.on.{0}, self)'
+ ' with e.g. observe(self.on.{0}, self._on_{0})'.format(
+ bound_event.event_kind))
+ raise RuntimeError(
+ 'Framework.observe requires a method as third parameter, got {}'.format(observer))
+
+ event_type = bound_event.event_type
+ event_kind = bound_event.event_kind
+ emitter = bound_event.emitter
+
+ self.register_type(event_type, emitter, event_kind)
+
+ if hasattr(emitter, "handle"):
+ emitter_path = emitter.handle.path
+ else:
+ raise RuntimeError(
+ 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+ # Validate that the method has an acceptable call signature.
+ sig = inspect.signature(observer)
+ # Self isn't included in the params list, so the first arg will be the event.
+ extra_params = list(sig.parameters.values())[1:]
+
+ method_name = observer.__name__
+ observer = observer.__self__
+ if not sig.parameters:
+ raise TypeError(
+ '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+ elif any(param.default is inspect.Parameter.empty for param in extra_params):
+ # Allow for additional optional params, since there's no reason to exclude them, but
+ # required params will break.
+ raise TypeError(
+ '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))
+
+ # TODO Prevent the exact same parameters from being registered more than once.
+
+ self._observer[observer.handle.path] = observer
+ self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
+ def _next_event_key(self):
+ """Return the next event key that should be used, incrementing the internal counter."""
+ # Increment the count first; this means the keys will start at 1, and 0
+ # means no events have been emitted.
+ self._stored['event_count'] += 1
+ return str(self._stored['event_count'])
+
+ def _emit(self, event):
+ """See BoundEvent.emit for the public way to call this."""
+ saved = False
+ event_path = event.handle.path
+ event_kind = event.handle.kind
+ parent_path = event.handle.parent.path
+ # TODO Track observers by (parent_path, event_kind) rather than as a list of
+ # all observers, avoiding a linear search through all observers for every event.
+ for observer_path, method_name, _parent_path, _event_kind in self._observers:
+ if _parent_path != parent_path:
+ continue
+ if _event_kind and _event_kind != event_kind:
+ continue
+ if not saved:
+ # Save the event for all known observers before the first notification
+ # takes place, so that either everyone interested sees it, or nobody does.
+ self.save_snapshot(event)
+ saved = True
+ # Again, only commit this after all notices are saved.
+ self._storage.save_notice(event_path, observer_path, method_name)
+ if saved:
+ self._reemit(event_path)
+
+ def reemit(self):
+ """Reemit previously deferred events to the observers that deferred them.
+
+ Only the specific observers that have previously deferred the event will be
+ notified again. Observers that asked to be notified about an event after it
+ was first emitted won't be notified, as that would mean potentially observing
+ events out of order.
+ """
+ self._reemit()
+
+ def _reemit(self, single_event_path=None):
+ last_event_path = None
+ deferred = True
+ for event_path, observer_path, method_name in self._storage.notices(single_event_path):
+ event_handle = Handle.from_path(event_path)
+
+ if last_event_path != event_path:
+ if not deferred and last_event_path is not None:
+ self._storage.drop_snapshot(last_event_path)
+ last_event_path = event_path
+ deferred = False
+
+ try:
+ event = self.load_snapshot(event_handle)
+ except NoTypeError:
+ self._storage.drop_notice(event_path, observer_path, method_name)
+ continue
+
+ event.deferred = False
+ observer = self._observer.get(observer_path)
+ if observer:
+ if single_event_path is None:
+ logger.debug("Re-emitting %s.", event)
+ custom_handler = getattr(observer, method_name, None)
+ if custom_handler:
+ event_is_from_juju = isinstance(event, charm.HookEvent)
+ event_is_action = isinstance(event, charm.ActionEvent)
+ if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at:
+ # Present the welcome message and run under PDB.
+ self._show_debug_code_message()
+ pdb.runcall(custom_handler, event)
+ else:
+ # Regular call to the registered method.
+ custom_handler(event)
+
+ if event.deferred:
+ deferred = True
+ else:
+ self._storage.drop_notice(event_path, observer_path, method_name)
+ # We intentionally consider this event to be dead and reload it from
+ # scratch in the next pass.
+ self.framework._forget(event)
+
+ if not deferred and last_event_path is not None:
+ self._storage.drop_snapshot(last_event_path)
+
+ def _show_debug_code_message(self):
+ """Present the welcome message (only once!) when using debugger functionality."""
+ if not self._breakpoint_welcomed:
+ self._breakpoint_welcomed = True
+ print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
+ def breakpoint(self, name=None):
+ """Add breakpoint, optionally named, at the place where this method is called.
+
+ For the breakpoint to be activated the JUJU_DEBUG_AT environment variable
+ must be set to "all" or to the specific name parameter provided, if any. In every
+ other situation calling this method does nothing.
+
+ The framework also provides a standard breakpoint named "hook", that will
+ stop execution when a hook event is about to be handled.
+
+ For those reasons, the "all" and "hook" breakpoint names are reserved.
+ """
+ # If given, validate the name comply with all the rules
+ if name is not None:
+ if not isinstance(name, str):
+ raise TypeError('breakpoint names must be strings')
+ if name in ('hook', 'all'):
+ raise ValueError('breakpoint names "all" and "hook" are reserved')
+ if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
+ raise ValueError('breakpoint names must look like "foo" or "foo-bar"')
+
+ indicated_breakpoints = self._juju_debug_at
+ if not indicated_breakpoints:
+ return
+
+ if 'all' in indicated_breakpoints or name in indicated_breakpoints:
+ self._show_debug_code_message()
+
+ # If we call set_trace() directly it will open the debugger *here*, so indicating
+ # it to use our caller's frame
+ code_frame = inspect.currentframe().f_back
+ pdb.Pdb().set_trace(code_frame)
+ else:
+ logger.warning(
+ "Breakpoint %r skipped (not found in the requested breakpoints: %s)",
+ name, indicated_breakpoints)
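+
+ # A usage sketch (breakpoint name invented): with JUJU_DEBUG_AT=db-init
+ # (or JUJU_DEBUG_AT=all) exported in the charm's environment, the call
+ # below drops execution into pdb; otherwise it is a no-op:
+ #
+ #     self.framework.breakpoint('db-init')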
+
+ def remove_unreferenced_events(self):
+ """Remove events from storage that are not referenced.
+
+ In older versions of the framework, events that had no observers would get recorded but
+ never deleted. This makes a best effort to find these events and remove them from the
+ database.
+ """
+ event_regex = re.compile(_event_regex)
+ to_remove = []
+ for handle_path in self._storage.list_snapshots():
+ if event_regex.match(handle_path):
+ notices = self._storage.notices(handle_path)
+ if next(notices, None) is None:
+ # There are no notices for this handle_path, it is valid to remove it
+ to_remove.append(handle_path)
+ for handle_path in to_remove:
+ self._storage.drop_snapshot(handle_path)
+
+
+class StoredStateData(Object):
+ """Manager of the stored data."""
+
+ def __init__(self, parent, attr_name):
+ super().__init__(parent, attr_name)
+ self._cache = {}
+ self.dirty = False
+
+ def __getitem__(self, key):
+ return self._cache.get(key)
+
+ def __setitem__(self, key, value):
+ self._cache[key] = value
+ self.dirty = True
+
+ def __contains__(self, key):
+ return key in self._cache
+
+ def snapshot(self):
+ """Return the current state."""
+ return self._cache
+
+ def restore(self, snapshot):
+ """Restore current state to the given snapshot."""
+ self._cache = snapshot
+ self.dirty = False
+
+ def on_commit(self, event):
+ """Save changes to the storage backend."""
+ if self.dirty:
+ self.framework.save_snapshot(self)
+ self.dirty = False
+
+
+class BoundStoredState:
+ """Stored state data bound to a specific Object."""
+
+ def __init__(self, parent, attr_name):
+ parent.framework.register_type(StoredStateData, parent)
+
+ handle = Handle(parent, StoredStateData.handle_kind, attr_name)
+ try:
+ data = parent.framework.load_snapshot(handle)
+ except NoSnapshotError:
+ data = StoredStateData(parent, attr_name)
+
+ # __dict__ is used to avoid infinite recursion.
+ self.__dict__["_data"] = data
+ self.__dict__["_attr_name"] = attr_name
+
+ parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+ def __getattr__(self, key):
+ # "on" is the only reserved key that can't be used in the data map.
+ if key == "on":
+ return self._data.on
+ if key not in self._data:
+ raise AttributeError("attribute '{}' is not stored".format(key))
+ return _wrap_stored(self._data, self._data[key])
+
+ def __setattr__(self, key, value):
+ if key == "on":
+ raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+ value = _unwrap_stored(self._data, value)
+
+ if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+ raise AttributeError(
+ 'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+ key, type(value).__name__))
+
+ self._data[key] = value  # already unwrapped above
+
+ def set_default(self, **kwargs):
+ """Set the value of any given key if it has not already been set."""
+ for k, v in kwargs.items():
+ if k not in self._data:
+ self._data[k] = v
+
+
+class StoredState:
+ """A class used to store data the charm needs persisted across invocations.
+
+ Example::
+
+ class MyClass(Object):
+ _stored = StoredState()
+
+ Instances of `MyClass` can transparently save state between invocations by
+ setting attributes on `_stored`. Initial state should be set with
+ `set_default` on the bound object, that is::
+
+ class MyClass(Object):
+ _stored = StoredState()
+
+ def __init__(self, parent, key):
+ super().__init__(parent, key)
+ self._stored.set_default(seen=set())
+ self.framework.observe(self.on.seen, self._on_seen)
+
+ def _on_seen(self, event):
+ self._stored.seen.add(event.uuid)
+
+ """
+
+ def __init__(self):
+ self.parent_type = None
+ self.attr_name = None
+
+ def __get__(self, parent, parent_type=None):
+ if self.parent_type is not None and self.parent_type not in parent_type.mro():
+ # the StoredState instance is being shared between two unrelated classes
+ # -> unclear what is expected of us -> bail out
+ raise RuntimeError(
+ 'StoredState shared by {} and {}'.format(
+ self.parent_type.__name__, parent_type.__name__))
+
+ if parent is None:
+ # accessing via the class directly (e.g. MyClass.stored)
+ return self
+
+ bound = None
+ if self.attr_name is not None:
+ bound = parent.__dict__.get(self.attr_name)
+ if bound is not None:
+ # we already have the thing from a previous pass, huzzah
+ return bound
+
+ # need to find ourselves amongst the parent's bases
+ for cls in parent_type.mro():
+ for attr_name, attr_value in cls.__dict__.items():
+ if attr_value is not self:
+ continue
+ # we've found ourselves! is it the first time?
+ if bound is not None:
+ # the StoredState instance is being stored in two different
+ # attributes -> unclear what is expected of us -> bail out
+ raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
+ cls.__name__, self.attr_name, attr_name))
+ # we've found ourselves for the first time; save where, and bind the object
+ self.attr_name = attr_name
+ self.parent_type = cls
+ bound = BoundStoredState(parent, attr_name)
+
+ if bound is not None:
+ # cache the bound object to avoid the expensive lookup the next time
+ # (don't use setattr, to keep things symmetric with the fast-path lookup above)
+ parent.__dict__[self.attr_name] = bound
+ return bound
+
+ raise AttributeError(
+ 'cannot find {} attribute in type {}'.format(
+ self.__class__.__name__, parent_type.__name__))
+
+
+def _wrap_stored(parent_data, value):
+ t = type(value)
+ if t is dict:
+ return StoredDict(parent_data, value)
+ if t is list:
+ return StoredList(parent_data, value)
+ if t is set:
+ return StoredSet(parent_data, value)
+ return value
+
+
+def _unwrap_stored(parent_data, value):
+ t = type(value)
+ if t is StoredDict or t is StoredList or t is StoredSet:
+ return value._under
+ return value
+
+
+def _wrapped_repr(obj):
+ t = type(obj)
+ if obj._under:
+ return "{}.{}({!r})".format(t.__module__, t.__name__, obj._under)
+ else:
+ return "{}.{}()".format(t.__module__, t.__name__)
+
+
+class StoredDict(collections.abc.MutableMapping):
+ """A dict-like object that uses the StoredState as backend."""
+
+ def __init__(self, stored_data, under):
+ self._stored_data = stored_data
+ self._under = under
+
+ def __getitem__(self, key):
+ return _wrap_stored(self._stored_data, self._under[key])
+
+ def __setitem__(self, key, value):
+ self._under[key] = _unwrap_stored(self._stored_data, value)
+ self._stored_data.dirty = True
+
+ def __delitem__(self, key):
+ del self._under[key]
+ self._stored_data.dirty = True
+
+ def __iter__(self):
+ return self._under.__iter__()
+
+ def __len__(self):
+ return len(self._under)
+
+ def __eq__(self, other):
+ if isinstance(other, StoredDict):
+ return self._under == other._under
+ elif isinstance(other, collections.abc.Mapping):
+ return self._under == other
+ else:
+ return NotImplemented
+
+ __repr__ = _wrapped_repr
+
+
+class StoredList(collections.abc.MutableSequence):
+ """A list-like object that uses the StoredState as backend."""
+
+ def __init__(self, stored_data, under):
+ self._stored_data = stored_data
+ self._under = under
+
+ def __getitem__(self, index):
+ return _wrap_stored(self._stored_data, self._under[index])
+
+ def __setitem__(self, index, value):
+ self._under[index] = _unwrap_stored(self._stored_data, value)
+ self._stored_data.dirty = True
+
+ def __delitem__(self, index):
+ del self._under[index]
+ self._stored_data.dirty = True
+
+ def __len__(self):
+ return len(self._under)
+
+ def insert(self, index, value):
+ """Insert value before index."""
+ self._under.insert(index, value)
+ self._stored_data.dirty = True
+
+ def append(self, value):
+ """Append value to the end of the list."""
+ self._under.append(value)
+ self._stored_data.dirty = True
+
+ def __eq__(self, other):
+ if isinstance(other, StoredList):
+ return self._under == other._under
+ elif isinstance(other, collections.abc.Sequence):
+ return self._under == other
+ else:
+ return NotImplemented
+
+ def __lt__(self, other):
+ if isinstance(other, StoredList):
+ return self._under < other._under
+ elif isinstance(other, collections.abc.Sequence):
+ return self._under < other
+ else:
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, StoredList):
+ return self._under <= other._under
+ elif isinstance(other, collections.abc.Sequence):
+ return self._under <= other
+ else:
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, StoredList):
+ return self._under > other._under
+ elif isinstance(other, collections.abc.Sequence):
+ return self._under > other
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, StoredList):
+ return self._under >= other._under
+ elif isinstance(other, collections.abc.Sequence):
+ return self._under >= other
+ else:
+ return NotImplemented
+
+ __repr__ = _wrapped_repr
+
+
+class StoredSet(collections.abc.MutableSet):
+ """A set-like object that uses the StoredState as backend."""
+
+ def __init__(self, stored_data, under):
+ self._stored_data = stored_data
+ self._under = under
+
+ def add(self, key):
+ """Add a key to a set.
+
+ This has no effect if the key is already present.
+ """
+ self._under.add(key)
+ self._stored_data.dirty = True
+
+ def discard(self, key):
+ """Remove a key from a set if it is a member.
+
+ If the key is not a member, do nothing.
+ """
+ self._under.discard(key)
+ self._stored_data.dirty = True
+
+ def __contains__(self, key):
+ return key in self._under
+
+ def __iter__(self):
+ return self._under.__iter__()
+
+ def __len__(self):
+ return len(self._under)
+
+ @classmethod
+ def _from_iterable(cls, it):
+ """Construct an instance of the class from any iterable input.
+
+ Per https://docs.python.org/3/library/collections.abc.html
+ if the Set mixin is being used in a class with a different constructor signature,
+ you will need to override _from_iterable() with a classmethod that can construct
+ new instances from an iterable argument.
+ """
+ return set(it)
+
+ def __le__(self, other):
+ if isinstance(other, StoredSet):
+ return self._under <= other._under
+ elif isinstance(other, collections.abc.Set):
+ return self._under <= other
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, StoredSet):
+ return self._under >= other._under
+ elif isinstance(other, collections.abc.Set):
+ return self._under >= other
+ else:
+ return NotImplemented
+
+ def __eq__(self, other):
+ if isinstance(other, StoredSet):
+ return self._under == other._under
+ elif isinstance(other, collections.abc.Set):
+ return self._under == other
+ else:
+ return NotImplemented
+
+ __repr__ = _wrapped_repr
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/jujuversion.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/jujuversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..61d420d369d9b0e75b9c2c242574ddcd4b89be51
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/jujuversion.py
@@ -0,0 +1,114 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A helper to work with the Juju version."""
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+ """Helper to work with the Juju version.
+
+ It knows how to parse the ``JUJU_VERSION`` environment variable, and exposes different
+ capabilities according to the specific version, allowing also to compare with other
+ versions.
+ """
+
+ PATTERN = r'''^
+ (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> are always there
+ ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+ (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+ '''
+
+ def __init__(self, version):
+ m = re.match(self.PATTERN, version, re.VERBOSE)
+ if not m:
+ raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+ d = m.groupdict()
+ self.major = int(m.group('major'))
+ self.minor = int(m.group('minor'))
+ self.tag = d['tag'] or ''
+ self.patch = int(d['patch'] or 0)
+ self.build = int(d['build'] or 0)
+
+ def __repr__(self):
+ if self.tag:
+ s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+ else:
+ s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+ if self.build > 0:
+ s += '.{}'.format(self.build)
+ return s
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if isinstance(other, str):
+ other = type(self)(other)
+ elif not isinstance(other, JujuVersion):
+ raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+ return (
+ self.major == other.major
+ and self.minor == other.minor
+ and self.tag == other.tag
+ and self.build == other.build
+ and self.patch == other.patch)
+
+ def __lt__(self, other):
+ if self is other:
+ return False
+ if isinstance(other, str):
+ other = type(self)(other)
+ elif not isinstance(other, JujuVersion):
+ raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+ if self.major != other.major:
+ return self.major < other.major
+ elif self.minor != other.minor:
+ return self.minor < other.minor
+ elif self.tag != other.tag:
+ if not self.tag:
+ return False
+ elif not other.tag:
+ return True
+ return self.tag < other.tag
+ elif self.patch != other.patch:
+ return self.patch < other.patch
+ elif self.build != other.build:
+ return self.build < other.build
+ return False
+
+ @classmethod
+ def from_environ(cls) -> 'JujuVersion':
+ """Build a JujuVersion from JUJU_VERSION."""
+ v = os.environ.get('JUJU_VERSION')
+ if v is None:
+ v = '0.0.0'
+ return cls(v)
+
+ def has_app_data(self) -> bool:
+ """Determine whether this juju version knows about app data."""
+ return (self.major, self.minor, self.patch) >= (2, 7, 0)
+
+ def is_dispatch_aware(self) -> bool:
+ """Determine whether this juju version knows about dispatch."""
+ return (self.major, self.minor, self.patch) >= (2, 8, 0)
+
+ def has_controller_storage(self) -> bool:
+ """Determine whether this juju version supports controller-side storage."""
+ return (self.major, self.minor, self.patch) >= (2, 8, 0)
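+
+
+def _example_juju_version():
+ # A minimal sketch (version strings invented): parse, compare, and probe
+ # feature support for Juju versions.
+ v = JujuVersion('2.8.1')
+ assert v > JujuVersion('2.7.6')
+ assert v.has_app_data() and v.is_dispatch_aware()
+ assert str(JujuVersion('2.9-rc1')) == '2.9-rc1'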
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/lib/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..22b5a84e517df8a061b7ca2742678536a481b616
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/lib/__init__.py
@@ -0,0 +1,264 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Infrastructure for the opslib functionality."""
+
+import logging
+import os
+import re
+import sys
+
+from ast import literal_eval
+from importlib.util import module_from_spec
+from importlib.machinery import ModuleSpec
+from pkgutil import get_importer
+from types import ModuleType
+from typing import List
+
+__all__ = ('use', 'autoimport')
+
+logger = logging.getLogger(__name__)
+
+_libraries = None
+
+_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''')
+_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''')
+
+# Not perfect, but should do for now.
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
+
+
+def use(name: str, api: int, author: str) -> ModuleType:
+ """Use a library from the ops libraries.
+
+ Args:
+ name: the name of the library requested.
+ api: the API version of the library.
+ author: the author of the library. If not given, requests the
+ one in the standard library.
+
+ Raises:
+ ImportError: if the library cannot be found.
+ TypeError: if the name, api, or author are the wrong type.
+ ValueError: if the name, api, or author are invalid.
+ """
+ if not isinstance(name, str):
+ raise TypeError("invalid library name: {!r} (must be a str)".format(name))
+ if not isinstance(author, str):
+ raise TypeError("invalid library author: {!r} (must be a str)".format(author))
+ if not isinstance(api, int):
+ raise TypeError("invalid library API: {!r} (must be an int)".format(api))
+ if api < 0:
+ raise ValueError('invalid library api: {} (must be ≥0)'.format(api))
+ if not _libname_re.match(name):
+ raise ValueError("invalid library name: {!r} (chars and digits only)".format(name))
+ if not _libauthor_re.match(author):
+ raise ValueError("invalid library author email: {!r}".format(author))
+
+ if _libraries is None:
+ autoimport()
+
+ versions = _libraries.get((name, author), ())
+ for lib in versions:
+ if lib.api == api:
+ return lib.import_module()
+
+ others = ', '.join(str(lib.api) for lib in versions)
+ if others:
+ msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format(
+ name, author, api, others)
+ else:
+ msg = 'cannot find library "{}" from "{}"'.format(name, author)
+
+ raise ImportError(msg, name=name)
+
+
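+def _example_use():
+ # A hypothetical sketch (library name and author invented): resolve an
+ # opslib library by name, API major version, and author email. This call
+ # raises ImportError if no matching library is on sys.path.
+ return use('ingress', 1, 'charmers@example.com')
+
+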
+def autoimport():
+ """Find all libs in the path and enable use of them.
+
+ You only need to call this if you've installed a package or
+ otherwise changed sys.path in the current run, and need to see the
+ changes. Otherwise libraries are found on first call of `use`.
+ """
+ global _libraries
+ _libraries = {}
+ for spec in _find_all_specs(sys.path):
+ lib = _parse_lib(spec)
+ if lib is None:
+ continue
+
+ versions = _libraries.setdefault((lib.name, lib.author), [])
+ versions.append(lib)
+ versions.sort(reverse=True)
+
+
+def _find_all_specs(path):
+ for sys_dir in path:
+ if sys_dir == "":
+ sys_dir = "."
+ try:
+ top_dirs = os.listdir(sys_dir)
+ except (FileNotFoundError, NotADirectoryError):
+ continue
+ except OSError as e:
+ logger.debug("Tried to look for ops.lib packages under '%s': %s", sys_dir, e)
+ continue
+ logger.debug("Looking for ops.lib packages under '%s'", sys_dir)
+ for top_dir in top_dirs:
+ opslib = os.path.join(sys_dir, top_dir, 'opslib')
+ try:
+ lib_dirs = os.listdir(opslib)
+ except (FileNotFoundError, NotADirectoryError):
+ continue
+ except OSError as e:
+ logger.debug(" Tried '%s': %s", opslib, e) # *lots* of things checked here
+ continue
+ else:
+ logger.debug(" Trying '%s'", opslib)
+ finder = get_importer(opslib)
+ if finder is None:
+ logger.debug(" Finder for '%s' is None", opslib)
+ continue
+ if not hasattr(finder, 'find_spec'):
+ logger.debug(" Finder for '%s' has no find_spec", opslib)
+ continue
+ for lib_dir in lib_dirs:
+ spec_name = "{}.opslib.{}".format(top_dir, lib_dir)
+ spec = finder.find_spec(spec_name)
+ if spec is None:
+ logger.debug(" No spec for %r", spec_name)
+ continue
+ if spec.loader is None:
+ # a namespace package; not supported
+ logger.debug(" No loader for %r (probably a namespace package)", spec_name)
+ continue
+
+ logger.debug(" Found %r", spec_name)
+ yield spec
+
+
+# only the first this many lines of a file are looked at for the LIB* constants
+_MAX_LIB_LINES = 99
+# these keys, with these types, are needed to have an opslib
+_NEEDED_KEYS = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int}
+
+
+def _join_and(keys: List[str]) -> str:
+ if len(keys) == 0:
+ return ""
+ if len(keys) == 1:
+ return keys[0]
+ return ", ".join(keys[:-1]) + ", and " + keys[-1]
+
+
+class _Missing:
+ """Helper to get the difference between what was found and what was needed when logging."""
+
+ def __init__(self, found):
+ self._found = found
+
+ def __str__(self):
+ exp = set(_NEEDED_KEYS)
+ got = set(self._found)
+ if len(got) == 0:
+ return "missing {}".format(_join_and(sorted(exp)))
+ return "got {}, but missing {}".format(
+ _join_and(sorted(got)),
+ _join_and(sorted(exp - got)))
+
+
+def _parse_lib(spec):
+ if spec.origin is None:
+ # "can't happen"
+ logger.warning("No origin for %r (no idea why; please report)", spec.name)
+ return None
+
+ logger.debug(" Parsing %r", spec.name)
+
+ try:
+ with open(spec.origin, 'rt', encoding='utf-8') as f:
+ libinfo = {}
+ for n, line in enumerate(f):
+ if len(libinfo) == len(_NEEDED_KEYS):
+ break
+ if n > _MAX_LIB_LINES:
+ logger.debug(
+ " Missing opslib metadata after reading to line %d: %s",
+ _MAX_LIB_LINES, _Missing(libinfo))
+ return None
+ m = _libline_re.match(line)
+ if m is None:
+ continue
+ key, value = m.groups()
+ if key in _NEEDED_KEYS:
+ value = literal_eval(value)
+ if not isinstance(value, _NEEDED_KEYS[key]):
+ logger.debug(
+ " Bad type for %s: expected %s, got %s",
+ key, _NEEDED_KEYS[key].__name__, type(value).__name__)
+ return None
+ libinfo[key] = value
+ else:
+ if len(libinfo) != len(_NEEDED_KEYS):
+ logger.debug(
+ " Missing opslib metadata after reading to end of file: %s",
+ _Missing(libinfo))
+ return None
+ except Exception as e:
+ logger.debug(" Failed: %s", e)
+ return None
+
+ lib = _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH'])
+ logger.debug(" Success: found library %s", lib)
+
+ return lib
+
+
+class _Lib:
+
+ def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int):
+ self.spec = spec
+ self.name = name
+ self.author = author
+ self.api = api
+ self.patch = patch
+
+ self._module = None
+
+ def __repr__(self):
+ return "<_Lib {}>".format(self)
+
+ def __str__(self):
+ return "{0.name} by {0.author}, API {0.api}, patch {0.patch}".format(self)
+
+ def import_module(self) -> ModuleType:
+ if self._module is None:
+ module = module_from_spec(self.spec)
+ self.spec.loader.exec_module(module)
+ self._module = module
+ return self._module
+
+ def __eq__(self, other):
+ if not isinstance(other, _Lib):
+ return NotImplemented
+ a = (self.name, self.author, self.api, self.patch)
+ b = (other.name, other.author, other.api, other.patch)
+ return a == b
+
+ def __lt__(self, other):
+ if not isinstance(other, _Lib):
+ return NotImplemented
+ a = (self.name, self.author, self.api, self.patch)
+ b = (other.name, other.author, other.api, other.patch)
+ return a < b
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/log.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..b47013dd597c5500ea763d9e4beada10e6f2ca87
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/log.py
@@ -0,0 +1,58 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface to emit messages to the Juju logging system."""
+
+import sys
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+ """A handler for sending logs to Juju via juju-log."""
+
+ def __init__(self, model_backend, level=logging.DEBUG):
+ super().__init__(level)
+ self.model_backend = model_backend
+
+ def emit(self, record):
+ """Send the specified logging record to the Juju backend.
+
+ This method is not used directly by the Operator Framework code, but by
+ :class:`logging.Handler` itself as part of the logging machinery.
+ """
+ self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+ """Setup python logging to forward messages to juju-log.
+
+ By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+ Charmers can also set their own default log level with::
+
+ logging.getLogger().setLevel(logging.INFO)
+
+ model_backend -- a ModelBackend to use for juju-log
+ debug -- if True, write logs to stderr as well as to juju-log.
+ """
+ logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(JujuLogHandler(model_backend))
+ if debug:
+ handler = logging.StreamHandler()
+ formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+ sys.excepthook = lambda etype, value, tb: logger.error(
+ "Uncaught exception while in charm code:", exc_info=(etype, value, tb))
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/main.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..f18f88ae0eff807e065e5df778a16cd83ca560eb
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/main.py
@@ -0,0 +1,406 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main entry point to the Operator Framework."""
+
+import inspect
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import typing
+import warnings
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import ops.storage
+
+from ops.log import setup_root_logging
+from ops.jujuversion import JujuVersion
+
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _exe_path(path: Path) -> typing.Optional[Path]:
+ """Find and return the full path to the given binary.
+
+ Here path is the absolute path to a binary, but might be missing an extension.
+ """
+ p = shutil.which(path.name, mode=os.F_OK, path=str(path.parent))
+ if p is None:
+ return None
+ return Path(p)
+
+
+def _get_charm_dir():
+ charm_dir = os.environ.get("JUJU_CHARM_DIR")
+ if charm_dir is None:
+ # Assume $JUJU_CHARM_DIR/lib/ops/main.py structure.
+ charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+ else:
+ charm_dir = Path(charm_dir).resolve()
+ return charm_dir
+
+
+def _create_event_link(charm, bound_event, link_to):
+ """Create a symlink for a particular event.
+
+ charm -- A charm object.
+ bound_event -- An event for which to create a symlink.
+ link_to -- What the event link should point to
+ """
+ if issubclass(bound_event.event_type, ops.charm.HookEvent):
+ event_dir = charm.framework.charm_dir / 'hooks'
+ event_path = event_dir / bound_event.event_kind.replace('_', '-')
+ elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+ if not bound_event.event_kind.endswith("_action"):
+ raise RuntimeError(
+ 'action event name {} needs _action suffix'.format(bound_event.event_kind))
+ event_dir = charm.framework.charm_dir / 'actions'
+ # The event_kind is suffixed with "_action" while the executable is not.
+ event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+ else:
+ raise RuntimeError(
+ 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+ event_dir.mkdir(exist_ok=True)
+ if not event_path.exists():
+ target_path = os.path.relpath(link_to, str(event_dir))
+
+ # Ignore the non-symlink files or directories
+ # assuming the charm author knows what they are doing.
+ logger.debug(
+ 'Creating a new relative symlink at %s pointing to %s',
+ event_path, target_path)
+ event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+ """Set up links for supported events that originate from Juju.
+
+ Whether a charm can handle an event or not can be determined by
+ introspecting which events are defined on it.
+
+ Hooks or actions are created as symlinks to the charm code file
+ which is determined by inspecting symlinks provided by the charm
+ author at hooks/install or hooks/start.
+
+ charm_dir -- A root directory of the charm.
+ charm -- An instance of the Charm class.
+
+ """
+ # XXX: on windows this function does not accomplish what it wants to:
+ # it creates symlinks with no extension pointing to a .py
+ # and juju only knows how to handle .exe, .bat, .cmd, and .ps1
+ # so it does its job, but does not accomplish anything as the
+ # hooks aren't 'callable'.
+ link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0]))
+ for bound_event in charm.on.events().values():
+ # Only events that originate from Juju need symlinks.
+ if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+ _create_event_link(charm, bound_event, link_to)
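+
+# For illustration (hypothetical layout): a charm defining a 'backup' action
+# plus the usual hooks would end up with symlinks roughly like
+#
+#     hooks/install       -> ../src/charm.py
+#     hooks/upgrade-charm -> ../src/charm.py
+#     actions/backup      -> ../src/charm.py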
+
+
+def _emit_charm_event(charm, event_name):
+ """Emits a charm event based on a Juju event name.
+
+ charm -- A charm instance to emit an event from.
+ event_name -- A Juju event name to emit on a charm.
+ """
+ event_to_emit = None
+ try:
+ event_to_emit = getattr(charm.on, event_name)
+ except AttributeError:
+ logger.debug("Event %s not defined for %s.", event_name, charm)
+
+ # If the event is not supported by the charm implementation, do
+ # not error out or try to emit it. This is to support rollbacks.
+ if event_to_emit is not None:
+ args, kwargs = _get_event_args(charm, event_to_emit)
+ logger.debug('Emitting Juju event %s.', event_name)
+ event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+ event_type = bound_event.event_type
+ model = charm.framework.model
+
+ if issubclass(event_type, ops.charm.RelationEvent):
+ relation_name = os.environ['JUJU_RELATION']
+ relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+ relation = model.get_relation(relation_name, relation_id)
+ else:
+ relation = None
+
+ remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+ remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+ if remote_app_name or remote_unit_name:
+ if not remote_app_name:
+ if '/' not in remote_unit_name:
+ raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+ remote_app_name = remote_unit_name.split('/')[0]
+ args = [relation, model.get_app(remote_app_name)]
+ if remote_unit_name:
+ args.append(model.get_unit(remote_unit_name))
+ return args, {}
+ elif relation:
+ return [relation], {}
+ return [], {}
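+
+# Sketch of the resulting emit call (values are hypothetical): with
+# JUJU_RELATION='db', JUJU_RELATION_ID='db:1' and JUJU_REMOTE_UNIT='mysql/0',
+# a RelationEvent is emitted roughly as
+#     event.emit(relation, model.get_app('mysql'), model.get_unit('mysql/0'))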
+
+
+class _Dispatcher:
+ """Encapsulate how to figure out what event Juju wants us to run.
+
+ Also knows how to run “legacy” hooks when Juju called us via a top-level
+ ``dispatch`` binary.
+
+ Args:
+ charm_dir: the toplevel directory of the charm
+
+ Attributes:
+ event_name: the name of the event to run
+ is_dispatch_aware: are we running under a Juju that knows about the
+ dispatch binary, and is that binary present?
+
+ """
+
+ def __init__(self, charm_dir: Path):
+ self._charm_dir = charm_dir
+ self._exec_path = Path(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0]))
+
+ dispatch = charm_dir / 'dispatch'
+ if JujuVersion.from_environ().is_dispatch_aware() and _exe_path(dispatch) is not None:
+ self._init_dispatch()
+ else:
+ self._init_legacy()
+
+ def ensure_event_links(self, charm):
+ """Make sure necessary symlinks are present on disk."""
+ if self.is_dispatch_aware:
+ # links aren't needed
+ return
+
+ # When a charm is force-upgraded and a unit is in an error state Juju
+ # does not run upgrade-charm and instead runs the failed hook followed
+ # by config-changed. Given the nature of force-upgrading the hook setup
+ # code is not triggered on config-changed.
+ #
+ # 'start' event is included as Juju does not fire the install event for
+ # K8s charms (see LP: #1854635).
+ if (self.event_name in ('install', 'start', 'upgrade_charm')
+ or self.event_name.endswith('_storage_attached')):
+ _setup_event_links(self._charm_dir, charm)
+
+ def run_any_legacy_hook(self):
+ """Run any extant legacy hook.
+
+ If there is both a dispatch file and a legacy hook for the
+ current event, run the wanted legacy hook.
+ """
+ if not self.is_dispatch_aware:
+ # we *are* the legacy hook
+ return
+
+ dispatch_path = _exe_path(self._charm_dir / self._dispatch_path)
+ if dispatch_path is None:
+ logger.debug("Legacy %s does not exist.", self._dispatch_path)
+ return
+
+ # super strange that there isn't an is_executable
+ if not os.access(str(dispatch_path), os.X_OK):
+ logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+ return
+
+ if dispatch_path.resolve() == Path(sys.argv[0]).resolve():
+ logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+ return
+
+ argv = sys.argv.copy()
+ argv[0] = str(dispatch_path)
+ logger.info("Running legacy %s.", self._dispatch_path)
+ try:
+ subprocess.run(argv, check=True)
+ except subprocess.CalledProcessError as e:
+ logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode)
+ sys.exit(e.returncode)
+ except OSError as e:
+ logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e)
+ sys.exit(1)
+ else:
+ logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+ def _set_name_from_path(self, path: Path):
+ """Sets the name attribute to that which can be inferred from the given path."""
+ name = path.name.replace('-', '_')
+ if path.parent.name == 'actions':
+ name = '{}_action'.format(name)
+ self.event_name = name
+
+ def _init_legacy(self):
+ """Set up the 'legacy' dispatcher.
+
+ The current Juju doesn't know about 'dispatch' and calls hooks
+ explicitly.
+ """
+ self.is_dispatch_aware = False
+ self._set_name_from_path(self._exec_path)
+
+ def _init_dispatch(self):
+ """Set up the new 'dispatch' dispatcher.
+
+ The current Juju will run 'dispatch' if it exists, and otherwise fall
+ back to the old behaviour.
+
+ JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+ in both cases.
+ """
+ self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+ if 'OPERATOR_DISPATCH' in os.environ:
+ logger.debug("Charm called itself via %s.", self._dispatch_path)
+ sys.exit(0)
+ os.environ['OPERATOR_DISPATCH'] = '1'
+
+ self.is_dispatch_aware = True
+ self._set_name_from_path(self._dispatch_path)
+
+ def is_restricted_context(self):
+ """Return True if we are running in a restricted Juju context.
+
+ When in a restricted context, most commands (relation-get, config-get,
+ state-get) are not available. As such, we change how we interact with
+ Juju.
+ """
+ return self.event_name in ('collect_metrics',)
+
+
+def _should_use_controller_storage(db_path: Path, meta: ops.charm.CharmMeta) -> bool:
+ """Figure out whether we want to use controller storage or not."""
+ # if you've previously used local state, carry on using that
+ if db_path.exists():
+ logger.debug("Using local storage: %s already exists", db_path)
+ return False
+
+ # if you're not in k8s you don't need controller storage
+ if 'kubernetes' not in meta.series:
+ logger.debug("Using local storage: not a kubernetes charm")
+ return False
+
+ # are we in a new enough Juju?
+ cur_version = JujuVersion.from_environ()
+
+ if cur_version.has_controller_storage():
+ logger.debug("Using controller storage: JUJU_VERSION=%s", cur_version)
+ return True
+ else:
+ logger.debug("Using local storage: JUJU_VERSION=%s", cur_version)
+ return False
+
+
+def main(charm_class: ops.charm.CharmBase, use_juju_for_storage: bool = None):
+ """Setup the charm and dispatch the observed event.
+
+ The event name is based on the way this executable was called (argv[0]).
+
+ Args:
+ charm_class: your charm class.
+ use_juju_for_storage: whether to use controller-side storage. If not specified
+ then kubernetes charms that haven't previously used local storage and that
+ are running on a new enough Juju default to controller-side storage,
+ otherwise local storage is used.
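+
+ Example (a minimal sketch; MyCharm is a hypothetical charm class)::
+
+ if __name__ == "__main__":
+ main(MyCharm)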
+ """
+ charm_dir = _get_charm_dir()
+
+ model_backend = ops.model._ModelBackend()
+ debug = ('JUJU_DEBUG' in os.environ)
+ setup_root_logging(model_backend, debug=debug)
+ logger.debug("Operator Framework %s up and running.", ops.__version__)
+
+ dispatcher = _Dispatcher(charm_dir)
+ dispatcher.run_any_legacy_hook()
+
+ metadata = (charm_dir / 'metadata.yaml').read_text()
+ actions_meta = charm_dir / 'actions.yaml'
+ if actions_meta.exists():
+ actions_metadata = actions_meta.read_text()
+ else:
+ actions_metadata = None
+
+ if not yaml.__with_libyaml__:
+ logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
+ meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
+ model = ops.model.Model(meta, model_backend)
+
+ charm_state_path = charm_dir / CHARM_STATE_FILE
+
+ if use_juju_for_storage and not ops.storage.juju_backend_available():
+ # raise an exception; the charm is broken and needs fixing.
+ msg = 'charm set use_juju_for_storage=True, but Juju version {} does not support it'
+ raise RuntimeError(msg.format(JujuVersion.from_environ()))
+
+ if use_juju_for_storage is None:
+ use_juju_for_storage = _should_use_controller_storage(charm_state_path, meta)
+
+ if use_juju_for_storage:
+ if dispatcher.is_restricted_context():
+ # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
+ # Though we eventually expect that juju will run collect-metrics in a
+ # non-restricted context. Once we can determine that we are running collect-metrics
+ # in a non-restricted context, we should fire the event as normal.
+ logger.debug('"%s" is not supported when using Juju for storage\n'
+ 'see: https://github.com/canonical/operator/issues/348',
+ dispatcher.event_name)
+ # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
+ return
+ store = ops.storage.JujuStorage()
+ else:
+ store = ops.storage.SQLiteStorage(charm_state_path)
+ framework = ops.framework.Framework(store, charm_dir, meta, model)
+ framework.set_breakpointhook()
+ try:
+ sig = inspect.signature(charm_class)
+ try:
+ sig.bind(framework)
+ except TypeError:
+ msg = (
+ "the second argument, 'key', has been deprecated and will be "
+ "removed after the 0.7 release")
+ warnings.warn(msg, DeprecationWarning)
+ charm = charm_class(framework, None)
+ else:
+ charm = charm_class(framework)
+ dispatcher.ensure_event_links(charm)
+
+ # TODO: Remove the collect_metrics check below as soon as the relevant
+ # Juju changes are made. Also adjust the docstring on
+ # EventBase.defer().
+ #
+ # Skip reemission of deferred events for collect-metrics events because
+ # they do not have the full access to all hook tools.
+ if not dispatcher.is_restricted_context():
+ framework.reemit()
+
+ _emit_charm_event(charm, dispatcher.event_name)
+
+ framework.commit()
+ finally:
+ framework.close()
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/model.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..d446d63647807db570192c690a6d245244b8f19d
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/model.py
@@ -0,0 +1,1314 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Representations of Juju's model, application, unit, and other entities."""
+
+import datetime
+import decimal
+import ipaddress
+import json
+import os
+import re
+import shutil
+import tempfile
+import time
+import typing
+import weakref
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from pathlib import Path
+from subprocess import run, PIPE, CalledProcessError
+import yaml
+
+import ops
+from ops.jujuversion import JujuVersion
+
+
+if yaml.__with_libyaml__:
+ _DefaultDumper = yaml.CSafeDumper
+else:
+ _DefaultDumper = yaml.SafeDumper
+
+
+class Model:
+ """Represents the Juju Model as seen from this unit.
+
+ This should not be instantiated directly by Charmers, but can be accessed as `self.model`
+ from any class that derives from Object.
+ """
+
+ def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'):
+ self._cache = _ModelCache(backend)
+ self._backend = backend
+ self._unit = self.get_unit(self._backend.unit_name)
+ self._relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache)
+ self._config = ConfigData(self._backend)
+ self._resources = Resources(list(meta.resources), self._backend)
+ self._pod = Pod(self._backend)
+ self._storages = StorageMapping(list(meta.storages), self._backend)
+ self._bindings = BindingMapping(self._backend)
+
+ @property
+ def unit(self) -> 'Unit':
+ """A :class:`Unit` that represents the unit that is running this code (eg yourself)."""
+ return self._unit
+
+ @property
+ def app(self):
+ """A :class:`Application` that represents the application this unit is a part of."""
+ return self._unit.app
+
+ @property
+ def relations(self) -> 'RelationMapping':
+ """Mapping of endpoint to list of :class:`Relation`.
+
+ Answers the question "what am I currently related to".
+ See also :meth:`.get_relation`.
+ """
+ return self._relations
+
+ @property
+ def config(self) -> 'ConfigData':
+ """Return a mapping of config for the current application."""
+ return self._config
+
+ @property
+ def resources(self) -> 'Resources':
+ """Access to resources for this charm.
+
+ Use ``model.resources.fetch(resource_name)`` to get the path on disk
+ where the resource can be found.
+ """
+ return self._resources
+
+ @property
+ def storages(self) -> 'StorageMapping':
+ """Mapping of storage_name to :class:`Storage` as defined in metadata.yaml."""
+ return self._storages
+
+ @property
+ def pod(self) -> 'Pod':
+ """Use ``model.pod.set_spec`` to set the container specification for Kubernetes charms."""
+ return self._pod
+
+ @property
+ def name(self) -> str:
+ """Return the name of the Model that this unit is running in.
+
+ This is read from the environment variable ``JUJU_MODEL_NAME``.
+ """
+ return self._backend.model_name
+
+ def get_unit(self, unit_name: str) -> 'Unit':
+ """Get an arbitrary unit by name.
+
+ Internally this uses a cache, so asking for the same unit two times will
+ return the same object.
+ """
+ return self._cache.get(Unit, unit_name)
+
+ def get_app(self, app_name: str) -> 'Application':
+ """Get an application by name.
+
+ Internally this uses a cache, so asking for the same application two times will
+ return the same object.
+ """
+ return self._cache.get(Application, app_name)
+
+ def get_relation(
+ self, relation_name: str,
+ relation_id: typing.Optional[int] = None) -> 'Relation':
+ """Get a specific Relation instance.
+
+ If relation_id is not given, this returns the Relation instance if the
+ relation is established exactly once, or None if it is not established at
+ all. If the relation is established more than once, TooManyRelatedAppsError is raised.
+
+ Args:
+ relation_name: The name of the endpoint for this charm
+ relation_id: An identifier for a specific relation. Used to disambiguate when a
+ given application has more than one relation on a given endpoint.
+
+ Raises:
+ TooManyRelatedAppsError: is raised if there is more than one relation to the
+ supplied relation_name and no relation_id was supplied
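+
+ Example (sketch; 'db' is a hypothetical endpoint name)::
+
+ relation = self.model.get_relation('db')
+ if relation is not None:
+ relation.data[self.model.unit]['hostname'] = 'myhost'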
+ """
+ return self.relations._get_unique(relation_name, relation_id)
+
+ def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+ """Get a network space binding.
+
+ Args:
+ binding_key: The relation name or instance to obtain bindings for.
+
+ Returns:
+ If ``binding_key`` is a relation name, the method returns the default binding
+ for that relation. If a relation instance is provided, the method first looks
+ up a more specific binding for that specific relation ID, and if none is found
+ falls back to the default binding for the relation name.
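+
+ Example (sketch; 'db' is a hypothetical endpoint name)::
+
+ binding = self.model.get_binding('db')
+ address = binding.network.bind_address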
+ """
+ return self._bindings.get(binding_key)
+
+
+class _ModelCache:
+
+ def __init__(self, backend):
+ self._backend = backend
+ self._weakrefs = weakref.WeakValueDictionary()
+
+ def get(self, entity_type, *args):
+ key = (entity_type,) + args
+ entity = self._weakrefs.get(key)
+ if entity is None:
+ entity = entity_type(*args, backend=self._backend, cache=self)
+ self._weakrefs[key] = entity
+ return entity
+
+
+class Application:
+ """Represents a named application in the model.
+
+ This might be your application, or might be an application that you are related to.
+ Charmers should not instantiate Application objects directly, but should use
+ :meth:`Model.get_app` if they need a reference to a given application.
+
+ Attributes:
+ name: The name of this application (eg, 'mysql'). This name may differ from the name of
+ the charm, if the user has deployed it to a different name.
+ """
+
+ def __init__(self, name, backend, cache):
+ self.name = name
+ self._backend = backend
+ self._cache = cache
+ self._is_our_app = self.name == self._backend.app_name
+ self._status = None
+
+ def _invalidate(self):
+ self._status = None
+
+ @property
+ def status(self) -> 'StatusBase':
+ """Used to report or read the status of the overall application.
+
+ Can only be read and set by the leader unit of the application.
+
+ The status of a remote application is always Unknown.
+
+ Raises:
+ RuntimeError: if you try to set the status of another application, or if you try to
+ set the status of this application as a unit that is not the leader.
+ InvalidStatusError: if you try to set the status to something that is not a
+ :class:`StatusBase`
+
+ Example::
+
+ self.model.app.status = BlockedStatus('I need a human to come help me')
+ """
+ if not self._is_our_app:
+ return UnknownStatus()
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot get application status as a non-leader unit')
+
+ if self._status:
+ return self._status
+
+ s = self._backend.status_get(is_app=True)
+ self._status = StatusBase.from_name(s['status'], s['message'])
+ return self._status
+
+ @status.setter
+ def status(self, value: 'StatusBase'):
+ if not isinstance(value, StatusBase):
+ raise InvalidStatusError(
+ 'invalid value provided for application {} status: {}'.format(self, value)
+ )
+
+ if not self._is_our_app:
+ raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+ if not self._backend.is_leader():
+ raise RuntimeError('cannot set application status as a non-leader unit')
+
+ self._backend.status_set(value.name, value.message, is_app=True)
+ self._status = value
+
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+ """Represents a named unit in the model.
+
+ This might be your unit, another unit of your application, or a unit of another application
+ that you are related to.
+
+ Attributes:
+ name: The name of the unit (eg, 'mysql/0')
+ app: The Application the unit is a part of.
+ """
+
+ def __init__(self, name, backend, cache):
+ self.name = name
+
+ app_name = name.split('/')[0]
+ self.app = cache.get(Application, app_name)
+
+ self._backend = backend
+ self._cache = cache
+ self._is_our_unit = self.name == self._backend.unit_name
+ self._status = None
+
+ def _invalidate(self):
+ self._status = None
+
+ @property
+ def status(self) -> 'StatusBase':
+ """Used to report or read the status of a specific unit.
+
+ The status of any unit other than yourself is always Unknown.
+
+ Raises:
+ RuntimeError: if you try to set the status of a unit other than yourself.
+ InvalidStatusError: if you try to set the status to something other than
+ a :class:`StatusBase`
+ Example::
+
+ self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+ """
+ if not self._is_our_unit:
+ return UnknownStatus()
+
+ if self._status:
+ return self._status
+
+ s = self._backend.status_get(is_app=False)
+ self._status = StatusBase.from_name(s['status'], s['message'])
+ return self._status
+
+ @status.setter
+ def status(self, value: 'StatusBase'):
+ if not isinstance(value, StatusBase):
+ raise InvalidStatusError(
+ 'invalid value provided for unit {} status: {}'.format(self, value)
+ )
+
+ if not self._is_our_unit:
+ raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+ self._backend.status_set(value.name, value.message, is_app=False)
+ self._status = value
+
+ def __repr__(self):
+ return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+ def is_leader(self) -> bool:
+ """Return whether this unit is the leader of its application.
+
+ This can only be called for your own unit.
+
+ Returns:
+ True if you are the leader, False otherwise
+ Raises:
+ RuntimeError: if called for a unit that is not yourself
+ """
+ if self._is_our_unit:
+ # This value is not cached as it is not guaranteed to persist for the whole duration
+ # of a hook execution.
+ return self._backend.is_leader()
+ else:
+ raise RuntimeError(
+ 'leadership status of remote units ({}) is not visible to other'
+ ' applications'.format(self)
+ )
+
+ def set_workload_version(self, version: str) -> None:
+ """Record the version of the software running as the workload.
+
+ This shouldn't be confused with the revision of the charm. This is informative only;
+ shown in the output of 'juju status'.
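+
+ Example (sketch; the version string is illustrative)::
+
+ self.model.unit.set_workload_version('1.2.3')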
+ """
+ if not isinstance(version, str):
+ raise TypeError("workload version must be a str, not {}: {!r}".format(
+ type(version).__name__, version))
+ self._backend.application_version_set(version)
+
+
+class LazyMapping(Mapping, ABC):
+ """Represents a dict that isn't populated until it is accessed.
+
+ Charm authors should generally never need to use this directly, but it forms
+ the basis for many of the dicts that the framework tracks.
+ """
+
+ _lazy_data = None
+
+ @abstractmethod
+ def _load(self):
+ raise NotImplementedError()
+
+ @property
+ def _data(self):
+ data = self._lazy_data
+ if data is None:
+ data = self._lazy_data = self._load()
+ return data
+
+ def _invalidate(self):
+ self._lazy_data = None
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __len__(self):
+ return len(self._data)
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def __repr__(self):
+ return repr(self._data)
+
+
+class RelationMapping(Mapping):
+ """Map of relation names to lists of :class:`Relation` instances."""
+
+ def __init__(self, relations_meta, our_unit, backend, cache):
+ self._peers = set()
+ for name, relation_meta in relations_meta.items():
+ if relation_meta.role.is_peer():
+ self._peers.add(name)
+ self._our_unit = our_unit
+ self._backend = backend
+ self._cache = cache
+ self._data = {relation_name: None for relation_name in relations_meta}
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __len__(self):
+ return len(self._data)
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __getitem__(self, relation_name):
+ is_peer = relation_name in self._peers
+ relation_list = self._data[relation_name]
+ if relation_list is None:
+ relation_list = self._data[relation_name] = []
+ for rid in self._backend.relation_ids(relation_name):
+ relation = Relation(relation_name, rid, is_peer,
+ self._our_unit, self._backend, self._cache)
+ relation_list.append(relation)
+ return relation_list
+
+ def _invalidate(self, relation_name):
+ """Used to wipe the cache of a given relation_name.
+
+ Not meant to be used by Charm authors. The content of relation data is
+ static for the lifetime of a hook, so it is safe to cache in memory once
+ accessed.
+ """
+ self._data[relation_name] = None
+
+ def _get_unique(self, relation_name, relation_id=None):
+ if relation_id is not None:
+ if not isinstance(relation_id, int):
+ raise ModelError('relation id {} must be int or None not {}'.format(
+ relation_id,
+ type(relation_id).__name__))
+ for relation in self[relation_name]:
+ if relation.id == relation_id:
+ return relation
+ else:
+ # The relation may be dead, but it is not forgotten.
+ is_peer = relation_name in self._peers
+ return Relation(relation_name, relation_id, is_peer,
+ self._our_unit, self._backend, self._cache)
+ num_related = len(self[relation_name])
+ if num_related == 0:
+ return None
+ elif num_related == 1:
+ return self[relation_name][0]
+ else:
+ # TODO: We need something in the framework to catch and gracefully handle
+ # errors, ideally integrating the error catching with Juju's mechanisms.
+ raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+ """Mapping of endpoints to network bindings.
+
+ Charm authors should not instantiate this directly, but access it via
+ :meth:`Model.get_binding`
+ """
+
+ def __init__(self, backend):
+ self._backend = backend
+ self._data = {}
+
+ def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+ """Get a specific Binding for an endpoint/relation.
+
+ Not used directly by Charm authors. See :meth:`Model.get_binding`
+ """
+ if isinstance(binding_key, Relation):
+ binding_name = binding_key.name
+ relation_id = binding_key.id
+ elif isinstance(binding_key, str):
+ binding_name = binding_key
+ relation_id = None
+ else:
+ raise ModelError('binding key must be str or relation instance, not {}'
+ ''.format(type(binding_key).__name__))
+ binding = self._data.get(binding_key)
+ if binding is None:
+ binding = Binding(binding_name, relation_id, self._backend)
+ self._data[binding_key] = binding
+ return binding
+
+
+class Binding:
+ """Binding to a network space.
+
+ Attributes:
+ name: The name of the endpoint this binding represents (eg, 'db')
+ """
+
+ def __init__(self, name, relation_id, backend):
+ self.name = name
+ self._relation_id = relation_id
+ self._backend = backend
+ self._network = None
+
+ @property
+ def network(self) -> 'Network':
+ """The network information for this binding."""
+ if self._network is None:
+ try:
+ self._network = Network(self._backend.network_get(self.name, self._relation_id))
+ except RelationNotFoundError:
+ if self._relation_id is None:
+ raise
+ # If a relation is dead, we can still get network info associated with an
+ # endpoint itself
+ self._network = Network(self._backend.network_get(self.name))
+ return self._network
+
+
+class Network:
+ """Network space details.
+
+ Charm authors should not instantiate this directly, but should get access to the Network
+ definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+ Attributes:
+ interfaces: A list of :class:`NetworkInterface` details. This includes the
+ information about how your application should be configured (eg, what
+ IP addresses you should bind to).
+ Note that multiple addresses for a single interface are represented as multiple
+ interfaces. (eg, ``[NetworkInterface('ens1', '10.1.1.1/32'),
+ NetworkInterface('ens1', '10.1.2.1/32')]``)
+ ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+ addresses that other units should use to get in touch with you.
+ egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+ other units will see you connecting from. Due to things like NAT it isn't always
+ possible to narrow it down to a single address, but when it is clear, the CIDRs
+ will be constrained to a single address. (eg, 10.0.0.1/32)
+ Args:
+ network_info: A dict of network information as returned by ``network-get``.
+ """
+
+ def __init__(self, network_info: dict):
+ self.interfaces = []
+ # Treat multiple addresses on an interface as multiple logical
+ # interfaces with the same name.
+ for interface_info in network_info.get('bind-addresses', []):
+ interface_name = interface_info.get('interface-name')
+ for address_info in interface_info.get('addresses', []):
+ self.interfaces.append(NetworkInterface(interface_name, address_info))
+ self.ingress_addresses = []
+ for address in network_info.get('ingress-addresses', []):
+ self.ingress_addresses.append(ipaddress.ip_address(address))
+ self.egress_subnets = []
+ for subnet in network_info.get('egress-subnets', []):
+ self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+ @property
+ def bind_address(self):
+ """A single address that your application should bind() to.
+
+ For the common case where there is a single answer. This represents a single
+ address from :attr:`.interfaces` that can be used to configure where your
+ application should bind() and listen().
+ """
+ if self.interfaces:
+ return self.interfaces[0].address
+ else:
+ return None
+
+ @property
+ def ingress_address(self):
+ """The address other applications should use to connect to your unit.
+
+ Due to things like public/private addresses, NAT and tunneling, the address you bind()
+ to is not always the address other people can use to connect() to you.
+ This is just the first address from :attr:`.ingress_addresses`.
+ """
+ if self.ingress_addresses:
+ return self.ingress_addresses[0]
+ else:
+ return None
+
+
+class NetworkInterface:
+ """Represents a single network interface that the charm needs to know about.
+
+ Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding`
+ to get the network information for a given endpoint.
+
+ Attributes:
+ name: The name of the interface (eg. 'eth0', or 'ens1')
+ subnet: An :class:`ipaddress.ip_network` representation of the IP for the network
+ interface. This may be a single address (eg '10.0.1.2/32')
+ """
+
+ def __init__(self, name: str, address_info: dict):
+ self.name = name
+ # TODO: expose a hardware address here, see LP: #1864070.
+ address = address_info.get('value')
+ # The value field may be empty.
+ if address:
+ self.address = ipaddress.ip_address(address)
+ else:
+ self.address = None
+ cidr = address_info.get('cidr')
+ # The cidr field may be empty, see LP: #1864102.
+ if cidr:
+ self.subnet = ipaddress.ip_network(cidr)
+ elif address:
+ # If we have an address, convert it to a /32 or /128 IP network.
+ self.subnet = ipaddress.ip_network(address)
+ else:
+ self.subnet = None
+ # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+ """Represents an established relation between this application and another application.
+
+ This class should not be instantiated directly, instead use :meth:`Model.get_relation`
+ or :attr:`ops.charm.RelationEvent.relation`.
+
+ Attributes:
+ name: The name of the local endpoint of the relation (eg 'db')
+ id: The identifier for a particular relation (integer)
+ app: An :class:`Application` representing the remote application of this relation.
+ For peer relations this will be the local application.
+ units: A set of :class:`Unit` for units that have started and joined this relation.
+ data: A :class:`RelationData` holding the data buckets for each entity
+ of a relation. Accessed via eg Relation.data[unit]['foo']
+ """
+
+ def __init__(
+ self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit,
+ backend: '_ModelBackend', cache: '_ModelCache'):
+ self.name = relation_name
+ self.id = relation_id
+ self.app = None
+ self.units = set()
+
+ # For peer relations, both the remote and the local app are the same.
+ if is_peer:
+ self.app = our_unit.app
+ try:
+ for unit_name in backend.relation_list(self.id):
+ unit = cache.get(Unit, unit_name)
+ self.units.add(unit)
+ if self.app is None:
+ self.app = unit.app
+ except RelationNotFoundError:
+ # If the relation is dead, just treat it as if it has no remote units.
+ pass
+ self.data = RelationData(self, our_unit, backend)
+
+ def __repr__(self):
+ return '<{}.{} {}:{}>'.format(type(self).__module__,
+ type(self).__name__,
+ self.name,
+ self.id)
+
+
+class RelationData(Mapping):
+ """Represents the various data buckets of a given relation.
+
+ Each unit and application involved in a relation has its own data bucket.
+ Eg: ``{entity: RelationDataContent}``
+ where entity can be either a :class:`Unit` or a :class:`Application`.
+
+ Units can read and write their own data, and if they are the leader,
+ they can read and write their application data. They are allowed to read
+ remote unit and application data.
+
+ This class should not be created directly. It should be accessed via
+ :attr:`Relation.data`
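+
+ Example (sketch; 'db' and the key are hypothetical)::
+
+ relation = self.model.get_relation('db')
+ relation.data[self.model.unit]['port'] = '8080'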
+ """
+
+ def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'):
+ self.relation = weakref.proxy(relation)
+ self._data = {
+ our_unit: RelationDataContent(self.relation, our_unit, backend),
+ our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+ }
+ self._data.update({
+ unit: RelationDataContent(self.relation, unit, backend)
+ for unit in self.relation.units})
+ # The relation might be dead so avoid a None key here.
+ if self.relation.app is not None:
+ self._data.update({
+ self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+ })
+
+ def __contains__(self, key):
+ return key in self._data
+
+ def __len__(self):
+ return len(self._data)
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __getitem__(self, key):
+ return self._data[key]
+
+ def __repr__(self):
+ return repr(self._data)
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether it's actually
+# mutable or not is controlled by the flag.
+class RelationDataContent(LazyMapping, MutableMapping):
+ """Data content of a unit or application in a relation."""
+
+ def __init__(self, relation, entity, backend):
+ self.relation = relation
+ self._entity = entity
+ self._backend = backend
+ self._is_app = isinstance(entity, Application)
+
+ def _load(self):
+ """Load the data from the current entity / relation."""
+ try:
+ return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+ except RelationNotFoundError:
+ # Dead relations tell no tales (and have no data).
+ return {}
+
+ def _is_mutable(self):
+ """Return if the data content can be modified."""
+ if self._is_app:
+ is_our_app = self._backend.app_name == self._entity.name
+ if not is_our_app:
+ return False
+ # Whether the application data bag is mutable or not depends on
+ # whether this unit is a leader or not, but this is not guaranteed
+ # to be always true during the same hook execution.
+ return self._backend.is_leader()
+ else:
+ is_our_unit = self._backend.unit_name == self._entity.name
+ if is_our_unit:
+ return True
+ return False
+
+ def __setitem__(self, key, value):
+ if not self._is_mutable():
+ raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+ if not isinstance(value, str):
+ raise RelationDataError('relation data values must be strings')
+
+ self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+ # Don't load data unnecessarily if we're only updating.
+ if self._lazy_data is not None:
+ if value == '':
+ # Match the behavior of Juju, which is that setting the value to an
+ # empty string will remove the key entirely from the relation data.
+ self._data.pop(key, None)
+ else:
+ self._data[key] = value
+
+ def __delitem__(self, key):
+ # Match the behavior of Juju, which is that setting the value to an empty
+ # string will remove the key entirely from the relation data.
+ self.__setitem__(key, '')
+
+
+class ConfigData(LazyMapping):
+ """Configuration data.
+
+ This class should not be created directly. It should be accessed via :attr:`Model.config`.
+ """
+
+ def __init__(self, backend):
+ self._backend = backend
+
+ def _load(self):
+ return self._backend.config_get()
+
+
+class StatusBase:
+ """Status values specific to applications and units.
+
+ To access a status by name, see :meth:`StatusBase.from_name`; most use cases will just
+ use the child class directly to indicate their status.
+ """
+
+ _statuses = {}
+ name = None
+
+ def __init__(self, message: str):
+ self.message = message
+
+ def __new__(cls, *args, **kwargs):
+ """Forbid the usage of StatusBase directly."""
+ if cls is StatusBase:
+ raise TypeError("cannot instantiate a base class")
+ return super().__new__(cls)
+
+ def __eq__(self, other):
+ if not isinstance(self, type(other)):
+ return False
+ return self.message == other.message
+
+ def __repr__(self):
+ return "{.__class__.__name__}({!r})".format(self, self.message)
+
+ @classmethod
+ def from_name(cls, name: str, message: str):
+ """Get the specific Status for the name (or UnknownStatus if not registered)."""
+ if name == 'unknown':
+ # unknown is special
+ return UnknownStatus()
+ else:
+ return cls._statuses[name](message)
+
+ @classmethod
+ def register(cls, child):
+ """Register a Status for the child's name."""
+ if child.name is None:
+ raise AttributeError('cannot register a Status which has no name')
+ cls._statuses[child.name] = child
+ return child
+
+
+@StatusBase.register
+class UnknownStatus(StatusBase):
+ """The unit status is unknown.
+
+ A unit-agent has finished calling install, config-changed and start, but the
+ charm has not called status-set yet.
+
+ """
+ name = 'unknown'
+
+ def __init__(self):
+ # Unknown status cannot be set and does not have a message associated with it.
+ super().__init__('')
+
+ def __repr__(self):
+ return "UnknownStatus()"
+
+
+@StatusBase.register
+class ActiveStatus(StatusBase):
+ """The unit is ready.
+
+ The unit believes it is correctly offering all the services it has been asked to offer.
+ """
+ name = 'active'
+
+ def __init__(self, message: str = ''):
+ super().__init__(message)
+
+
+@StatusBase.register
+class BlockedStatus(StatusBase):
+ """The unit requires manual intervention.
+
+ An operator has to manually intervene to unblock the unit and let it proceed.
+ """
+ name = 'blocked'
+
+
+@StatusBase.register
+class MaintenanceStatus(StatusBase):
+ """The unit is performing maintenance tasks.
+
+ The unit is not yet providing services, but is actively doing work in preparation
+ for providing those services. This is a "spinning" state, not an error state. It
+ reflects activity on the unit itself, not on peers or related units.
+
+ """
+ name = 'maintenance'
+
+
+@StatusBase.register
+class WaitingStatus(StatusBase):
+ """A unit is unable to progress.
+
+ The unit is unable to progress to an active state because an application to which
+ it is related is not running.
+
+ """
+ name = 'waiting'
+
+
+class Resources:
+ """Object representing resources for the charm."""
+
+ def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'):
+ self._backend = backend
+ self._paths = {name: None for name in names}
+
+ def fetch(self, name: str) -> Path:
+ """Fetch the resource from the controller or store.
+
+ If successfully fetched, this returns a Path object to where the resource is stored
+ on disk, otherwise it raises a ModelError.
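+
+ Example (sketch; 'software' is a hypothetical resource name)::
+
+ path = self.model.resources.fetch('software')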
+ """
+ if name not in self._paths:
+ raise RuntimeError('invalid resource name: {}'.format(name))
+ if self._paths[name] is None:
+ self._paths[name] = Path(self._backend.resource_get(name))
+ return self._paths[name]
+
+
+class Pod:
+ """Represents the definition of a pod spec in Kubernetes models.
+
+ Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`.
+ """
+
+ def __init__(self, backend: '_ModelBackend'):
+ self._backend = backend
+
+ def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None):
+ """Set the specification for pods that Juju should start in kubernetes.
+
+ See `juju help-tool pod-spec-set` for details of what should be passed.
+
+ Args:
+ spec: The mapping defining the pod specification
+ k8s_resources: Additional kubernetes specific specification.
+
+ Returns:
+ None
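+
+ Example (sketch; the spec content is illustrative only)::
+
+ self.model.pod.set_spec({
+ 'version': 3,
+ 'containers': [{'name': 'app', 'image': 'app:latest'}],
+ })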
+ """
+ if not self._backend.is_leader():
+ raise ModelError('cannot set a pod spec as this unit is not a leader')
+ self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+ """Map of storage names to lists of Storage instances."""
+
+ def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+ self._backend = backend
+ self._storage_map = {storage_name: None for storage_name in storage_names}
+
+ def __contains__(self, key: str):
+ return key in self._storage_map
+
+ def __len__(self):
+ return len(self._storage_map)
+
+ def __iter__(self):
+ return iter(self._storage_map)
+
+ def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+ storage_list = self._storage_map[storage_name]
+ if storage_list is None:
+ storage_list = self._storage_map[storage_name] = []
+ for storage_id in self._backend.storage_list(storage_name):
+ storage_list.append(Storage(storage_name, storage_id, self._backend))
+ return storage_list
+
+ def request(self, storage_name: str, count: int = 1):
+ """Requests new storage instances of a given name.
+
+ Uses the storage-add tool to request additional storage. Juju will notify the unit
+ via <storage-name>-storage-attached events when it becomes available.
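+
+ Example (sketch; 'data' is a hypothetical storage name)::
+
+ self.model.storages.request('data', count=2)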
+ """
+ if storage_name not in self._storage_map:
+ raise ModelError(('cannot add storage {!r}:'
+ ' it is not present in the charm metadata').format(storage_name))
+ self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+ """Represents a storage as defined in metadata.yaml.
+
+ Attributes:
+ name: Simple string name of the storage
+ id: The provider id for storage
+ """
+
+ def __init__(self, storage_name, storage_id, backend):
+ self.name = storage_name
+ self.id = storage_id
+ self._backend = backend
+ self._location = None
+
+ @property
+ def location(self):
+ """Return the location of the storage."""
+ if self._location is None:
+ raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+ self._location = Path(raw)
+ return self._location
+
+
+class ModelError(Exception):
+ """Base class for exceptions raised when interacting with the Model."""
+ pass
+
+
+class TooManyRelatedAppsError(ModelError):
+ """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+ def __init__(self, relation_name, num_related, max_supported):
+ super().__init__('Too many remote applications on {} ({} > {})'.format(
+ relation_name, num_related, max_supported))
+ self.relation_name = relation_name
+ self.num_related = num_related
+ self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+ """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+
+ This is raised if you're either trying to set a value to something that isn't a string,
+ or if you are trying to set a value in a bucket that you don't have access to (eg,
+ data belonging to another application or unit, or your application data when you aren't the leader).
+ """
+
+
+class RelationNotFoundError(ModelError):
+ """Backend error when querying juju for a given relation and that relation doesn't exist."""
+
+
+class InvalidStatusError(ModelError):
+ """Raised if trying to set an Application or Unit status to something invalid."""
+
+
+class _ModelBackend:
+ """Represents the connection between the Model representation and talking to Juju.
+
+ Charm authors should not interact directly with the ModelBackend; it is a private
+ implementation detail of Model.
+ """
+
+ LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30)
+
+ def __init__(self, unit_name=None, model_name=None):
+ if unit_name is None:
+ self.unit_name = os.environ['JUJU_UNIT_NAME']
+ else:
+ self.unit_name = unit_name
+ if model_name is None:
+ model_name = os.environ.get('JUJU_MODEL_NAME')
+ self.model_name = model_name
+ self.app_name = self.unit_name.split('/')[0]
+
+ self._is_leader = None
+ self._leader_check_time = None
+
+ def _run(self, *args, return_output=False, use_json=False):
+ kwargs = dict(stdout=PIPE, stderr=PIPE, check=True)
+ args = (shutil.which(args[0]),) + args[1:]
+ if use_json:
+ args += ('--format=json',)
+ try:
+ result = run(args, **kwargs)
+ except CalledProcessError as e:
+ raise ModelError(e.stderr)
+ if return_output:
+ if result.stdout is None:
+ return ''
+ else:
+ text = result.stdout.decode('utf8')
+ if use_json:
+ return json.loads(text)
+ else:
+ return text
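+
+ # For illustration: self._run('is-leader', return_output=True, use_json=True)
+ # resolves the 'is-leader' hook tool on PATH, appends '--format=json', runs
+ # it, and returns the parsed JSON output (a bool in that case).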
+
+ def relation_ids(self, relation_name):
+ relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
+ return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
+
+ def relation_list(self, relation_id):
+ try:
+ return self._run('relation-list', '-r', str(relation_id),
+ return_output=True, use_json=True)
+ except ModelError as e:
+ if 'relation not found' in str(e):
+ raise RelationNotFoundError() from e
+ raise
+
+ def relation_get(self, relation_id, member_name, is_app):
+ if not isinstance(is_app, bool):
+ raise TypeError('is_app parameter to relation_get must be a boolean')
+
+ if is_app:
+ version = JujuVersion.from_environ()
+ if not version.has_app_data():
+ raise RuntimeError(
+ 'getting application data is not supported on Juju version {}'.format(version))
+
+ args = ['relation-get', '-r', str(relation_id), '-', member_name]
+ if is_app:
+ args.append('--app')
+
+ try:
+ return self._run(*args, return_output=True, use_json=True)
+ except ModelError as e:
+ if 'relation not found' in str(e):
+ raise RelationNotFoundError() from e
+ raise
+
+ def relation_set(self, relation_id, key, value, is_app):
+ if not isinstance(is_app, bool):
+ raise TypeError('is_app parameter to relation_set must be a boolean')
+
+ if is_app:
+ version = JujuVersion.from_environ()
+ if not version.has_app_data():
+ raise RuntimeError(
+ 'setting application data is not supported on Juju version {}'.format(version))
+
+ args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)]
+ if is_app:
+ args.append('--app')
+
+ try:
+ return self._run(*args)
+ except ModelError as e:
+ if 'relation not found' in str(e):
+ raise RelationNotFoundError() from e
+ raise
+
+ def config_get(self):
+ return self._run('config-get', return_output=True, use_json=True)
+
+ def is_leader(self):
+ """Obtain the current leadership status for the unit the charm code is executing on.
+
+ The value is cached for the duration of a lease which is 30s in Juju.
+ """
+ now = time.monotonic()
+ if self._leader_check_time is None:
+ check = True
+ else:
+ time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+ check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+ if check:
+ # Current time MUST be saved before running is-leader to ensure the cache
+ # is only used inside the window that is-leader itself asserts.
+ self._leader_check_time = now
+ self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+ return self._is_leader
+
+ def resource_get(self, resource_name):
+ return self._run('resource-get', resource_name, return_output=True).strip()
+
+ def pod_spec_set(self, spec, k8s_resources):
+ tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+ try:
+ spec_path = tmpdir / 'spec.yaml'
+ with spec_path.open("wt", encoding="utf8") as f:
+ yaml.dump(spec, stream=f, Dumper=_DefaultDumper)
+ args = ['--file', str(spec_path)]
+ if k8s_resources:
+ k8s_res_path = tmpdir / 'k8s-resources.yaml'
+ with k8s_res_path.open("wt", encoding="utf8") as f:
+ yaml.dump(k8s_resources, stream=f, Dumper=_DefaultDumper)
+ args.extend(['--k8s-resources', str(k8s_res_path)])
+ self._run('pod-spec-set', *args)
+ finally:
+ shutil.rmtree(str(tmpdir))
+
+ def status_get(self, *, is_app=False):
+ """Get a status of a unit or an application.
+
+ Args:
+ is_app: A boolean indicating whether the status should be retrieved for a unit
+ or an application.
+ """
+ content = self._run(
+ 'status-get', '--include-data', '--application={}'.format(is_app),
+ use_json=True,
+ return_output=True)
+ # Unit status looks like (in YAML):
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+ # Application status looks like (in YAML):
+ # application-status:
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+ # units:
+ # uo/0:
+ # message: 'load: 0.28 0.26 0.26'
+ # status: active
+ # status-data: {}
+
+ if is_app:
+ return {'status': content['application-status']['status'],
+ 'message': content['application-status']['message']}
+ else:
+ return content
+
+ def status_set(self, status, message='', *, is_app=False):
+ """Set a status of a unit or an application.
+
+ Args:
+ status: The status to set.
+ message: The message to set in the status.
+ is_app: A boolean indicating whether the status should be set for a unit or an
+ application.
+ """
+ if not isinstance(is_app, bool):
+ raise TypeError('is_app parameter must be boolean')
+ return self._run('status-set', '--application={}'.format(is_app), status, message)
+
+ def storage_list(self, name):
+ return [int(s.split('/')[1]) for s in self._run('storage-list', name,
+ return_output=True, use_json=True)]
+
+ def storage_get(self, storage_name_id, attribute):
+ return self._run('storage-get', '-s', storage_name_id, attribute,
+ return_output=True, use_json=True)
+
+ def storage_add(self, name, count=1):
+ if not isinstance(count, int) or isinstance(count, bool):
+ raise TypeError('storage count must be integer, got: {} ({})'.format(count,
+ type(count)))
+ self._run('storage-add', '{}={}'.format(name, count))
+
+ def action_get(self):
+ return self._run('action-get', return_output=True, use_json=True)
+
+ def action_set(self, results):
+ self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])
+
+ def action_log(self, message):
+ self._run('action-log', message)
+
+ def action_fail(self, message=''):
+ self._run('action-fail', message)
+
+ def application_version_set(self, version):
+ self._run('application-version-set', '--', version)
+
+ def juju_log(self, level, message):
+ self._run('juju-log', '--log-level', level, "--", message)
+
+ def network_get(self, binding_name, relation_id=None):
+ """Return network info provided by network-get for a given binding.
+
+ Args:
+ binding_name: A name of a binding (relation name or extra-binding name).
+ relation_id: An optional relation id to get network info for.
+ """
+ cmd = ['network-get', binding_name]
+ if relation_id is not None:
+ cmd.extend(['-r', str(relation_id)])
+ try:
+ return self._run(*cmd, return_output=True, use_json=True)
+ except ModelError as e:
+ if 'relation not found' in str(e):
+ raise RelationNotFoundError() from e
+ raise
+
+ def add_metrics(self, metrics, labels=None):
+ cmd = ['add-metric']
+
+ if labels:
+ label_args = []
+ for k, v in labels.items():
+ _ModelBackendValidator.validate_metric_label(k)
+ _ModelBackendValidator.validate_label_value(k, v)
+ label_args.append('{}={}'.format(k, v))
+ cmd.extend(['--labels', ','.join(label_args)])
+
+ metric_args = []
+ for k, v in metrics.items():
+ _ModelBackendValidator.validate_metric_key(k)
+ metric_value = _ModelBackendValidator.format_metric_value(v)
+ metric_args.append('{}={}'.format(k, metric_value))
+ cmd.extend(metric_args)
+ self._run(*cmd)
+
+
+class _ModelBackendValidator:
+ """Provides facilities for validating inputs and formatting them for model backends."""
+
+ METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$')
+
+ @classmethod
+ def validate_metric_key(cls, key):
+ if cls.METRIC_KEY_REGEX.match(key) is None:
+ raise ModelError(
+ 'invalid metric key {!r}: must match {}'.format(
+ key, cls.METRIC_KEY_REGEX.pattern))
+
+ @classmethod
+ def validate_metric_label(cls, label_name):
+ if cls.METRIC_KEY_REGEX.match(label_name) is None:
+ raise ModelError(
+ 'invalid metric label name {!r}: must match {}'.format(
+ label_name, cls.METRIC_KEY_REGEX.pattern))
+
+ @classmethod
+ def format_metric_value(cls, value):
+ try:
+ decimal_value = decimal.Decimal.from_float(value)
+ except TypeError as e:
+ e2 = ModelError('invalid metric value {!r} provided:'
+ ' must be a positive finite float'.format(value))
+ raise e2 from e
+ if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0:
+ raise ModelError('invalid metric value {!r} provided:'
+ ' must be a positive finite float'.format(value))
+ return str(decimal_value)
+
+ @classmethod
+ def validate_label_value(cls, label, value):
+ # Label values cannot be empty, contain commas or equal signs as those are
+ # used by add-metric as separators.
+ if not value:
+ raise ModelError(
+ 'metric label {} has an empty value, which is not allowed'.format(label))
+ v = str(value)
+ if re.search('[,=]', v) is not None:
+ raise ModelError(
+ 'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/storage.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..562cde770bcc3b5961aa6086372f0a2529bbd317
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/storage.py
@@ -0,0 +1,374 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Structures to offer storage to the charm (through Juju or locally)."""
+
+from datetime import timedelta
+import pickle
+import shutil
+import subprocess
+import sqlite3
+import typing
+
+import yaml
+
+
+def _run(args, **kw):
+ cmd = shutil.which(args[0])
+ if cmd is None:
+ raise FileNotFoundError(args[0])
+ return subprocess.run([cmd, *args[1:]], **kw)
+
+
+class SQLiteStorage:
+ """Storage using SQLite backend."""
+
+ DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+ def __init__(self, filename):
+ # The isolation_level argument is set to None such that the implicit
+ # transaction management behavior of the sqlite3 module is disabled.
+ self._db = sqlite3.connect(str(filename),
+ isolation_level=None,
+ timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+ self._setup()
+
+ def _setup(self):
+ """Make the database ready to be used as storage."""
+ # Make sure that the database is locked until the connection is closed,
+ # not until the transaction ends.
+ self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+ c = self._db.execute("BEGIN")
+ c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+ if c.fetchone()[0] == 0:
+ # Keep in mind what might happen if the process dies somewhere below.
+ # The system must not be rendered permanently broken by that.
+ self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+ self._db.execute('''
+ CREATE TABLE notice (
+ sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+ event_path TEXT,
+ observer_path TEXT,
+ method_name TEXT)
+ ''')
+ self._db.commit()
+
+ def close(self):
+ """Part of the Storage API, close the storage backend."""
+ self._db.close()
+
+ def commit(self):
+ """Part of the Storage API, commit latest changes in the storage backend."""
+ self._db.commit()
+
+ # There's commit but no rollback. For abort to be supported, we'll need logic that
+ # can rollback decisions made by third-party code in terms of the internal state
+ # of objects that have been snapshotted, and hooks to let them know about it and
+ # take the needed actions to undo their logic until the last snapshot.
+ # This is doable but would significantly increase the chances of mistakes.
+
+ def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+ """Part of the Storage API, persist a snapshot data under the given handle.
+
+ Args:
+ handle_path: The string identifying the snapshot.
+ snapshot_data: The data to be persisted (as returned by Object.snapshot()). This
+ might be a dict/tuple/int, but must only contain 'simple' Python types.
+ """
+ # Use pickle for serialization, so the value remains portable.
+ raw_data = pickle.dumps(snapshot_data)
+ self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))
+
+ def load_snapshot(self, handle_path: str) -> typing.Any:
+ """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+ Args:
+ handle_path: The string identifying the snapshot.
+
+ Raises:
+ NoSnapshotError: if there is no snapshot for the given handle_path.
+ """
+ c = self._db.cursor()
+ c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+ row = c.fetchone()
+ if row:
+ return pickle.loads(row[0])
+ raise NoSnapshotError(handle_path)
+
+ def drop_snapshot(self, handle_path: str):
+ """Part of the Storage API, remove a snapshot that was previously saved.
+
+ Dropping a snapshot that doesn't exist is treated as a no-op.
+ """
+ self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+ def list_snapshots(self) -> typing.Generator[str, None, None]:
+ """Return the name of all snapshots that are currently saved."""
+ c = self._db.cursor()
+ c.execute("SELECT handle FROM snapshot")
+ while True:
+ rows = c.fetchmany()
+ if not rows:
+ break
+ for row in rows:
+ yield row[0]
+
+ def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+ """Part of the Storage API, record an notice (event and observer)."""
+ self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+ (event_path, observer_path, method_name))
+
+ def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+ """Part of the Storage API, remove a notice that was previously recorded."""
+ self._db.execute('''
+ DELETE FROM notice
+ WHERE event_path=?
+ AND observer_path=?
+ AND method_name=?
+ ''', (event_path, observer_path, method_name))
+
+ def notices(self, event_path: str = None) ->\
+ typing.Generator[typing.Tuple[str, str, str], None, None]:
+ """Part of the Storage API, return all notices that begin with event_path.
+
+ Args:
+ event_path: If supplied, only yield notices that match event_path. If not
+ supplied (or None/''), all notices are returned.
+
+ Returns:
+ Iterable of (event_path, observer_path, method_name) tuples
+ """
+ if event_path:
+ c = self._db.execute('''
+ SELECT event_path, observer_path, method_name
+ FROM notice
+ WHERE event_path=?
+ ORDER BY sequence
+ ''', (event_path,))
+ else:
+ c = self._db.execute('''
+ SELECT event_path, observer_path, method_name
+ FROM notice
+ ORDER BY sequence
+ ''')
+ while True:
+ rows = c.fetchmany()
+ if not rows:
+ break
+ for row in rows:
+ yield tuple(row)
+
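+# A minimal usage sketch of the SQLiteStorage API above (the handle path is a
+# hypothetical example):
+#
+#     store = SQLiteStorage(':memory:')
+#     store.save_snapshot('MyCharm/StoredStateData[_stored]', {'count': 1})
+#     assert store.load_snapshot('MyCharm/StoredStateData[_stored]') == {'count': 1}
+#     store.drop_snapshot('MyCharm/StoredStateData[_stored]')
+#     store.close()
+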
+
+class JujuStorage:
+ """Storing the content tracked by the Framework in Juju.
+
+ This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+ as the way to store state for the framework and for components.
+ """
+
+ NOTICE_KEY = "#notices#"
+
+ def __init__(self, backend: '_JujuStorageBackend' = None):
+ self._backend = backend
+ if backend is None:
+ self._backend = _JujuStorageBackend()
+
+ def close(self):
+ """Part of the Storage API, close the storage backend.
+
+ Nothing to be done for Juju backend, as it's transactional.
+ """
+
+ def commit(self):
+ """Part of the Storage API, commit latest changes in the storage backend.
+
+ Nothing to be done for Juju backend, as it's transactional.
+ """
+
+ def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+ """Part of the Storage API, persist a snapshot data under the given handle.
+
+ Args:
+ handle_path: The string identifying the snapshot.
+ snapshot_data: The data to be persisted (as returned by Object.snapshot()). This
+ might be a dict/tuple/int, but must only contain 'simple' Python types.
+ """
+ self._backend.set(handle_path, snapshot_data)
+
+ def load_snapshot(self, handle_path):
+ """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+ Args:
+ handle_path: The string identifying the snapshot.
+
+ Raises:
+ NoSnapshotError: if there is no snapshot for the given handle_path.
+ """
+ try:
+ content = self._backend.get(handle_path)
+ except KeyError:
+ raise NoSnapshotError(handle_path)
+ return content
+
+ def drop_snapshot(self, handle_path):
+ """Part of the Storage API, remove a snapshot that was previously saved.
+
+ Dropping a snapshot that doesn't exist is treated as a no-op.
+ """
+ self._backend.delete(handle_path)
+
+ def save_notice(self, event_path: str, observer_path: str, method_name: str):
+ """Part of the Storage API, record an notice (event and observer)."""
+ notice_list = self._load_notice_list()
+ notice_list.append([event_path, observer_path, method_name])
+ self._save_notice_list(notice_list)
+
+ def drop_notice(self, event_path: str, observer_path: str, method_name: str):
+ """Part of the Storage API, remove a notice that was previously recorded."""
+ notice_list = self._load_notice_list()
+ notice_list.remove([event_path, observer_path, method_name])
+ self._save_notice_list(notice_list)
+
+ def notices(self, event_path: str = None):
+ """Part of the Storage API, return all notices that begin with event_path.
+
+ Args:
+ event_path: If supplied, only yield notices that match event_path. If not
+ supplied (or None/''), all notices are returned.
+
+ Returns:
+ Iterable of (event_path, observer_path, method_name) tuples
+ """
+ notice_list = self._load_notice_list()
+ for row in notice_list:
+ if event_path and row[0] != event_path:
+ continue
+ yield tuple(row)
+
+ def _load_notice_list(self) -> typing.List[typing.Tuple[str, str, str]]:
+ """Load a notice list from current key.
+
+ Returns:
+ List of (event_path, observer_path, method_name) tuples; empty if the key is missing or its value is None.
+ """
+ try:
+ notice_list = self._backend.get(self.NOTICE_KEY)
+ except KeyError:
+ return []
+ if notice_list is None:
+ return []
+ return notice_list
+
+ def _save_notice_list(self, notices: typing.List[typing.Tuple[str, str, str]]) -> None:
+ """Save a notice list under current key.
+
+ Args:
+ notices: List of (event_path, observer_path, method_name) tuples.
+ """
+ self._backend.set(self.NOTICE_KEY, notices)
+
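+# Notices round-trip through a single YAML list stored under NOTICE_KEY; a
+# sketch (the handle paths are illustrative, and the state-get/state-set hook
+# tools must be available):
+#
+#     store = JujuStorage()
+#     store.save_notice('MyCharm/on/start[5]', 'MyCharm', '_on_start')
+#     list(store.notices())  # -> [('MyCharm/on/start[5]', 'MyCharm', '_on_start')]
+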
+
+class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)):
+ """Handle a couple basic python types.
+
+ yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one
+ that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just
+ subclass SafeLoader and add tuples back in.
+ """
+ # Taken from the example at:
+ # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml
+
+ construct_python_tuple = yaml.Loader.construct_python_tuple
+
+
+_SimpleLoader.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ _SimpleLoader.construct_python_tuple)
+
+
+class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)):
+ """Add types supported by 'marshal'.
+
+ YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So
+ we want to only support dumping out types that are safe to load.
+ """
+
+
+_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple
+_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple)
+
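+# Round-trip sketch for the tuple support wired up above:
+#
+#     text = yaml.dump((1, 2), Dumper=_SimpleDumper, default_flow_style=None)
+#     # text == '!!python/tuple [1, 2]\n'
+#     yaml.load(text, Loader=_SimpleLoader)   # -> (1, 2)
+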
+
+def juju_backend_available() -> bool:
+ """Check if Juju state storage is available."""
+ p = shutil.which('state-get')
+ return p is not None
+
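+# Typical backend selection (a sketch; the SQLite filename is illustrative):
+#
+#     store = JujuStorage() if juju_backend_available() else SQLiteStorage('.unit-state.db')
+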
+
+class _JujuStorageBackend:
+ """Implements the interface from the Operator framework to Juju's state-get/set/etc."""
+
+ def set(self, key: str, value: typing.Any) -> None:
+ """Set a key to a given value.
+
+ Args:
+ key: The string key that will be used to find the value later
+ value: Arbitrary content that will be returned by get().
+
+ Raises:
+ CalledProcessError: if 'state-set' returns an error code.
+ """
+ # default_flow_style=None means that it can use Block for
+ # complex types (types that have nested types) but use flow
+ # for simple types (like an array). Not all versions of PyYAML
+ # have the same default style.
+ encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+ content = yaml.dump(
+ {key: encoded_value}, encoding='utf8', default_style='|',
+ default_flow_style=False,
+ Dumper=_SimpleDumper)
+ _run(["state-set", "--file", "-"], input=content, check=True)
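+ # For example, set('mykey', {'a': 1}) feeds roughly this document to
+ # 'state-set --file -' (exact quoting varies by PyYAML version):
+ #
+ #     mykey: |
+ #       a: 1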
+
+ def get(self, key: str) -> typing.Any:
+ """Get the bytes value associated with a given key.
+
+ Args:
+ key: The string key that will be used to find the value
+ Raises:
+ KeyError: if there is no value stored for the given key.
+ CalledProcessError: if 'state-get' returns an error code.
+ """
+ # We don't capture stderr here so it can end up in debug logs.
+ p = _run(["state-get", key], stdout=subprocess.PIPE, check=True, universal_newlines=True)
+ if p.stdout == '' or p.stdout == '\n':
+ raise KeyError(key)
+ return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+ def delete(self, key: str) -> None:
+ """Remove a key from being tracked.
+
+ Args:
+ key: The key to stop storing
+ Raises:
+ CalledProcessError: if 'state-delete' returns an error code.
+ """
+ _run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+ """Exception to flag that there is no snapshot for the given handle_path."""
+
+ def __init__(self, handle_path):
+ self.handle_path = handle_path
+
+ def __str__(self):
+ return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/testing.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..e70bc98ff661b51cf45c3085ce6a29809a7b3110
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/testing.py
@@ -0,0 +1,826 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Infrastructure to build unittests for Charms using the Operator Framework."""
+
+import inspect
+import pathlib
+import random
+import tempfile
+import typing
+import yaml
+from contextlib import contextmanager
+from textwrap import dedent
+
+from ops import (
+ charm,
+ framework,
+ model,
+ storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+ """This class represents a way to build up the model that will drive a test suite.
+
+ The model that is created is from the viewpoint of the charm that you are testing.
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ # Now instantiate the charm to see events as the model changes
+ harness.begin()
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ # Check that charm has properly handled the relation_joined event for postgresql/0
+ self.assertEqual(harness.charm. ...)
+
+ Args:
+ charm_cls: The Charm class that you'll be testing.
+ meta: A string or file-like object containing the contents of
+ metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+ parent directory of the Charm, and if not found fall back to a trivial
+ 'name: test-charm' metadata.
+ actions: A string or file-like object containing the contents of
+ actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+ parent directory of the Charm.
+ config: A string or file-like object containing the contents of
+ config.yaml. If not supplied, we will look for a 'config.yaml' file in the
+ parent directory of the Charm.
+ """
+
+ def __init__(
+ self,
+ charm_cls: typing.Type[charm.CharmBase],
+ *,
+ meta: OptionalYAML = None,
+ actions: OptionalYAML = None,
+ config: OptionalYAML = None):
+ self._charm_cls = charm_cls
+ self._charm = None
+ self._charm_dir = 'no-disk-path' # this may be updated by _create_meta
+ self._meta = self._create_meta(meta, actions)
+ self._unit_name = self._meta.name + '/0'
+ self._framework = None
+ self._hooks_enabled = True
+ self._relation_id_counter = 0
+ self._backend = _TestingModelBackend(self._unit_name, self._meta)
+ self._model = model.Model(self._meta, self._backend)
+ self._storage = storage.SQLiteStorage(':memory:')
+ self._oci_resources = {}
+ self._framework = framework.Framework(
+ self._storage, self._charm_dir, self._meta, self._model)
+ self._update_config(key_values=self._load_config_defaults(config))
+
+ @property
+ def charm(self) -> charm.CharmBase:
+ """Return the instance of the charm class that was passed to __init__.
+
+ Note that the Charm is not instantiated until you have called
+ :meth:`.begin()`.
+ """
+ return self._charm
+
+ @property
+ def model(self) -> model.Model:
+ """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+ return self._model
+
+ @property
+ def framework(self) -> framework.Framework:
+ """Return the Framework that is being driven by this Harness."""
+ return self._framework
+
+ def begin(self) -> None:
+ """Instantiate the Charm and start handling events.
+
+ Before calling :meth:`begin`, there is no Charm instance, so changes to the Model won't
+ emit events. You must call :meth:`.begin` before :attr:`.charm` is valid.
+ """
+ if self._charm is not None:
+ raise RuntimeError('cannot call the begin method on the harness more than once')
+
+ # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+ # the original class against multiple Frameworks. So create a locally defined class
+ # and register it.
+ # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+ # class attributes, which should clean up this ugliness. The API can stay the same.
+ class TestEvents(self._charm_cls.on.__class__):
+ pass
+
+ TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+ class TestCharm(self._charm_cls):
+ on = TestEvents()
+
+ # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+ # rather than TestCharm has no attribute foo.
+ TestCharm.__name__ = self._charm_cls.__name__
+ self._charm = TestCharm(self._framework)
+
+ def begin_with_initial_hooks(self) -> None:
+ """Called when you want the Harness to fire the same hooks that Juju would fire at startup.
+
+ This triggers install, relation-created, config-changed, start, and any relation-joined
+ hooks, based on what relations have been defined before you called begin().
+ Note that all of these are fired before returning control to the test suite, so if you
+ want to introspect what happens at each step, you need to fire them directly
+ (eg Charm.on.install.emit()).
+
+ To use this with all the normal hooks, you should instantiate the harness, set up any
+ relations that you want active when the charm starts, and then call this method.
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ harness.set_leader(True)
+ harness.update_config({'initial': 'config'})
+ harness.begin_with_initial_hooks()
+ # This will cause
+ # install, db-relation-created('postgresql'), leader-elected, config-changed, start,
+ # db-relation-joined('postgresql/0'), db-relation-changed('postgresql/0')
+ # to be fired.
+ """
+ self.begin()
+ # TODO: jam 2020-08-03 This should also handle storage-attached hooks once we have support
+ # for dealing with storage.
+ self._charm.on.install.emit()
+ # Juju itself iterates what relation to fire based on a map[int]relation, so it doesn't
+ # guarantee a stable ordering between relation events. It *does* give a stable ordering
+ # of joined units for a given relation.
+ items = list(self._meta.relations.items())
+ random.shuffle(items)
+ this_app_name = self._meta.name
+ for relname, rel_meta in items:
+ if rel_meta.role == charm.RelationRole.peer:
+ # If the user has directly added a relation, leave it be, but otherwise ensure
+ # that peer relations are always established before leader-elected.
+ rel_ids = self._backend._relation_ids_map.get(relname)
+ if rel_ids is None:
+ self.add_relation(relname, self._meta.name)
+ else:
+ random.shuffle(rel_ids)
+ for rel_id in rel_ids:
+ self._emit_relation_created(relname, rel_id, this_app_name)
+ else:
+ rel_ids = self._backend._relation_ids_map.get(relname, [])
+ random.shuffle(rel_ids)
+ for rel_id in rel_ids:
+ app_name = self._backend._relation_app_and_units[rel_id]["app"]
+ self._emit_relation_created(relname, rel_id, app_name)
+ if self._backend._is_leader:
+ self._charm.on.leader_elected.emit()
+ else:
+ self._charm.on.leader_settings_changed.emit()
+ self._charm.on.config_changed.emit()
+ self._charm.on.start.emit()
+ all_ids = list(self._backend._relation_names.items())
+ random.shuffle(all_ids)
+ for rel_id, rel_name in all_ids:
+ rel_app_and_units = self._backend._relation_app_and_units[rel_id]
+ app_name = rel_app_and_units["app"]
+ # Note: Juju *does* fire relation events for a given relation in the sorted order of
+ # the unit names. It also always fires relation-changed immediately after
+ # relation-joined for the same unit.
+ # Juju only fires relation-changed (app) if there is data for the related application
+ relation = self._model.get_relation(rel_name, rel_id)
+ if self._backend._relation_data[rel_id].get(app_name):
+ app = self._model.get_app(app_name)
+ self._charm.on[rel_name].relation_changed.emit(
+ relation, app, None)
+ for unit_name in sorted(rel_app_and_units["units"]):
+ remote_unit = self._model.get_unit(unit_name)
+ self._charm.on[rel_name].relation_joined.emit(
+ relation, remote_unit.app, remote_unit)
+ self._charm.on[rel_name].relation_changed.emit(
+ relation, remote_unit.app, remote_unit)
+
+ def cleanup(self) -> None:
+ """Called by your test infrastructure to cleanup any temporary directories/files/etc.
+
+ Currently this only needs to be called if you test with resources. But it is reasonable
+ to always include a `testcase.addCleanup(harness.cleanup)` just in case.
+ """
+ self._backend._cleanup()
+
+ def _create_meta(self, charm_metadata, action_metadata):
+ """Create a CharmMeta object.
+
+ Handle the cases where a user doesn't supply explicit metadata snippets.
+ """
+ filename = inspect.getfile(self._charm_cls)
+ charm_dir = pathlib.Path(filename).parents[1]
+
+ if charm_metadata is None:
+ metadata_path = charm_dir / 'metadata.yaml'
+ if metadata_path.is_file():
+ charm_metadata = metadata_path.read_text()
+ self._charm_dir = charm_dir
+ else:
+ # The simplest of metadata that the framework can support
+ charm_metadata = 'name: test-charm'
+ elif isinstance(charm_metadata, str):
+ charm_metadata = dedent(charm_metadata)
+
+ if action_metadata is None:
+ actions_path = charm_dir / 'actions.yaml'
+ if actions_path.is_file():
+ action_metadata = actions_path.read_text()
+ self._charm_dir = charm_dir
+ elif isinstance(action_metadata, str):
+ action_metadata = dedent(action_metadata)
+
+ return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+ def _load_config_defaults(self, charm_config):
+ """Load default values from config.yaml.
+
+ Handle the case where a user doesn't supply explicit config snippets.
+ """
+ filename = inspect.getfile(self._charm_cls)
+ charm_dir = pathlib.Path(filename).parents[1]
+
+ if charm_config is None:
+ config_path = charm_dir / 'config.yaml'
+ if config_path.is_file():
+ charm_config = config_path.read_text()
+ self._charm_dir = charm_dir
+ else:
+ # The simplest of config that the framework can support
+ charm_config = '{}'
+ elif isinstance(charm_config, str):
+ charm_config = dedent(charm_config)
+ charm_config = yaml.load(charm_config, Loader=yaml.SafeLoader)
+ charm_config = charm_config.get('options', {})
+ return {key: value['default'] for key, value in charm_config.items()
+ if 'default' in value}
+
+ def add_oci_resource(self, resource_name: str,
+ contents: typing.Mapping[str, str] = None) -> None:
+ """Add oci resources to the backend.
+
+ This will register an oci resource and create a temporary file for processing metadata
+ about the resource. A default set of values will be used for all the file contents
+ unless a specific contents dict is provided.
+
+ Args:
+ resource_name: Name of the resource to add custom contents to.
+ contents: Optional custom dict to write for the named resource.
+ """
+ if not contents:
+ contents = {'registrypath': 'registrypath',
+ 'username': 'username',
+ 'password': 'password',
+ }
+ if resource_name not in self._meta.resources.keys():
+ raise RuntimeError('Resource {} is not a defined resource'.format(resource_name))
+ if self._meta.resources[resource_name].type != "oci-image":
+ raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name))
+
+ as_yaml = yaml.dump(contents, Dumper=yaml.SafeDumper)
+ self._backend._resources_map[resource_name] = ('contents.yaml', as_yaml)
+
+ def add_resource(self, resource_name: str, content: typing.AnyStr) -> None:
+ """Add content for a resource to the backend.
+
+ This will register the content, so that a call to `Model.resources.fetch(resource_name)`
+ will return a path to a file containing that content.
+
+ Args:
+ resource_name: The name of the resource being added
+ content: Either string or bytes content, which will be the content of the filename
+ returned by resource-get. If content is a string, it will be encoded as utf-8.
+ """
+ if resource_name not in self._meta.resources.keys():
+ raise RuntimeError('Resource {} is not a defined resource'.format(resource_name))
+ record = self._meta.resources[resource_name]
+ if record.type != "file":
+ raise RuntimeError(
+ 'Resource {} is not a file, but actually {}'.format(resource_name, record.type))
+ filename = record.filename
+ if filename is None:
+ filename = resource_name
+
+ self._backend._resources_map[resource_name] = (filename, content)
+
+ def populate_oci_resources(self) -> None:
+ """Populate all OCI resources."""
+ for name, data in self._meta.resources.items():
+ if data.type == "oci-image":
+ self.add_oci_resource(name)
+
+ def disable_hooks(self) -> None:
+ """Stop emitting hook events when the model changes.
+
+ This can be used by developers to stop changes to the model from emitting events that
+ the charm will react to. Call :meth:`.enable_hooks`
+ to re-enable them.
+ """
+ self._hooks_enabled = False
+
+ def enable_hooks(self) -> None:
+ """Re-enable hook events from charm.on when the model is changed.
+
+ By default hook events are enabled once you call :meth:`.begin`,
+ but if you have used :meth:`.disable_hooks`, this can be used to
+ enable them again.
+ """
+ self._hooks_enabled = True
+
+ @contextmanager
+ def hooks_disabled(self):
+ """A context manager to run code with hooks disabled.
+
+ Example::
+
+ with harness.hooks_disabled():
+ # things in here don't fire events
+ harness.set_leader(True)
+ harness.update_config(unset=['foo', 'bar'])
+ # things here will again fire events
+ """
+ if self._hooks_enabled:
+ self.disable_hooks()
+ try:
+ yield None
+ finally:
+ self.enable_hooks()
+ else:
+ yield None
+
+ def _next_relation_id(self):
+ rel_id = self._relation_id_counter
+ self._relation_id_counter += 1
+ return rel_id
+
+ def add_relation(self, relation_name: str, remote_app: str) -> int:
+ """Declare that there is a new relation between this app and `remote_app`.
+
+ Args:
+ relation_name: The relation on Charm that is being related to
+ remote_app: The name of the application that is being related to
+
+ Return:
+ The relation_id created by this add_relation.
+ """
+ rel_id = self._next_relation_id()
+ self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
+ self._backend._relation_names[rel_id] = relation_name
+ self._backend._relation_list_map[rel_id] = []
+ self._backend._relation_data[rel_id] = {
+ remote_app: {},
+ self._backend.unit_name: {},
+ self._backend.app_name: {},
+ }
+ self._backend._relation_app_and_units[rel_id] = {
+ "app": remote_app,
+ "units": [],
+ }
+ # Reload the relation_ids list
+ if self._model is not None:
+ self._model.relations._invalidate(relation_name)
+ self._emit_relation_created(relation_name, rel_id, remote_app)
+ return rel_id
+
+ def _emit_relation_created(self, relation_name: str, relation_id: int,
+ remote_app: str) -> None:
+ """Trigger relation-created for a given relation with a given remote application."""
+ if self._charm is None or not self._hooks_enabled:
+ return
+ relation = self._model.get_relation(relation_name, relation_id)
+ app = self._model.get_app(remote_app)
+ self._charm.on[relation_name].relation_created.emit(
+ relation, app)
+
+ def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
+ """Add a new unit to a relation.
+
+ Example::
+
+ rel_id = harness.add_relation('db', 'postgresql')
+ harness.add_relation_unit(rel_id, 'postgresql/0')
+
+ This will trigger a `relation_joined` event. This would naturally be
+ followed by a `relation_changed` event, which you can trigger with
+ :meth:`.update_relation_data`. This separation is artificial in the
+ sense that Juju will always fire the two, but is intended to make
+ testing relations and their data bags slightly more natural.
+
+ Args:
+ relation_id: The integer relation identifier (as returned by add_relation).
+ remote_unit_name: A string representing the remote unit that is being added.
+
+ Return:
+ None
+ """
+ self._backend._relation_list_map[relation_id].append(remote_unit_name)
+ self._backend._relation_data[relation_id][remote_unit_name] = {}
+ # TODO: jam 2020-08-03 This is where we could assert that the unit name matches the
+ # application name (eg you don't have a relation to 'foo' but add units of 'bar/0'
+ self._backend._relation_app_and_units[relation_id]["units"].append(remote_unit_name)
+ relation_name = self._backend._relation_names[relation_id]
+ # Make sure that the Model reloads the relation_list for this relation_id, as well as
+ # reloading the relation data for this unit.
+ if self._model is not None:
+ remote_unit = self._model.get_unit(remote_unit_name)
+ relation = self._model.get_relation(relation_name, relation_id)
+ unit_cache = relation.data.get(remote_unit, None)
+ if unit_cache is not None:
+ unit_cache._invalidate()
+ self._model.relations._invalidate(relation_name)
+ if self._charm is None or not self._hooks_enabled:
+ return
+ self._charm.on[relation_name].relation_joined.emit(
+ relation, remote_unit.app, remote_unit)
+
+ def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
+ """Get the relation data bucket for a single app or unit in a given relation.
+
+ This ignores all of the safety checks of who can and can't see data in relations (eg,
+ non-leaders can't read their own application's relation data because there are no events
+ that keep that data up-to-date for the unit).
+
+ Args:
+ relation_id: The relation whose content we want to look at.
+ app_or_unit: The name of the application or unit whose data we want to read
+ Return:
+ a dict containing the relation data for `app_or_unit` or None.
+
+ Raises:
+ KeyError: if relation_id doesn't exist
+ """
+ return self._backend._relation_data[relation_id].get(app_or_unit, None)
+
+ def get_pod_spec(self) -> typing.Tuple[typing.Mapping, typing.Mapping]:
+ """Return the content of the pod spec as last set by the charm.
+
+ This returns both the pod spec and any k8s_resources that were supplied.
+ See the signature of Model.pod.set_spec
+ """
+ return self._backend._pod_spec
+
+ def get_workload_version(self) -> str:
+ """Read the workload version that was set by the unit."""
+ return self._backend._workload_version
+
+ def set_model_name(self, name: str) -> None:
+ """Set the name of the Model that this is representing.
+
+ This cannot be called once begin() has been called. But it lets you set the value that
+ will be returned by Model.name.
+ """
+ if self._charm is not None:
+ raise RuntimeError('cannot set the Model name after begin()')
+ self._backend.model_name = name
+
+ def update_relation_data(
+ self,
+ relation_id: int,
+ app_or_unit: str,
+ key_values: typing.Mapping,
+ ) -> None:
+ """Update the relation data for a given unit or application in a given relation.
+
+ This also triggers the `relation_changed` event for this relation_id.
+
+ Args:
+ relation_id: The integer relation_id representing this relation.
+ app_or_unit: The unit or application name that is being updated.
+ This can be the local or remote application.
+ key_values: Each key/value will be updated in the relation data.
+ """
+ relation_name = self._backend._relation_names[relation_id]
+ relation = self._model.get_relation(relation_name, relation_id)
+ if '/' in app_or_unit:
+ entity = self._model.get_unit(app_or_unit)
+ else:
+ entity = self._model.get_app(app_or_unit)
+ rel_data = relation.data.get(entity, None)
+ if rel_data is not None:
+ # rel_data may have cached now-stale data, so _invalidate() it.
+ # Note, this won't cause the data to be loaded if it wasn't already.
+ rel_data._invalidate()
+
+ new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
+ for k, v in key_values.items():
+ if v == '':
+ new_values.pop(k, None)
+ else:
+ new_values[k] = v
+ self._backend._relation_data[relation_id][app_or_unit] = new_values
+
+ if app_or_unit == self._model.unit.name:
+ # No events for our own unit
+ return
+ if app_or_unit == self._model.app.name:
+ # updating our own app only generates an event if it is a peer relation and we
+ # aren't the leader
+ is_peer = self._meta.relations[relation_name].role.is_peer()
+ if not is_peer:
+ return
+ if self._model.unit.is_leader():
+ return
+ self._emit_relation_changed(relation_id, app_or_unit)
+
+ def _emit_relation_changed(self, relation_id, app_or_unit):
+ if self._charm is None or not self._hooks_enabled:
+ return
+ rel_name = self._backend._relation_names[relation_id]
+ relation = self.model.get_relation(rel_name, relation_id)
+ if '/' in app_or_unit:
+ app_name = app_or_unit.split('/')[0]
+ unit_name = app_or_unit
+ app = self.model.get_app(app_name)
+ unit = self.model.get_unit(unit_name)
+ args = (relation, app, unit)
+ else:
+ app_name = app_or_unit
+ app = self.model.get_app(app_name)
+ args = (relation, app)
+ self._charm.on[rel_name].relation_changed.emit(*args)
+
+ def _update_config(
+ self,
+ key_values: typing.Mapping[str, str] = None,
+ unset: typing.Iterable[str] = (),
+ ) -> None:
+ """Update the config as seen by the charm.
+
+ This will *not* trigger a `config_changed` event, and is intended for internal use.
+
+ Note that the `key_values` mapping will only add or update configuration items.
+ To remove existing ones, see the `unset` parameter.
+
+ Args:
+ key_values: A Mapping of key:value pairs to update in config.
+ unset: An iterable of keys to remove from Config. (Note that this does
+ not currently reset the config values to the default defined in config.yaml.)
+ """
+ # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
+ # is a LazyMapping, but its _load returns a dict and this method mutates
+ # the dict that Config is caching. Arguably we should be doing some sort
+ # of charm.framework.model.config._invalidate()
+ config = self._backend._config
+ if key_values is not None:
+ for key, value in key_values.items():
+ config[key] = value
+ for key in unset:
+ config.pop(key, None)
+
+ def update_config(
+ self,
+ key_values: typing.Mapping[str, str] = None,
+ unset: typing.Iterable[str] = (),
+ ) -> None:
+ """Update the config as seen by the charm.
+
+ This will trigger a `config_changed` event.
+
+ Note that the `key_values` mapping will only add or update configuration items.
+ To remove existing ones, see the `unset` parameter.
+
+ Args:
+ key_values: A Mapping of key:value pairs to update in config.
+ unset: An iterable of keys to remove from Config. (Note that this does
+ not currently reset the config values to the default defined in config.yaml.)
+ """
+ self._update_config(key_values, unset)
+ if self._charm is None or not self._hooks_enabled:
+ return
+ self._charm.on.config_changed.emit()
+
+ def set_leader(self, is_leader: bool = True) -> None:
+ """Set whether this unit is the leader or not.
+
+ If this charm becomes a leader then `leader_elected` will be triggered.
+
+ Args:
+ is_leader: True/False as to whether this unit is the leader.
+ """
+ was_leader = self._backend._is_leader
+ self._backend._is_leader = is_leader
+ # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
+ # the Model objects, so this automatically gets noticed.
+ if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+ self._charm.on.leader_elected.emit()
+
+ def _get_backend_calls(self, reset: bool = True) -> list:
+ """Return the calls that we have made to the TestingModelBackend.
+
+ This is useful mostly for testing the framework itself, so that we can assert that we
+ do/don't trigger extra calls.
+
+ Args:
+ reset: If True, reset the call list back to empty; if False, the call
+ list is preserved.
+
+ Return:
+ ``[(call1, args...), (call2, args...)]``
+ """
+ calls = self._backend._calls.copy()
+ if reset:
+ self._backend._calls.clear()
+ return calls
+
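+# End-to-end sketch of driving a charm test with the Harness (MyCharm is a
+# hypothetical charm whose metadata defines a 'db' relation):
+#
+#     harness = Harness(MyCharm)
+#     self.addCleanup(harness.cleanup)
+#     relation_id = harness.add_relation('db', 'postgresql')
+#     harness.set_leader(True)
+#     harness.begin()
+#     harness.add_relation_unit(relation_id, 'postgresql/0')
+#     harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+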
+
+def _record_calls(cls):
+ """Replace methods on cls with methods that record that they have been called.
+
+ Iterate all attributes of cls, and for public methods, replace them with a wrapped method
+ that records the method called along with the arguments and keyword arguments.
+ """
+ for meth_name, orig_method in cls.__dict__.items():
+ if meth_name.startswith('_'):
+ continue
+
+ def decorator(orig_method):
+ def wrapped(self, *args, **kwargs):
+ full_args = (orig_method.__name__,) + args
+ if kwargs:
+ full_args = full_args + (kwargs,)
+ self._calls.append(full_args)
+ return orig_method(self, *args, **kwargs)
+ return wrapped
+
+ setattr(cls, meth_name, decorator(orig_method))
+ return cls
+
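+# After decoration, every public-method call is recorded on the instance, e.g.:
+#
+#     backend.is_leader()
+#     backend._calls   # -> [('is_leader',)]
+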
+
+class _ResourceEntry:
+ """Tracks the contents of a Resource."""
+
+ def __init__(self, resource_name):
+ self.name = resource_name
+
+
+@_record_calls
+class _TestingModelBackend:
+ """This conforms to the interface for ModelBackend but provides canned data.
+
+ DO NOT use this class directly; it is used by `Harness`_ to drive the model.
+ `Harness`_ is responsible for maintaining the internal consistency of the values here,
+ as the only public methods of this type are for implementing ModelBackend.
+ """
+
+ def __init__(self, unit_name, meta):
+ self.unit_name = unit_name
+ self.app_name = self.unit_name.split('/')[0]
+ self.model_name = None
+ self._calls = []
+ self._meta = meta
+ self._relation_ids_map = {} # relation name to [relation_ids,...]
+ self._relation_names = {} # reverse map from relation_id to relation_name
+ self._relation_list_map = {} # relation_id: [unit_name,...]
+ self._relation_data = {} # {relation_id: {name: data}}
+ # {relation_id: {"app": app_name, "units": ["app/0", ...]}}
+ self._relation_app_and_units = {}
+ self._config = {}
+ self._is_leader = False
+ self._resources_map = {} # {resource_name: resource_content}
+ self._pod_spec = None
+ self._app_status = {'status': 'unknown', 'message': ''}
+ self._unit_status = {'status': 'maintenance', 'message': ''}
+ self._workload_version = None
+ self._resource_dir = None
+
+ def _cleanup(self):
+ if self._resource_dir is not None:
+ self._resource_dir.cleanup()
+ self._resource_dir = None
+
+ def _get_resource_dir(self) -> pathlib.Path:
+ if self._resource_dir is None:
+ # In actual Juju, the resource path for a charm's resource is
+ # $AGENT_DIR/resources/$RESOURCE_NAME/$RESOURCE_FILENAME
+ # However, charms shouldn't depend on this.
+ self._resource_dir = tempfile.TemporaryDirectory(prefix='tmp-ops-test-resource-')
+ return pathlib.Path(self._resource_dir.name)
+
+ def relation_ids(self, relation_name):
+ try:
+ return self._relation_ids_map[relation_name]
+ except KeyError as e:
+ if relation_name not in self._meta.relations:
+ raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
+ return []
+
+ def relation_list(self, relation_id):
+ try:
+ return self._relation_list_map[relation_id]
+ except KeyError as e:
+ raise model.RelationNotFoundError from e
+
+ def relation_get(self, relation_id, member_name, is_app):
+ if is_app and '/' in member_name:
+ member_name = member_name.split('/')[0]
+ if relation_id not in self._relation_data:
+ raise model.RelationNotFoundError()
+ return self._relation_data[relation_id][member_name].copy()
+
+ def relation_set(self, relation_id, key, value, is_app):
+ relation = self._relation_data[relation_id]
+ if is_app:
+ bucket_key = self.app_name
+ else:
+ bucket_key = self.unit_name
+ if bucket_key not in relation:
+ relation[bucket_key] = {}
+ bucket = relation[bucket_key]
+ if value == '':
+ bucket.pop(key, None)
+ else:
+ bucket[key] = value
+
+ def config_get(self):
+ return self._config
+
+ def is_leader(self):
+ return self._is_leader
+
+ def application_version_set(self, version):
+ self._workload_version = version
+
+ def resource_get(self, resource_name):
+ if resource_name not in self._resources_map:
+ raise model.ModelError(
+ "ERROR could not download resource: HTTP request failed: "
+ "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format(
+ self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name
+ ))
+ filename, contents = self._resources_map[resource_name]
+ resource_dir = self._get_resource_dir()
+ resource_filename = resource_dir / resource_name / filename
+ if not resource_filename.exists():
+ if isinstance(contents, bytes):
+ mode = 'wb'
+ else:
+ mode = 'wt'
+ resource_filename.parent.mkdir(exist_ok=True)
+ with resource_filename.open(mode=mode) as resource_file:
+ resource_file.write(contents)
+ return resource_filename
+
+ def pod_spec_set(self, spec, k8s_resources):
+ self._pod_spec = (spec, k8s_resources)
+
+ def status_get(self, *, is_app=False):
+ if is_app:
+ return self._app_status
+ else:
+ return self._unit_status
+
+ def status_set(self, status, message='', *, is_app=False):
+ if is_app:
+ self._app_status = {'status': status, 'message': message}
+ else:
+ self._unit_status = {'status': status, 'message': message}
+
+ def storage_list(self, name):
+ raise NotImplementedError(self.storage_list)
+
+ def storage_get(self, storage_name_id, attribute):
+ raise NotImplementedError(self.storage_get)
+
+ def storage_add(self, name, count=1):
+ raise NotImplementedError(self.storage_add)
+
+ def action_get(self):
+ raise NotImplementedError(self.action_get)
+
+ def action_set(self, results):
+ raise NotImplementedError(self.action_set)
+
+ def action_log(self, message):
+ raise NotImplementedError(self.action_log)
+
+ def action_fail(self, message=''):
+ raise NotImplementedError(self.action_fail)
+
+ def network_get(self, endpoint_name, relation_id=None):
+ raise NotImplementedError(self.network_get)
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/version.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..db9e98175100d8045815d0ee215d2f5f76ad468f
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/ops/version.py
@@ -0,0 +1,3 @@
+# this is a generated file
+
+version = '1.1.0'
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/__init__.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86d07b5525d10bf1d543be0e1f5d01af897a4b49
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/__init__.py
@@ -0,0 +1,427 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '5.4.1'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+#------------------------------------------------------------------------------
+# Warnings control
+#------------------------------------------------------------------------------
+
+# 'Global' warnings state:
+_warnings_enabled = {
+ 'YAMLLoadWarning': True,
+}
+
+# Get or set the global warnings state
+def warnings(settings=None):
+ if settings is None:
+ return _warnings_enabled
+
+ if type(settings) is dict:
+ for key in settings:
+ if key in _warnings_enabled:
+ _warnings_enabled[key] = settings[key]
+
+# Warn when load() is called without Loader=...
+class YAMLLoadWarning(RuntimeWarning):
+ pass
+
+def load_warning(method):
+ if _warnings_enabled['YAMLLoadWarning'] is False:
+ return
+
+ import warnings
+
+ message = (
+ "calling yaml.%s() without Loader=... is deprecated, as the "
+ "default Loader is unsafe. Please read "
+ "https://msg.pyyaml.org/load for full details."
+ ) % method
+
+ warnings.warn(message, YAMLLoadWarning, stacklevel=3)
+
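+# Usage sketch: silence the load() deprecation warning globally.
+#
+#     import yaml
+#     yaml.warnings({'YAMLLoadWarning': False})
+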
+#------------------------------------------------------------------------------
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=None):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ if Loader is None:
+ load_warning('load')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=None):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ if Loader is None:
+ load_warning('load_all')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def full_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, FullLoader)
+
+def full_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, FullLoader)
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load_all(stream, SafeLoader)
+
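+# Quick example (safe on untrusted input):
+#
+#     safe_load("a: 1\nb: [2, 3]")   # -> {'a': 1, 'b': [2, 3]}
+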
+def unsafe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, UnsafeLoader)
+
+def unsafe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, UnsafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
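+# Quick example:
+#
+#     safe_dump({'a': 1, 'b': [2, 3]})   # -> 'a: 1\nb:\n- 2\n- 3\n'
+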
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=None, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ if Loader is None:
+ loader.Loader.add_implicit_resolver(tag, regexp, first)
+ loader.FullLoader.add_implicit_resolver(tag, regexp, first)
+ loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
+ else:
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ if Loader is None:
+ loader.Loader.add_path_resolver(tag, path, kind)
+ loader.FullLoader.add_path_resolver(tag, path, kind)
+ loader.UnsafeLoader.add_path_resolver(tag, path, kind)
+ else:
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=None):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_constructor(tag, constructor)
+ loader.FullLoader.add_constructor(tag, constructor)
+ loader.UnsafeLoader.add_constructor(tag, constructor)
+ else:
+ Loader.add_constructor(tag, constructor)
+
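+# Sketch: register a constructor for a custom tag (the '!point' tag is
+# hypothetical):
+#
+#     add_constructor('!point',
+#                     lambda loader, node: tuple(loader.construct_sequence(node)),
+#                     Loader=SafeLoader)
+#     safe_load('!point [1, 2]')   # -> (1, 2)
+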
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ if isinstance(cls.yaml_loader, list):
+ for loader in cls.yaml_loader:
+ loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ else:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = [Loader, FullLoader, UnsafeLoader]
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
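+# Minimal YAMLObject subclass sketch ('Monster' and '!Monster' are hypothetical):
+#
+#     class Monster(YAMLObject):
+#         yaml_tag = '!Monster'
+#         def __init__(self, name):
+#             self.name = name
+#
+#     load('!Monster {name: Cave}', Loader=Loader)   # -> a Monster instance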
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so
new file mode 100755
index 0000000000000000000000000000000000000000..801c0e2a72a862a33d640a576ca969b684dc8e86
Binary files /dev/null and b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so differ
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/composer.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/composer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d15cb40e3b4198819c91c6f8d8b32807fcf53b2
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+ # Check whether there are more documents available.
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurrence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/constructor.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..619acd3070a4845c653fcf22a626e05158035bc2
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/constructor.py
@@ -0,0 +1,748 @@
+
+__all__ = [
+ 'BaseConstructor',
+ 'SafeConstructor',
+ 'FullConstructor',
+ 'UnsafeConstructor',
+ 'Constructor',
+ 'ConstructorError'
+]
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+ # Are there more documents available?
+ return self.check_node()
+
+ def check_state_key(self, key):
+ """Block special attributes/methods from being set in a newly created
+ object, to prevent user-controlled methods from being called during
+ deserialization"""
+ if self.get_state_keys_blacklist_regexp().match(key):
+ raise ConstructorError(None, None,
+ "blacklisted key '%s' in instance state found" % (key,), None)
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag_prefix is not None and node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+ if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
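+ # Handles the YAML merge key ('<<'), e.g.
+ # base: &b {x: 1}
+ # child: {<<: *b, y: 2} # loads as {'x': 1, 'y': 2}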
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
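+ # Note that YAML 1.1 resolves unquoted yes/no/on/off as booleans, so a
+ # scalar like `on` loads as True under SafeConstructor.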
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
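+ # Base-60 (sexagesimal) integers from YAML 1.1, e.g. '190:20:30'
+ # loads as 190*3600 + 20*60 + 30 == 685230.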
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
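+ # Base-60 (sexagesimal) floats, e.g. '190:20:30.15' loads as 685230.15.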
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
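+ # Accepts YAML 1.1 timestamps, e.g. '2001-12-14' (date only) or
+ # '2001-12-14 21:59:43.10 -5' (with time and timezone offset).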
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ tzinfo = None
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ tzinfo = datetime.timezone(delta)
+ elif values['tz']:
+ tzinfo = datetime.timezone.utc
+ return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ tzinfo=tzinfo)
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class FullConstructor(SafeConstructor):
+ # 'extend' is blacklisted because it is used by
+ # construct_python_object_apply to add `listitems` to a newly generated
+ # Python instance.
+ def get_state_keys_blacklist(self):
+ return ['^extend$', '^__.*__$']
+
+ def get_state_keys_blacklist_regexp(self):
+ if not hasattr(self, 'state_keys_blacklist_regexp'):
+ self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+ return self.state_keys_blacklist_regexp
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ if unsafe:
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ if name not in sys.modules:
+ raise ConstructorError("while constructing a Python module", mark,
+ "module %r is not imported" % name, mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ if unsafe:
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ if module_name not in sys.modules:
+ raise ConstructorError("while constructing a Python object", mark,
+ "module %r is not imported" % module_name, mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False, unsafe=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if not (unsafe or isinstance(cls, type)):
+ raise ConstructorError("while constructing a Python instance", node.start_mark,
+ "expected a class, but found %r" % type(cls),
+ node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state, unsafe=False):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ if not unsafe and state:
+ for key in state.keys():
+ self.check_state_key(key)
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ if not unsafe:
+ self.check_state_key(key)
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+ # is how an object is created, check make_python_instance for details.
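+ # For example (illustrative document):
+ # !!python/object/apply:collections.OrderedDict
+ # args: [[[a, 1], [b, 2]]]
+ # constructs collections.OrderedDict([('a', 1), ('b', 2)]).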
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ FullConstructor.construct_yaml_null)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ FullConstructor.construct_yaml_bool)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ FullConstructor.construct_python_str)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ FullConstructor.construct_python_unicode)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ FullConstructor.construct_python_bytes)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ FullConstructor.construct_yaml_int)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ FullConstructor.construct_python_long)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ FullConstructor.construct_yaml_float)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ FullConstructor.construct_python_complex)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ FullConstructor.construct_yaml_seq)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ FullConstructor.construct_python_tuple)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ FullConstructor.construct_yaml_map)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ FullConstructor.construct_python_name)
+
+class UnsafeConstructor(FullConstructor):
+
+ def find_python_module(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
+
+ def find_python_name(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ return super(UnsafeConstructor, self).make_python_instance(
+ suffix, node, args, kwds, newobj, unsafe=True)
+
+ def set_python_instance_state(self, instance, state):
+ return super(UnsafeConstructor, self).set_python_instance_state(
+ instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ UnsafeConstructor.construct_python_module)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ UnsafeConstructor.construct_python_object)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ UnsafeConstructor.construct_python_object_new)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ UnsafeConstructor.construct_python_object_apply)
+
+# Constructor is the same as UnsafeConstructor. It is kept in place in case
+# people have extended it directly.
+class Constructor(UnsafeConstructor):
+ pass
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/cyaml.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/cyaml.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c21345879b298bb8668201bebe7d289586b17f9
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/cyaml.py
@@ -0,0 +1,101 @@
+
+__all__ = [
+ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper'
+]
+
+from yaml._yaml import CParser, CEmitter
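+# These bindings are only importable when PyYAML was built against the
+# libyaml C library, which provides CParser and CEmitter.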
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CFullLoader(CParser, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ UnsafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/dumper.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/dumper.py
new file mode 100644
index 0000000000000000000000000000000000000000..6aadba551f3836b02f4752277f4b3027073defad
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
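+# Usage sketch (illustrative): the top-level yaml.dump(data, Dumper=SafeDumper)
+# routes representation through one of these dumper classes.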
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/emitter.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/emitter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a664d011162af69184df2f8e59ab7feec818f7c7
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
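+# For example, emitting the document "a: 1" consumes the event sequence
+# StreamStart, DocumentStart, MappingStart, Scalar('a'), Scalar('1'),
+# MappingEnd, DocumentEnd, StreamEnd.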
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+ # - is it whitespace?
+ # - is it an indentation character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+ # In some cases, we wait for a few next events before emitting.
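+ # For example, a MappingStartEvent is buffered until up to three further
+ # events are queued, enough to tell whether the mapping is empty and
+ # whether its first key fits on one line as a simple key.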
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+ # First character or preceded by a whitespace.
+ preceded_by_whitespace = True
+
+ # Last character or followed by a whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+ # Some indicators cannot appear within a scalar either.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'
+ or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespace is bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+ # Spaces followed by breaks, as well as special characters, are only
+ # allowed for double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
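+
+ # Illustrative note (not in upstream PyYAML): characters listed above
+ # are emitted as short escapes; any other non-printable character falls
+ # back to \xNN, \uNNNN or \UNNNNNNNN in write_double_quoted below:
+ #
+ # >>> import yaml
+ # >>> yaml.dump('a\tb\x01')
+ # '"a\\tb\\x01"\n'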
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
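+
+ # Illustrative hints (not in upstream PyYAML), assuming best_indent == 2:
+ # 'text\n' -> '' (clip chomping, the default)
+ # 'text' -> '-' (strip: no trailing line break to keep)
+ # 'text\n\n' -> '+' (keep: preserve the trailing breaks)
+ # ' text\n' -> '2' (explicit indentation indicator for the leading space)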
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/error.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..b796b4dc519512c4825ff539a2e6aa20f4d370d0
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
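+# Illustrative sketch (not part of upstream PyYAML): a Mark renders as a
+# location followed by a caret-annotated snippet, e.g. for a one-line
+# buffer 'a: [1, 2' with the pointer on the last character:
+#
+# in "<string>", line 1, column 9:
+# a: [1, 2
+# ^
+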
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/events.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/events.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79ad389cb6c9517e391dcd25534866bc9ccd36a
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
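+# Illustrative note (not in upstream PyYAML): parsing a small document
+# yields a flat stream built from the classes above. A sketch, assuming
+# the package is importable as `yaml`:
+#
+# >>> import yaml
+# >>> for event in yaml.parse('a: 1'):
+# ...     print(event)
+# StreamStartEvent()
+# DocumentStartEvent()
+# MappingStartEvent(anchor=None, tag=None, implicit=True)
+# ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='a')
+# ScalarEvent(anchor=None, tag=None, implicit=(True, False), value='1')
+# MappingEndEvent()
+# DocumentEndEvent()
+# StreamEndEvent()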
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/loader.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..e90c11224c38e559cdf0cb205f0692ebd4fb8681
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoader should be able to load almost all YAML safely. Loader is left
+# intact to ensure backwards compatibility.
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
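+
+# Illustrative usage (not in upstream PyYAML): each loader above is just a
+# different stack of mixins, chosen per call:
+#
+# >>> import yaml
+# >>> yaml.load('a: 1', Loader=yaml.SafeLoader)
+# {'a': 1}
+# >>> yaml.load('a: 1', Loader=yaml.FullLoader)
+# {'a': 1}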
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/nodes.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4f070c41e1fb1bc01af27d69329e92dded38908
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = ''
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/parser.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..13a5995d292045d0f865a99abf692bd35dc87814
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
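+#
+# Worked example (not in upstream PyYAML): the document '- a\n- b' derives as
+# stream -> implicit_document -> block_node -> block_content
+# -> block_collection -> block_sequence; the scanner supplies
+# BLOCK-SEQUENCE-START, then (BLOCK-ENTRY, SCALAR) twice, then BLOCK-END.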
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
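+
+ # Illustrative driver loop (not in upstream PyYAML): once mixed into a
+ # loader that also provides a Scanner, consumers drain the parser like
+ # this (handle() is a hypothetical callback):
+ #
+ # while loader.check_event():
+ # event = loader.get_event()
+ # handle(event)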
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected , but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected , but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/reader.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..774b0219b5932a0ee1c27e637371de5ba8d9cb16
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/reader.py
@@ -0,0 +1,185 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# The parser does not use it for any other purpose.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current character.
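+#
+# Illustrative sketch (not in upstream PyYAML) of the cursor behaviour,
+# assuming this module is imported directly:
+#
+# >>> r = Reader('ab\nc')
+# >>> r.peek(), r.peek(1)
+# ('a', 'b')
+# >>> r.forward(3) # consume 'a', 'b' and the line break
+# >>> r.line, r.column, r.peek()
+# (1, 0, 'c')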
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = ""
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = ""
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/representer.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/representer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0b192ef32ed7f5b7015456fe883c3327bb841e
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/representer.py
@@ -0,0 +1,389 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
+ self.default_style = default_style
+ self.sort_keys = sort_keys
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_keys:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ # data != data holds only for NaN; the extra comparison guards
+ # platforms where NaN compares equal to every value.
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object", data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
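+
+# Illustrative extension point (not in upstream PyYAML): applications can
+# register representers for their own types in exactly the same way.
+# `Point` and `represent_point` here are hypothetical:
+#
+# class Point:
+#     def __init__(self, x, y):
+#         self.x, self.y = x, y
+#
+# def represent_point(dumper, data):
+#     return dumper.represent_mapping(
+#         'tag:yaml.org,2002:map', {'x': data.x, 'y': data.y})
+#
+# SafeRepresenter.add_representer(Point, represent_point)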
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+ # We use the __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent an object", data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
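+
+ # Illustrative sketch (not in upstream PyYAML): a plain object whose
+ # reduce tuple carries only instance state takes the first branch
+ # above; e.g. an instance of a hypothetical mymod.Point with x=1, y=2
+ # dumps as
+ #
+ # !!python/object:mymod.Point
+ # x: 1
+ # y: 2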
+
+ def represent_ordered_dict(self, data):
+ # Provide uniform representation across different Python versions.
+ data_type = type(data)
+ tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+ % (data_type.__module__, data_type.__name__)
+ items = [[key, value] for key, value in data.items()]
+ return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+ Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/resolver.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/resolver.py
new file mode 100644
index 0000000000000000000000000000000000000000..013896d2f10619e0e75d2579cd63220338a7fef1
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
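+
+ # Illustrative note (not in upstream PyYAML): resolvers are bucketed by
+ # the first character of the scalar value, so resolve() below only
+ # tries patterns whose `first` set contains that character: 'yes'
+ # consults the bucket for 'y', '123' the bucket for '1', plus the
+ # wildcard bucket registered under None.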
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. Its elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value with the index equal to `index_check`.
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers + wildcard_resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
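+ # A quick sanity check (not part of the library): with the resolvers
+ # above, the plain scalar 'yes' resolves to 'tag:yaml.org,2002:bool'
+ # and '3.14' to 'tag:yaml.org,2002:float', while 'yes indeed' falls
+ # back to the default str tag, since each regexp must match the whole
+ # scalar value.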
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/scanner.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/scanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..7437ede1c608266aaca481955f438844479cab4f
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/scanner.py
@@ -0,0 +1,1435 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+ # See the simple keys treatment below.
+ # See below simple keys treatment.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not remove it from the queue.
+ # Return None if no more tokens.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+ else:
+ return None
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
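+ # For example, in '{ a: 1 }' the flow scalar 'a' is saved as a
+ # possible simple key and is confirmed once the ':' is reached on the
+ # same line; if the ':' only appears on a later line, the saved key
+ # goes stale and is dropped here.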
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
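+ # For example, '-1' (and ':key' in the block context) may start a
+ # plain scalar because the indicator is followed by a non-space
+ # character, while '- 1' is scanned as a block entry instead.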
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
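+ # For instance, the header '|+2' yields (chomping=True, increment=2),
+ # '>-' yields (False, None) and a bare '|' yields (None, None).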
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we relax indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ '/': '/',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
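+ # For example, the double-quoted escapes '\x41', '\u263A' and
+ # '\U0001F600' carry 2, 4 and 8 hexadecimal digits respectively and
+ # decode to 'A', the white smiling face and the grinning face emoji.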
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',' or '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
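+ # For example, in the flow collection '[a b, c]' the plain scalar
+ # stops at the ',' and yields 'a b', whereas in the block context a
+ # ',' would simply be part of the scalar.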
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+ + (u',[]{}' if self.flow_level else u''))\
+ or (self.flow_level and ch in ',?[]{}'):
+ break
+ length += 1
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if the URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/serializer.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe911e67ae7a739abb491fbbc6834b9c37bbda4b
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
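+ # Note: anchor_node is effectively a two-pass marker. A node reached
+ # once keeps anchors[node] = None (no anchor is emitted); a node
+ # reached a second time is assigned 'id001', 'id002', ... by
+ # generate_anchor, so that serialize_node can emit an AliasEvent for
+ # the repeated occurrences.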
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
diff --git a/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/tokens.py b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/tokens.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d0b48a394ac8c019b401516a12f688df361cf90
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/charms/virtual-pc/venv/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
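+ # For instance, repr(ScalarToken('a', True, None, None)) gives
+ # "ScalarToken(plain=True, style=None, value='a')"; the *_mark
+ # attributes are deliberately omitted.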
+
+#class BOMToken(Token):
+# id = ''
+
+class DirectiveToken(Token):
+ id = ''
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = ''
+
+class DocumentEndToken(Token):
+ id = ''
+
+class StreamStartToken(Token):
+ id = ''
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = ''
+
+class BlockSequenceStartToken(Token):
+ id = ''
+
+class BlockMappingStartToken(Token):
+ id = ''
+
+class BlockEndToken(Token):
+ id = ''
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = ''
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = ''
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = ''
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = ''
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
diff --git a/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init b/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init
new file mode 100644
index 0000000000000000000000000000000000000000..a9acf62700fbcaed336d7a5213143e22da63a8c7
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/cloud_init/virtual-pc_init
@@ -0,0 +1,4 @@
+#cloud-config
+password: osm2020
+chpasswd: { expire: False }
+ssh_pwauth: True
diff --git a/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml b/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49e4b4cfaa6ce484bdf0303915fa0a7f16e35f96
--- /dev/null
+++ b/hackfest_virtual-pc_vnfd/virtual-pc_vnfd.yaml
@@ -0,0 +1,66 @@
+vnfd:
+ description: Virtual Desktop Computer with Xubuntu Desktop and RDP
+ df:
+ - id: default-df
+ instantiation-level:
+ - id: default-instantiation-level
+ vdu-level:
+ - number-of-instances: 1
+ vdu-id: virtual-pc
+ vdu-profile:
+ - id: virtual-pc
+ min-number-of-instances: 1
+ vdu-configuration-id: virtual-pc-vdu-configuration
+ ext-cpd:
+ - id: virtual-pc-mgmt-ext
+ int-cpd:
+ cpd: eth0-int
+ vdu-id: virtual-pc
+ id: virtual-pc_vnfd
+ mgmt-cp: virtual-pc-mgmt-ext
+ product-name: virtual-pc_vnfd
+ provider: Canonical
+ sw-image-desc:
+ - id: ubuntu20.04
+ image: ubuntu20.04
+ name: ubuntu20.04
+ vdu:
+ - cloud-init-file: virtual-pc_init
+ description: virtual-pc
+ id: virtual-pc
+ int-cpd:
+ - id: eth0-int
+ virtual-network-interface-requirement:
+ - name: eth0
+ virtual-interface:
+ type: PARAVIRT
+ name: virtual-pc-vdu
+ sw-image-desc: ubuntu20.04
+ virtual-compute-desc: virtual-pc-vdu-compute
+ virtual-storage-desc:
+ - virtual-pc-vdu-storage
+ vdu-configuration:
+ - id: virtual-pc-vdu-configuration
+ config-access:
+ ssh-access:
+ default-user: ubuntu
+ required: true
+ initial-config-primitive:
+ - name: config
+ seq: '1'
+ juju:
+ charm: virtual-pc
+ proxy: false
+ version: '1.0'
+ virtual-compute-desc:
+ - id: virtual-pc-vdu-compute
+ virtual-cpu:
+ num-virtual-cpu: 4
+ virtual-memory:
+ size: 8.0
+ virtual-storage-desc:
+ - id: virtual-pc-vdu-storage
+ size-of-storage: 20