diff --git a/squid_cnf/juju-bundles/bundle.yaml b/squid_cnf/juju-bundles/bundle.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c9257831e7d378127891340827ae280f98f7f4ae
--- /dev/null
+++ b/squid_cnf/juju-bundles/bundle.yaml
@@ -0,0 +1,19 @@
+description: Squid Bundle
+bundle: kubernetes
+applications:
+  squid:
+    charm: ./charms/squid-operator
+    scale: 1
+    options:
+      enable-exporter: true
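+  # To enable monitoring, uncomment the applications below and the
+  # relations section at the end of this bundle.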
+  # prometheus:
+  #   charm: ./charms/prometheus-operator
+  #   scale: 1
+  # grafana:
+  #   charm: ./charms/grafana-operator
+  #   scale: 1
+# relations:
+# - - prometheus:target
+#   - squid:prometheus-target
+# - - grafana:grafana-source
+#   - prometheus:grafana-source
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/.flake8 b/squid_cnf/juju-bundles/charms/grafana-operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+  venv
+  .git
+  build
+  dist
+  *.egg_info
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/.gitignore b/squid_cnf/juju-bundles/charms/grafana-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7d315ecbda5024f3f81756c91caa6d7256970db0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/.gitignore
@@ -0,0 +1,4 @@
+build
+*.charm
+.idea
+__pycache__
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/LICENSE b/squid_cnf/juju-bundles/charms/grafana-operator/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/README.md b/squid_cnf/juju-bundles/charms/grafana-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f3abb7fe9ce429ce54cc9009e93e1efede56fec
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/README.md
@@ -0,0 +1,64 @@
+# Grafana Charm
+
+## Description
+
+This is the Grafana charm for Kubernetes using the Operator Framework.
+
+## Usage
+
+Initial setup (ensure microk8s is a clean slate with `microk8s.reset`, or a fresh install with `snap install microk8s --classic`):
+```bash
+microk8s.enable dns storage registry dashboard
+juju bootstrap microk8s mk8s
+juju add-model lma
+juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+```
+
+Deploy Grafana on its own:
+```bash
+git clone git@github.com:canonical/grafana-operator.git
+cd grafana-operator
+charmcraft build
+juju deploy ./grafana.charm --resource grafana-image=grafana/grafana:7.2.1
+```
+
+View the dashboard in a browser:
+1. `juju status` to check the IP of the running Grafana application
+2. Navigate to `http://IP_ADDRESS:3000`
+3. Log in with the default credentials username=admin, password=admin.
+
+Add Prometheus as a datasource:
+```bash
+git clone git@github.com:canonical/prometheus-operator.git
+cd prometheus-operator
+charmcraft build
+juju deploy ./prometheus.charm
+juju add-relation grafana prometheus
+watch -c juju status --color  # wait for things to settle down
+```
+> Once the deployed charm and relation settle, you should be able to see Prometheus data propagating to the Grafana dashboard.
+
+### High Availability Grafana
+
+This charm is written to support a high-availability Grafana cluster, but a database relation (MySQL or PostgreSQL) is required.
+
+If HA is not required, there is no need to add a database relation.
+
+> NOTE: This charm's HA support should not yet be considered production-ready.
+
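+For example, HA might be enabled along these lines (a sketch; the
+`mariadb-k8s` charm name is an assumption, and any charm providing the `db`
+interface required by this charm's `database` relation should work):
+
+```bash
+juju deploy mariadb-k8s
+juju add-relation grafana:database mariadb-k8s
+juju add-unit grafana  # scale out once the database relation is active
+```
+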
+...
+
+## Developing
+
+Create and activate a virtualenv,
+and install the development requirements,
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/config.yaml b/squid_cnf/juju-bundles/charms/grafana-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c91c65a3567d2fba3572c126f52f9f626c2ef05f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/config.yaml
@@ -0,0 +1,11 @@
+options:
+    port:
+        description: The port grafana will be listening on
+        type: int
+        default: 3000
+    grafana_log_level:
+        type: string
+        description: |
+            Logging level for Grafana. Options are "debug", "info",
+            "warn", "error", and "critical".
+        default: info
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/dispatch b/squid_cnf/juju-bundles/charms/grafana-operator/dispatch
new file mode 100755
index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/dispatch
@@ -0,0 +1,3 @@
+#!/bin/sh
+
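+# Juju runs this script for every hook; the operator framework reads
+# JUJU_DISPATCH_PATH to decide which event to emit to src/charm.py.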
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/hooks/install b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/install
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/install
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/hooks/start b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/start
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/start
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/hooks/upgrade-charm b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/upgrade-charm
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/hooks/upgrade-charm
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/icon.svg b/squid_cnf/juju-bundles/charms/grafana-operator/icon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2ad84eebbd3188fa28bb7f2379b78ce1a0a1933f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/icon.svg
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Generator: Adobe Illustrator 23.0.4, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+<svg id="Layer_1" style="enable-background:new 0 0 85.12 92.46" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" height="250px" viewBox="0 0 85.12 92.46" width="250px" version="1.1" y="0px" x="0px" xmlns:xlink="http://www.w3.org/1999/xlink">
+<style type="text/css">
+	.st0{fill:url(#SVGID_1_);}
+</style>
+<linearGradient id="SVGID_1_" y2="28.783" gradientUnits="userSpaceOnUse" x2="42.562" y1="113.26" x1="42.562">
+	<stop stop-color="#FFF200" offset="0"/>
+	<stop stop-color="#F15A29" offset="1"/>
+</linearGradient>
+<path class="st0" d="m85.01 40.8c-0.14-1.55-0.41-3.35-0.93-5.32-0.51-1.97-1.28-4.13-2.39-6.37-1.12-2.24-2.57-4.57-4.47-6.82-0.74-0.88-1.54-1.76-2.42-2.6 1.3-5.17-1.59-9.65-1.59-9.65-4.98-0.31-8.14 1.54-9.31 2.39-0.2-0.08-0.39-0.17-0.59-0.25-0.85-0.34-1.72-0.66-2.61-0.95-0.89-0.28-1.81-0.54-2.74-0.76-0.94-0.22-1.89-0.4-2.86-0.55-0.17-0.03-0.34-0.05-0.51-0.07-2.18-6.95-8.41-9.85-8.41-9.85-6.95 4.41-8.27 10.57-8.27 10.57s-0.03 0.14-0.07 0.36c-0.38 0.11-0.77 0.22-1.15 0.34-0.53 0.16-1.06 0.36-1.59 0.55-0.53 0.21-1.06 0.41-1.58 0.64-1.05 0.45-2.09 0.96-3.1 1.53-0.99 0.55-1.95 1.16-2.9 1.82-0.14-0.06-0.24-0.11-0.24-0.11-9.62-3.68-18.17 0.75-18.17 0.75-0.78 10.24 3.84 16.68 4.76 17.86-0.23 0.63-0.44 1.27-0.64 1.92-0.71 2.32-1.24 4.7-1.57 7.16-0.05 0.35-0.09 0.71-0.13 1.07-8.9 4.38-11.53 13.38-11.53 13.38 7.42 8.53 16.07 9.06 16.07 9.06 0.01-0.01 0.02-0.01 0.02-0.02 1.1 1.96 2.37 3.83 3.8 5.57 0.6 0.73 1.23 1.43 1.88 2.11-2.71 7.74 0.38 14.18 0.38 14.18 8.26 0.31 13.69-3.61 14.83-4.52 0.82 0.28 1.66 0.53 2.5 0.74 2.54 0.65 5.14 1.04 7.74 1.15 0.65 0.03 1.3 0.04 1.95 0.04h0.31l0.21-0.01 0.41-0.01 0.4-0.02 0.01 0.01c3.89 5.55 10.74 6.34 10.74 6.34 4.87-5.13 5.15-10.22 5.15-11.33v-0.07-0.15s0 0 0 0c0-0.08-0.01-0.15-0.01-0.23 1.02-0.72 2-1.49 2.92-2.31 1.95-1.76 3.65-3.77 5.06-5.93 0.13-0.2 0.26-0.41 0.39-0.62 5.51 0.32 9.39-3.41 9.39-3.41-0.91-5.74-4.18-8.54-4.87-9.07 0 0-0.03-0.02-0.07-0.05s-0.06-0.05-0.06-0.05c-0.04-0.02-0.08-0.05-0.12-0.08 0.03-0.35 0.06-0.69 0.08-1.04 0.04-0.62 0.06-1.24 0.06-1.85v-0.46-0.23-0.12-0.16l-0.02-0.38-0.03-0.52c-0.01-0.18-0.02-0.34-0.04-0.5-0.01-0.16-0.03-0.32-0.05-0.48l-0.06-0.48-0.07-0.47c-0.09-0.63-0.21-1.26-0.36-1.88-0.58-2.47-1.54-4.82-2.82-6.93s-2.86-3.98-4.65-5.56-3.79-2.85-5.9-3.79c-2.1-0.95-4.31-1.55-6.51-1.83-1.1-0.14-2.2-0.2-3.28-0.19l-0.41 0.01h-0.1-0.14l-0.17 0.01-0.4 0.03c-0.15 0.01-0.31 0.02-0.45 0.04-0.56 0.05-1.11 0.13-1.66 0.23-2.18 0.41-4.24 1.2-6.06 2.28-1.82 1.09-3.39 2.45-4.68 3.98-1.28 1.54-2.28 3.24-2.96 5-0.69 1.76-1.07 3.58-1.18 5.35-0.03 0.44-0.04 0.88-0.03 1.32 0 0.11 0 0.22 0.01 0.33l0.01 0.35c0.02 0.21 0.03 0.42 0.05 0.63 0.09 0.9 0.25 1.75 0.49 2.58 0.48 1.66 1.25 3.15 2.2 4.43s2.08 2.33 3.28 3.15 2.49 1.41 3.76 1.79 2.54 0.54 3.74 0.53c0.15 0 0.3 0 0.44-0.01 0.08 0 0.16-0.01 0.24-0.01s0.16-0.01 0.24-0.01c0.13-0.01 0.25-0.03 0.38-0.04 0.03 0 0.07-0.01 0.11-0.01l0.12-0.02c0.08-0.01 0.15-0.02 0.23-0.03 0.16-0.02 0.29-0.05 0.43-0.08s0.28-0.05 0.42-0.09c0.27-0.06 0.54-0.14 0.8-0.22 0.52-0.17 1.01-0.38 1.46-0.61s0.87-0.5 1.26-0.77c0.11-0.08 0.22-0.16 0.33-0.25 0.42-0.33 0.48-0.94 0.15-1.35-0.29-0.36-0.79-0.45-1.19-0.23-0.1 0.05-0.2 0.11-0.3 0.16-0.35 0.17-0.71 0.32-1.09 0.45-0.39 0.12-0.79 0.22-1.2 0.29-0.21 0.03-0.42 0.06-0.63 0.08-0.11 0.01-0.21 0.02-0.32 0.02s-0.22 0.01-0.32 0.01-0.21 0-0.31-0.01c-0.13-0.01-0.26-0.01-0.39-0.02h-0.01-0.04l-0.09 0.02c-0.06-0.01-0.12-0.01-0.17-0.02-0.12-0.01-0.23-0.03-0.35-0.04-0.93-0.13-1.88-0.4-2.79-0.82-0.91-0.41-1.79-0.98-2.57-1.69-0.79-0.71-1.48-1.56-2.01-2.52-0.54-0.96-0.92-2.03-1.09-3.16-0.09-0.56-0.13-1.14-0.11-1.71 0.01-0.16 0.01-0.31 0.02-0.47v-0.03-0.06l0.01-0.12c0.01-0.08 0.01-0.15 0.02-0.23 0.03-0.31 0.08-0.62 0.13-0.92 0.43-2.45 1.65-4.83 3.55-6.65 0.47-0.45 0.98-0.87 1.53-1.25 0.55-0.37 1.12-0.7 1.73-0.98 0.6-0.28 1.23-0.5 1.88-0.68 0.65-0.17 1.31-0.29 1.98-0.35 0.34-0.03 0.67-0.04 1.01-0.04h0.23l0.27 0.01 0.17 0.01h0.03 0.07l0.27 0.02c0.73 0.06 1.46 0.16 2.17 0.32 1.43 0.32 2.83 0.85 4.13 1.57 2.6 1.44 4.81 3.69 6.17 6.4 0.69 1.35 1.16 2.81 1.4 4.31 0.06 0.38 0.1 0.76 0.13 1.14l0.02 0.29 0.01 
0.29c0.01 0.1 0.01 0.19 0.01 0.29 0 0.09 0.01 0.2 0 0.27v0.25l-0.01 0.28c-0.01 0.19-0.02 0.49-0.03 0.67-0.03 0.42-0.07 0.83-0.12 1.24s-0.12 0.82-0.19 1.22c-0.08 0.4-0.17 0.81-0.27 1.21-0.2 0.8-0.46 1.59-0.76 2.36-0.61 1.54-1.42 3-2.4 4.36-1.96 2.7-4.64 4.9-7.69 6.29-1.52 0.69-3.13 1.19-4.78 1.47-0.82 0.14-1.66 0.22-2.5 0.25l-0.15 0.01h-0.13-0.27-0.41-0.21-0.01-0.08c-0.45-0.01-0.9-0.03-1.34-0.07-1.79-0.13-3.55-0.45-5.27-0.95-1.71-0.49-3.38-1.16-4.95-2-3.14-1.68-5.95-3.98-8.15-6.76-1.11-1.38-2.07-2.87-2.87-4.43s-1.42-3.2-1.89-4.88c-0.46-1.68-0.75-3.39-0.86-5.12l-0.02-0.32-0.01-0.08v-0.07-0.14l-0.01-0.28v-0.07-0.1-0.2l-0.01-0.4v-0.08-0.03-0.16c0-0.21 0.01-0.42 0.01-0.63 0.03-0.85 0.1-1.73 0.21-2.61s0.26-1.76 0.44-2.63 0.39-1.74 0.64-2.59c0.49-1.71 1.1-3.36 1.82-4.92 1.44-3.12 3.34-5.88 5.61-8.09 0.57-0.55 1.16-1.08 1.77-1.57s1.25-0.95 1.9-1.37c0.65-0.43 1.32-0.82 2.02-1.18 0.34-0.19 0.7-0.35 1.05-0.52 0.18-0.08 0.36-0.16 0.53-0.24 0.18-0.08 0.36-0.16 0.54-0.23 0.72-0.3 1.46-0.56 2.21-0.8 0.19-0.06 0.38-0.11 0.56-0.17 0.19-0.06 0.38-0.1 0.57-0.16 0.38-0.11 0.76-0.2 1.14-0.29 0.19-0.05 0.39-0.08 0.58-0.13 0.19-0.04 0.38-0.08 0.58-0.12 0.19-0.04 0.39-0.07 0.58-0.11l0.29-0.05 0.29-0.04c0.2-0.03 0.39-0.06 0.59-0.09 0.22-0.04 0.44-0.05 0.66-0.09 0.18-0.02 0.48-0.06 0.65-0.08 0.14-0.01 0.28-0.03 0.41-0.04l0.28-0.03 0.14-0.01 0.16-0.01c0.22-0.01 0.44-0.03 0.66-0.04l0.33-0.02h0.02 0.07l0.14-0.01c0.19-0.01 0.38-0.02 0.56-0.03 0.75-0.02 1.5-0.02 2.24 0 1.48 0.06 2.93 0.22 4.34 0.48 2.82 0.53 5.49 1.43 7.89 2.62 2.41 1.18 4.57 2.63 6.44 4.2 0.12 0.1 0.23 0.2 0.35 0.3 0.11 0.1 0.23 0.2 0.34 0.3 0.23 0.2 0.44 0.41 0.66 0.61s0.43 0.41 0.64 0.62c0.2 0.21 0.41 0.41 0.61 0.63 0.8 0.84 1.53 1.69 2.19 2.55 1.33 1.71 2.39 3.44 3.24 5.07 0.05 0.1 0.11 0.2 0.16 0.3l0.15 0.3c0.1 0.2 0.2 0.4 0.29 0.6s0.19 0.39 0.27 0.59c0.09 0.2 0.17 0.39 0.25 0.58 0.32 0.76 0.61 1.49 0.84 2.18 0.39 1.11 0.67 2.11 0.89 2.98 0.09 0.35 0.42 0.58 0.78 0.55 0.37-0.03 0.66-0.34 0.66-0.71 0.04-0.95 0.01-2.05-0.09-3.3z"/>
+</svg>
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/manifest.yaml b/squid_cnf/juju-bundles/charms/grafana-operator/manifest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b9878b3219e0c495a9a1579d0cc30ceaefaa58f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/manifest.yaml
@@ -0,0 +1,7 @@
+bases:
+- architectures:
+  - amd64
+  channel: '20.04'
+  name: ubuntu
+charmcraft-started-at: '2021-05-31T06:47:43.483382Z'
+charmcraft-version: 0.10.0
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/grafana-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e52b205e242068576f86454324c25bd118ec65bf
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/metadata.yaml
@@ -0,0 +1,34 @@
+name: grafana
+summary: Data visualization and observability with Grafana
+maintainers:
+    - Justin Clark <justin.clark@canonical.com>
+description: |
+    Grafana provides dashboards for monitoring data and this
+    charm is written to allow for HA on Kubernetes and can take
+    multiple data sources (for example, Prometheus).
+tags:
+    - lma
+    - grafana
+    - prometheus
+    - monitoring
+    - observability
+series:
+    - kubernetes
+provides:
+    grafana-source:
+        interface: grafana-datasource
+    grafana-dashboard:
+        interface: grafana-dash
+requires:
+    database:
+        interface: db
+        limit: 1
+peers:
+    grafana:
+        interface: grafana-peers
+storage:
+    sqlitedb:
+        type: filesystem
+        location: /var/lib/grafana
+deployment:
+    service: loadbalancer
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/requirements-dev.txt b/squid_cnf/juju-bundles/charms/grafana-operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eded44146a5877d5d81b343988b516c4acaa4573
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/requirements-dev.txt
@@ -0,0 +1,2 @@
+-r requirements.txt
+flake8
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/requirements.txt b/squid_cnf/juju-bundles/charms/grafana-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca625b4c913fa655ee7beb6ab2769131f7b5a21c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/requirements.txt
@@ -0,0 +1,2 @@
+ops
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/run_tests b/squid_cnf/juju-bundles/charms/grafana-operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..14bb4f4e1b3a9a8ffef0da6da128bbddb8861ce5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 Justin
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+flake8
+python3 -m unittest -v "$@"
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/src/charm.py b/squid_cnf/juju-bundles/charms/grafana-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..1053f8f871535a9eaec0f1f0712ebddd2218f16d
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/src/charm.py
@@ -0,0 +1,494 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+import hashlib
+import textwrap
+
+from oci_image import OCIImageResource, OCIImageResourceError
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+log = logging.getLogger()
+
+
+# These are the required and optional relation data fields.
+# In other words, when relating to this charm, these are the fields
+# that this charm will process.
+REQUIRED_DATASOURCE_FIELDS = {
+    'private-address',  # the hostname/IP of the data source server
+    'port',  # the port of the data source server
+    'source-type',  # the data source type (e.g. prometheus)
+}
+
+OPTIONAL_DATASOURCE_FIELDS = {
+    'source-name',  # a human-readable name of the source
+}
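+# As an illustration, a charm providing the grafana-source interface might
+# set relation data along these lines (values are hypothetical):
+#   relation.data[unit]['private-address'] = '10.1.2.3'
+#   relation.data[unit]['port'] = '9090'
+#   relation.data[unit]['source-type'] = 'prometheus'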
+
+# https://grafana.com/docs/grafana/latest/administration/configuration/#database
+REQUIRED_DATABASE_FIELDS = {
+    'type',  # mysql, postgres or sqlite3 (sqlite3 doesn't work for HA)
+    'host',  # in the form '<url_or_ip>:<port>', e.g. 127.0.0.1:3306
+    'name',
+    'user',
+    'password',
+}
+
+# check the Grafana documentation to ensure these fields have valid values,
+# as this charm will not validate them itself
+# TODO: fill with optional fields
+OPTIONAL_DATABASE_FIELDS = set()
+
+VALID_DATABASE_TYPES = {'mysql', 'postgres', 'sqlite3'}
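+# As an illustration, a MySQL charm related over the 'database' interface
+# might set relation data along these lines (values are hypothetical):
+#   relation.data[unit]['type'] = 'mysql'
+#   relation.data[unit]['host'] = '10.1.2.4:3306'
+#   relation.data[unit]['name'] = 'grafana'
+#   relation.data[unit]['user'] = 'grafana'
+#   relation.data[unit]['password'] = 'changeme'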
+
+
+def get_container(pod_spec, container_name):
+    """Find and return the first container in pod_spec whose name is
+    container_name, otherwise return None."""
+    for container in pod_spec['containers']:
+        if container['name'] == container_name:
+            return container
+    raise ValueError("Unable to find container named '{}' in pod spec".format(
+        container_name))
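+# For example, given a minimal spec {'containers': [{'name': 'grafana'}]},
+# get_container(spec, 'grafana') returns {'name': 'grafana'}, while an
+# unknown name raises ValueError.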
+
+
+class GrafanaK8s(CharmBase):
+    """Charm to run Grafana on Kubernetes.
+
+    This charm allows for high-availability
+    (as long as a non-sqlite database relation is present).
+
+    Developers of this charm should be aware of the Grafana provisioning docs:
+    https://grafana.com/docs/grafana/latest/administration/provisioning/
+    """
+
+    datastore = StoredState()
+
+    def __init__(self, *args):
+        log.debug('Initializing charm.')
+        super().__init__(*args)
+
+        # -- get image information
+        self.image = OCIImageResource(self, 'grafana-image')
+
+        # -- standard hooks
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.update_status, self.on_update_status)
+        self.framework.observe(self.on.stop, self._on_stop)
+
+        # -- grafana-source relation observations
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self.on_grafana_source_changed)
+        self.framework.observe(self.on['grafana-source'].relation_broken,
+                               self.on_grafana_source_broken)
+
+        # -- grafana (peer) relation observations
+        self.framework.observe(self.on['grafana'].relation_changed,
+                               self.on_peer_changed)
+        # self.framework.observe(self.on['grafana'].relation_departed,
+        #                        self.on_peer_departed)
+
+        # -- database relation observations
+        self.framework.observe(self.on['database'].relation_changed,
+                               self.on_database_changed)
+        self.framework.observe(self.on['database'].relation_broken,
+                               self.on_database_broken)
+
+        # -- initialize states --
+        self.datastore.set_default(sources=dict())  # available data sources
+        self.datastore.set_default(source_names=set())  # unique source names
+        self.datastore.set_default(sources_to_delete=set())
+        self.datastore.set_default(database=dict())  # db configuration
+
+    @property
+    def has_peer(self) -> bool:
+        rel = self.model.get_relation('grafana')
+        return len(rel.units) > 0 if rel is not None else False
+
+    @property
+    def has_db(self) -> bool:
+        """Only consider a DB connection if we have config info."""
+        return len(self.datastore.database) > 0
+
+    def _on_stop(self, _):
+        """Go into maintenance state if the unit is stopped."""
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def on_config_changed(self, _):
+        self.configure_pod()
+
+    def on_update_status(self, _):
+        """Various health checks of the charm."""
+        self._check_high_availability()
+
+    def on_grafana_source_changed(self, event):
+        """ Get relation data for Grafana source and set k8s pod spec.
+
+        This event handler (if the unit is the leader) will get data for
+        an incoming grafana-source relation and make the relation data
+        is available in the app's datastore object (StoredState).
+        """
+
+        # if this unit is the leader, set the required data
+        # of the grafana-source in this charm's datastore
+        if not self.unit.is_leader():
+            return
+
+        # if there is no event unit, there is no relation data to read
+        if event.unit is None:
+            log.warning("event unit can't be None when setting data sources.")
+            return
+
+        # dictionary of all the required/optional datasource field values
+        # using this as a more generic way of getting data source fields
+        datasource_fields = {
+            field: event.relation.data[event.unit].get(field)
+            for field in REQUIRED_DATASOURCE_FIELDS | OPTIONAL_DATASOURCE_FIELDS
+        }
+
+        missing_fields = [field for field
+                          in REQUIRED_DATASOURCE_FIELDS
+                          if datasource_fields.get(field) is None]
+        # check the relation data for missing required fields
+        if len(missing_fields) > 0:
+            log.error("Missing required data fields for grafana-source "
+                      "relation: {}".format(missing_fields))
+            self._remove_source_from_datastore(event.relation.id)
+            return
+
+        # specifically handle optional fields if necessary
+        # check if source-name was not passed or if we have already saved the provided name
+        if (datasource_fields['source-name'] is None
+                or datasource_fields['source-name'] in self.datastore.source_names):
+            default_source_name = '{}_{}'.format(
+                event.app.name,
+                event.relation.id
+            )
+            log.warning("No name 'grafana-source' or provided name is already in use. "
+                        "Using safe default: {}.".format(default_source_name))
+            datasource_fields['source-name'] = default_source_name
+
+        self.datastore.source_names.add(datasource_fields['source-name'])
+
+        # set the first grafana-source as the default (needed for pod config)
+        # if `self.datastore.sources` is currently empty, this is the first
+        datasource_fields['isDefault'] = 'false'
+        if not dict(self.datastore.sources):
+            datasource_fields['isDefault'] = 'true'
+
+        # add the unit name so the source can be removed later; this may
+        # duplicate 'source-name', but it guarantees the lookup
+        datasource_fields['unit_name'] = event.unit.name
+
+        # add the new datasource relation data to the current state
+        new_source_data = {
+            field: value for field, value in datasource_fields.items()
+            if value is not None
+        }
+        self.datastore.sources.update({event.relation.id: new_source_data})
+        self.configure_pod()
+
+    def on_grafana_source_broken(self, event):
+        """When a grafana-source is removed, delete from the datastore."""
+        if self.unit.is_leader():
+            self._remove_source_from_datastore(event.relation.id)
+        self.configure_pod()
+
+    def on_peer_changed(self, _):
+        # TODO: https://grafana.com/docs/grafana/latest/tutorials/ha_setup/
+        #       According to these docs ^, as long as we have a DB, HA should
+        #       work out of the box if we are OK with "Sticky Sessions"
+        #       but having "Stateless Sessions" could require more config
+
+        # if the config changed, set a new pod spec
+        self.configure_pod()
+
+    def on_peer_departed(self, _):
+        """Sets pod spec with new info."""
+        # TODO: setting pod spec shouldn't do anything now,
+        #       but if we ever need to change config based peer units,
+        #       we will want to make sure configure_pod() is called
+        self.configure_pod()
+
+    def on_database_changed(self, event):
+        """Sets configuration information for database connection."""
+        if not self.unit.is_leader():
+            return
+
+        if event.unit is None:
+            log.warning("event unit can't be None when setting db config.")
+            return
+
+        # save the necessary configuration of this database connection
+        database_fields = {
+            field: event.relation.data[event.unit].get(field)
+            for field in REQUIRED_DATABASE_FIELDS | OPTIONAL_DATABASE_FIELDS
+        }
+
+        # if any required fields are missing, warn the user and return
+        missing_fields = [field for field
+                          in REQUIRED_DATABASE_FIELDS
+                          if database_fields.get(field) is None]
+        if len(missing_fields) > 0:
+            log.error("Missing required data fields for related database "
+                      "relation: {}".format(missing_fields))
+            return
+
+        # reject database types that Grafana does not support
+        if database_fields['type'] not in VALID_DATABASE_TYPES:
+            log.error('Grafana can only accept databases of the following '
+                      'types: {}'.format(VALID_DATABASE_TYPES))
+            return
+
+        # add the new database relation data to the datastore
+        self.datastore.database.update({
+            field: value for field, value in database_fields.items()
+            if value is not None
+        })
+        self.configure_pod()
+
+    def on_database_broken(self, _):
+        """Removes database connection info from datastore.
+
+        We are guaranteed to only have one DB connection, so clearing
+        datastore.database is all we need for the change to be propagated
+        to the pod spec."""
+        if not self.unit.is_leader():
+            return
+
+        # remove the existing database info from datastore
+        self.datastore.database = dict()
+
+        # set pod spec because datastore config has changed
+        self.configure_pod()
+
+    def _remove_source_from_datastore(self, rel_id):
+        """Remove the grafana-source from the datastore.
+
+        Once removed from the datastore, this data source will no longer
+        be part of the next pod spec."""
+        log.info('Removing all data for relation: {}'.format(rel_id))
+        removed_source = self.datastore.sources.pop(rel_id, None)
+        if removed_source is None:
+            log.warning('Could not remove source for relation: {}'.format(
+                rel_id))
+        else:
+            # free name from charm's set of source names
+            # and save to set which will be used in set_pod_spec
+            self.datastore.source_names.remove(removed_source['source-name'])
+            self.datastore.sources_to_delete.add(removed_source['source-name'])
+
+    def _check_high_availability(self):
+        """Checks whether the configuration allows for HA."""
+        if self.has_peer:
+            if self.has_db:
+                log.info('high availability possible.')
+                status = MaintenanceStatus('Grafana ready for HA.')
+            else:
+                log.warning('high availability not possible '
+                            'with current configuration.')
+                status = BlockedStatus('Need database relation for HA.')
+        else:
+            log.info('running Grafana on single node.')
+            status = MaintenanceStatus('Grafana ready on single node.')
+
+        # make sure a maintenance status does not overwrite
+        # a currently active status
+        if isinstance(status, MaintenanceStatus) \
+                and isinstance(self.unit.status, ActiveStatus):
+            return status
+
+        self.unit.status = status
+        return status
+
+    def _make_delete_datasources_config_text(self) -> str:
+        """Generate text of data sources to delete."""
+        if not self.datastore.sources_to_delete:
+            return "\n"
+
+        delete_datasources_text = textwrap.dedent("""
+        deleteDatasources:""")
+        for name in self.datastore.sources_to_delete:
+            delete_datasources_text += textwrap.dedent("""
+            - name: {}
+              orgId: 1""".format(name))
+
+        # clear datastore.sources_to_delete and return text result
+        self.datastore.sources_to_delete.clear()
+        return delete_datasources_text + '\n\n'
+
+    def _make_data_source_config_text(self) -> str:
+        """Build config based on Data Sources section of provisioning docs."""
+        # get starting text for the config file and sources to delete
+        delete_text = self._make_delete_datasources_config_text()
+        config_text = textwrap.dedent("""
+        apiVersion: 1
+        """)
+        config_text += delete_text
+        if self.datastore.sources:
+            config_text += "datasources:"
+        for rel_id, source_info in self.datastore.sources.items():
+            # TODO: handle more optional fields and verify that current
+            #       defaults are what we want (e.g. "access")
+            config_text += textwrap.dedent("""
+                - name: {0}
+                  type: {1}
+                  access: proxy
+                  url: http://{2}:{3}
+                  isDefault: {4}
+                  editable: true
+                  orgId: 1""").format(
+                source_info['source-name'],
+                source_info['source-type'],
+                source_info['private-address'],
+                source_info['port'],
+                source_info['isDefault'],
+            )
+
+        return config_text + '\n'
+
+    def _update_pod_data_source_config_file(self, pod_spec):
+        """Adds datasources to pod configuration."""
+        file_text = self._make_data_source_config_text()
+        data_source_file_meta = {
+            'name': 'grafana-datasources',
+            'mountPath': '/etc/grafana/provisioning/datasources',
+            'files': [{
+                'path': 'datasources.yaml',
+                'content': file_text,
+            }]
+        }
+        container = get_container(pod_spec, self.app.name)
+        container['volumeConfig'].append(data_source_file_meta)
+
+        # get hash string of the new file text and put into container config
+        # if this changes, it will trigger a pod restart
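+        # (md5 is used here only as a cheap change fingerprint, not for security)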
+        file_text_hash = hashlib.md5(file_text.encode()).hexdigest()
+        if 'DATASOURCES_YAML' in container['envConfig'] \
+                and container['envConfig']['DATASOURCES_YAML'] != file_text_hash:
+            log.info('datasources.yaml hash has changed. '
+                     'Triggering pod restart.')
+        container['envConfig']['DATASOURCES_YAML'] = file_text_hash
+
+    def _make_config_ini_text(self):
+        """Create the text of the config.ini file.
+
+        More information about this can be found in the Grafana docs:
+        https://grafana.com/docs/grafana/latest/administration/configuration/
+        """
+
+        config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """.format(
+            self.model.config['grafana_log_level'],
+        ))
+
+        # if there is a database available, add that information
+        if self.datastore.database:
+            db_config = self.datastore.database
+            config_text += textwrap.dedent("""
+            [database]
+            type = {0}
+            host = {1}
+            name = {2}
+            user = {3}
+            password = {4}
+            url = {0}://{3}:{4}@{1}/{2}""".format(
+                db_config['type'],
+                db_config['host'],
+                db_config['name'],
+                db_config['user'],
+                db_config['password'],
+            ))
+        return config_text
+
+    def _update_pod_config_ini_file(self, pod_spec):
+        file_text = self._make_config_ini_text()
+        config_ini_file_meta = {
+            'name': 'grafana-config-ini',
+            'mountPath': '/etc/grafana',
+            'files': [{
+                'path': 'grafana.ini',
+                'content': file_text
+            }]
+        }
+        container = get_container(pod_spec, self.app.name)
+        container['volumeConfig'].append(config_ini_file_meta)
+
+        # get hash string of the new file text and put into container config
+        # if this changes, it will trigger a pod restart
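+        # (md5 is used here only as a cheap change fingerprint, not for security)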
+        file_text_hash = hashlib.md5(file_text.encode()).hexdigest()
+        if 'GRAFANA_INI' in container['envConfig'] \
+                and container['envConfig']['GRAFANA_INI'] != file_text_hash:
+            log.info('grafana.ini hash has changed. Triggering pod restart.')
+        container['envConfig']['GRAFANA_INI'] = file_text_hash
+
+    def _build_pod_spec(self):
+        """Builds the pod spec based on available info in datastore`."""
+
+        config = self.model.config
+
+        spec = {
+            'version': 3,
+            'containers': [{
+                'name': self.app.name,
+                'image': "ubuntu/grafana:latest",
+                'ports': [{
+                    'containerPort': config['port'],
+                    'protocol': 'TCP'
+                }],
+                'volumeConfig': [],
+                'envConfig': {},  # used to store hashes of config file text
+                'kubernetes': {
+                    'readinessProbe': {
+                        'httpGet': {
+                            'path': '/api/health',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 10,
+                        'timeoutSeconds': 30
+                    },
+                },
+            }]
+        }
+
+        return spec
+
+    def configure_pod(self):
+        """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`."""
+
+        # check for valid high availability (or single node) configuration
+        self._check_high_availability()
+
+        # in the case where we have peers but no DB connection,
+        # don't set the pod spec until it is resolved
+        if self.unit.status == BlockedStatus('Need database relation for HA.'):
+            log.error('Application is in a blocked state. '
+                      'Please resolve before pod spec can be set.')
+            return
+
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+
+        # general pod spec component updates
+        self.unit.status = MaintenanceStatus('Building pod spec.')
+        pod_spec = self._build_pod_spec()
+        if not pod_spec:
+            return
+        self._update_pod_data_source_config_file(pod_spec)
+        self._update_pod_config_ini_file(pod_spec)
+
+        # set the pod spec with Juju
+        self.model.pod.set_spec(pod_spec)
+        self.unit.status = ActiveStatus()
+
+
+if __name__ == '__main__':
+    main(GrafanaK8s)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/tests/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/grafana-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6b87e4151bf4ef5e87674bbd914adc12b49fd6a
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/tests/test_charm.py
@@ -0,0 +1,490 @@
+import hashlib
+import textwrap
+import unittest
+
+from ops.testing import Harness
+from ops.model import (
+    TooManyRelatedAppsError,
+    ActiveStatus,
+)
+from charm import (
+    GrafanaK8s,
+    MaintenanceStatus,
+    BlockedStatus,
+    get_container,
+)
+
+BASE_CONFIG = {
+    'port': 3000,
+    'grafana_log_level': 'info',
+}
+
+
+class GrafanaCharmTest(unittest.TestCase):
+
+    def setUp(self) -> None:
+        self.harness = Harness(GrafanaK8s)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+        self.harness.add_oci_resource('grafana-image')
+
+    def test__grafana_source_data(self):
+
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+        self.assertIsInstance(rel_id, int)
+
+        # test that the unit data propagates the correct way
+        # which is through the triggering of on_relation_changed
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': '192.0.2.1',
+                                              'port': 1234,
+                                              'source-type': 'prometheus',
+                                              'source-name': 'prometheus-app',
+                                          })
+
+        expected_first_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 1234,
+            'source-name': 'prometheus-app',
+            'source-type': 'prometheus',
+            'isDefault': 'true',
+            'unit_name': 'prometheus/0'
+        }
+        self.assertEqual(expected_first_source_data,
+                         dict(self.harness.charm.datastore.sources[rel_id]))
+
+        # test that clearing the relation data leads to
+        # the datastore for this data source being cleared
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': None,
+                                              'port': None,
+                                          })
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(rel_id))
+
+    def test__ha_database_and_status_check(self):
+        """If there is a peer connection and no database (needed for HA),
+        the charm should put the application in a blocked state."""
+
+        # start charm with one peer and no database relation
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.unit.status,
+                         ActiveStatus())
+
+        # ensure _check_high_availability() ends up with the correct status
+        status = self.harness.charm._check_high_availability()
+        self.assertEqual(status, MaintenanceStatus('Grafana ready on single node.'))
+
+        # make sure that triggering 'update-status' hook does not
+        # overwrite the current active status
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status,
+                         ActiveStatus())
+
+        peer_rel_id = self.harness.add_relation('grafana', 'grafana')
+
+        # add main unit and its data
+        # self.harness.add_relation_unit(peer_rel_id, 'grafana/0')
+        # will trigger the grafana-changed hook
+        self.harness.update_relation_data(peer_rel_id,
+                                          'grafana/0',
+                                          {'private-address': '10.1.2.3'})
+
+        # add peer unit and its data
+        self.harness.add_relation_unit(peer_rel_id, 'grafana/1')
+        self.harness.update_relation_data(peer_rel_id,
+                                          'grafana/1',
+                                          {'private-address': '10.0.0.1'})
+
+        self.assertTrue(self.harness.charm.has_peer)
+        self.assertFalse(self.harness.charm.has_db)
+        self.assertEqual(
+            self.harness.charm.unit.status,
+            BlockedStatus('Need database relation for HA.')
+        )
+
+        # ensure update-status hook doesn't overwrite this
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status,
+                         BlockedStatus('Need database relation for HA.'))
+
+        # now add the database connection and the model should
+        # not have a blocked status
+        db_rel_id = self.harness.add_relation('database', 'mysql')
+        self.harness.add_relation_unit(db_rel_id, 'mysql/0')
+        self.harness.update_relation_data(db_rel_id,
+                                          'mysql/0',
+                                          {
+                                              'type': 'mysql',
+                                              'host': '10.10.10.10:3306',
+                                              'name': 'test_mysql_db',
+                                              'user': 'test-admin',
+                                              'password': 'super!secret!password',
+                                          })
+        self.assertTrue(self.harness.charm.has_db)
+        self.assertEqual(self.harness.charm.unit.status, ActiveStatus())
+
+        # ensure _check_high_availability() ends up with the correct status
+        status = self.harness.charm._check_high_availability()
+        self.assertEqual(status, MaintenanceStatus('Grafana ready for HA.'))
+
+    def test__database_relation_data(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.database, {})
+
+        # add relation and update relation data
+        rel_id = self.harness.add_relation('database', 'mysql')
+        rel = self.harness.model.get_relation('database')
+        self.harness.add_relation_unit(rel_id, 'mysql/0')
+        test_relation_data = {
+            'type': 'mysql',
+            'host': '0.1.2.3:3306',
+            'name': 'my-test-db',
+            'user': 'test-user',
+            'password': 'super!secret!password',
+        }
+        self.harness.update_relation_data(rel_id,
+                                          'mysql/0',
+                                          test_relation_data)
+        # check that charm datastore was properly set
+        self.assertEqual(dict(self.harness.charm.datastore.database),
+                         test_relation_data)
+
+        # now break this relation and ensure the datastore is emptied
+        self.harness.charm.on.database_relation_broken.emit(rel)
+        self.assertEqual({}, dict(self.harness.charm.datastore.database))
+
+    def test__multiple_database_relation_handling(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.database, {})
+
+        # add first database relation
+        self.harness.add_relation('database', 'mysql')
+
+        # add second database relation -- should fail here
+        with self.assertRaises(TooManyRelatedAppsError):
+            self.harness.add_relation('database', 'mysql')
+            self.harness.charm.model.get_relation('database')
+
+    def test__multiple_source_relations(self):
+        """This will test data-source config text with multiple sources.
+
+        Specifically, it will test multiple grafana-source relations."""
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        rel_id0 = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id0, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        # and test that _make_data_source_config_text() works as expected
+        prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus'
+        }
+        self.harness.update_relation_data(rel_id0, 'prometheus/0', prom_source_data)
+        header_text = textwrap.dedent("""
+                apiVersion: 1
+
+                datasources:""")
+        correct_config_text0 = header_text + textwrap.dedent("""
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1""")
+
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text0 + '\n', generated_text)
+
+        # add another source relation and check the resulting config text
+        jaeger_source_data = {
+            'private-address': '255.255.255.0',
+            'port': 7890,
+            'source-type': 'jaeger',
+            'source-name': 'jaeger-application'
+        }
+        rel_id1 = self.harness.add_relation('grafana-source', 'jaeger')
+        self.harness.add_relation_unit(rel_id1, 'jaeger/0')
+        self.harness.update_relation_data(rel_id1, 'jaeger/0', jaeger_source_data)
+
+        correct_config_text1 = correct_config_text0 + textwrap.dedent("""
+            - name: jaeger-application
+              type: jaeger
+              access: proxy
+              url: http://255.255.255.0:7890
+              isDefault: false
+              editable: true
+              orgId: 1""")
+
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text1 + '\n', generated_text)
+
+        # test that removing the second source results in config text
+        # that is the same as the original
+        self.harness.update_relation_data(rel_id1,
+                                          'jaeger/0',
+                                          {
+                                              'private-address': None,
+                                              'port': None,
+                                          })
+        generated_text = self.harness.charm._make_data_source_config_text()
+        correct_text_after_removal = textwrap.dedent("""
+            apiVersion: 1
+
+            deleteDatasources:
+            - name: jaeger-application
+              orgId: 1
+
+            datasources:
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1""")
+
+        self.assertEqual(correct_text_after_removal + '\n', generated_text)
+
+        # now test that the 'deleteDatasources' section is gone
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text0 + '\n', generated_text)
+
+    def test__pod_spec_container_datasources(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        # and test that _make_data_source_config_text() works as expected
+        prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus'
+        }
+        self.harness.update_relation_data(rel_id, 'prometheus/0', prom_source_data)
+
+        data_source_file_text = textwrap.dedent("""
+            apiVersion: 1
+
+            datasources:
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1
+              """)
+
+        config_ini_file_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """).format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        expected_container_files_spec = [
+            {
+                'name': 'grafana-datasources',
+                'mountPath': '/etc/grafana/provisioning/datasources',
+                'files': [{
+                    'path': 'datasources.yaml',
+                    'content': data_source_file_text,
+                }],
+            },
+            {
+                'name': 'grafana-config-ini',
+                'mountPath': '/etc/grafana',
+                'files': [{
+                    'path': 'grafana.ini',
+                    'content': config_ini_file_text,
+                }]
+            }
+        ]
+        pod_spec, _ = self.harness.get_pod_spec()
+        container = get_container(pod_spec, 'grafana')
+        actual_container_files_spec = container['volumeConfig']
+        self.assertEqual(expected_container_files_spec,
+                         actual_container_files_spec)
+
+    def test__access_sqlite_storage_location(self):
+        expected_path = '/var/lib/grafana'
+        actual_path = self.harness.charm.meta.storages['sqlitedb'].location
+        self.assertEqual(expected_path, actual_path)
+
+    def test__config_ini_without_database(self):
+        self.harness.update_config(BASE_CONFIG)
+        expected_config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """).format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        actual_config_text = self.harness.charm._make_config_ini_text()
+        self.assertEqual(expected_config_text, actual_config_text)
+
+    def test__config_ini_with_database(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+
+        # add database relation and update relation data
+        rel_id = self.harness.add_relation('database', 'mysql')
+        self.harness.add_relation_unit(rel_id, 'mysql/0')
+        test_relation_data = {
+            'type': 'mysql',
+            'host': '0.1.2.3:3306',
+            'name': 'my-test-db',
+            'user': 'test-user',
+            'password': 'super!secret!password',
+        }
+        self.harness.update_relation_data(rel_id,
+                                          'mysql/0',
+                                          test_relation_data)
+
+        # test the results of _make_config_ini_text()
+        expected_config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+
+        [database]
+        type = mysql
+        host = 0.1.2.3:3306
+        name = my-test-db
+        user = test-user
+        password = super!secret!password
+        url = mysql://test-user:super!secret!password@0.1.2.3:3306/my-test-db""").format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        actual_config_text = self.harness.charm._make_config_ini_text()
+        self.assertEqual(expected_config_text, actual_config_text)
+
+    def test__duplicate_source_names(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        p_rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        p_rel = self.harness.model.get_relation('grafana-source', p_rel_id)
+        self.harness.add_relation_unit(p_rel_id, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        prom_source_data0 = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus',
+            'source-name': 'duplicate-source-name'
+        }
+        self.harness.update_relation_data(p_rel_id, 'prometheus/0', prom_source_data0)
+        expected_prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-name': 'duplicate-source-name',
+            'source-type': 'prometheus',
+            'isDefault': 'true',
+            'unit_name': 'prometheus/0'
+        }
+        self.assertEqual(dict(self.harness.charm.datastore.sources[p_rel_id]),
+                         expected_prom_source_data)
+
+        # add second source with the same name as the first source
+        g_rel_id = self.harness.add_relation('grafana-source', 'graphite')
+        g_rel = self.harness.model.get_relation('grafana-source', g_rel_id)
+        self.harness.add_relation_unit(g_rel_id, 'graphite/0')
+
+        graphite_source_data0 = {
+            'private-address': '192.12.23.34',
+            'port': 4321,
+            'source-type': 'graphite',
+            'source-name': 'duplicate-source-name'
+        }
+        expected_graphite_source_data = {
+            'isDefault': 'false',
+            'port': 4321,
+            'private-address': '192.12.23.34',
+            'source-name': 'graphite_1',
+            'source-type': 'graphite',
+            'unit_name': 'graphite/0'
+        }
+        self.harness.update_relation_data(g_rel_id, 'graphite/0', graphite_source_data0)
+        self.assertEqual(
+            expected_graphite_source_data,
+            dict(self.harness.charm.datastore.sources.get(g_rel_id))
+        )
+        self.assertEqual(2, len(self.harness.charm.datastore.sources))
+
+        # now remove the relation and ensure datastore source-name is removed
+        self.harness.charm.on.grafana_source_relation_broken.emit(p_rel)
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(p_rel_id))
+        self.assertEqual(1, len(self.harness.charm.datastore.sources))
+
+        # remove graphite relation
+        self.harness.charm.on.grafana_source_relation_broken.emit(g_rel)
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(g_rel_id))
+        self.assertEqual(0, len(self.harness.charm.datastore.sources))
+
+    def test__idempotent_datasource_file_hash(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+        self.assertIsInstance(rel_id, int)
+
+        # test that the unit data propagates the correct way
+        # which is through the triggering of on_relation_changed
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': '192.0.2.1',
+                                              'port': 1234,
+                                              'source-type': 'prometheus',
+                                              'source-name': 'prometheus-app',
+                                          })
+
+        # get a hash of the created file and check that it matches the pod spec
+        pod_spec, _ = self.harness.get_pod_spec()
+        container = get_container(pod_spec, 'grafana')
+        hash_text = hashlib.md5(
+            container['volumeConfig'][0]['files'][0]['content'].encode()).hexdigest()
+        self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text)
+
+        # test the idempotence of the call by re-configuring the pod spec
+        self.harness.charm.configure_pod()
+        self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2021 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..565f05b73714eb85d96beb669a1aa42920c21c3a
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA
@@ -0,0 +1,46 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 5.4.1
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages.  PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages.  PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow to represent an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..68ca4da2c4df950a57ff47f60fbcacbb5256d161
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD
@@ -0,0 +1,43 @@
+PyYAML-5.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-5.4.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101
+PyYAML-5.4.1.dist-info/METADATA,sha256=XnrM5LY-uS85ica26gKUK0dGG-xmPjmGfDTSLpIHQFk,2087
+PyYAML-5.4.1.dist-info/RECORD,,
+PyYAML-5.4.1.dist-info/WHEEL,sha256=Dh4w5P6PPWbqyqoE6MHlzbFQwZXlM-voWJDf2WUsS2g,108
+PyYAML-5.4.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11
+_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402
+_yaml/__pycache__/__init__.cpython-38.pyc,,
+yaml/__init__.py,sha256=gfp2CbRVhzknghkiiJD2l6Z0pI-mv_iZHPSJ4aj0-nY,13170
+yaml/__pycache__/__init__.cpython-38.pyc,sha256=n0YyVkjiOLmcjlR2NXE5TIZf7Z2clZ6sqQ5KlyuTWSI,11845
+yaml/__pycache__/composer.cpython-38.pyc,sha256=OVPzAKAvC2-Tiv4HUwUUG9JHCzG17nvsRQcFTCtY9xs,3563
+yaml/__pycache__/constructor.cpython-38.pyc,sha256=EXPDY7Prtv3F6QbXiJc5F4BvJQyCCGRu83WF4u6X7Vo,20822
+yaml/__pycache__/cyaml.cpython-38.pyc,sha256=wI01UFU-WhUcdnnczL5QpKu0ZNQTttSzXbleIvIfcvM,3411
+yaml/__pycache__/dumper.cpython-38.pyc,sha256=9wIctrlMpF4ksMWuCc5QAyZSenGiRVyrtU-1pAfj54U,1823
+yaml/__pycache__/emitter.cpython-38.pyc,sha256=kd_QGJd0GjpfgQPN9DlG_7HwKfJnJ24JxtdiUOxM9iE,25353
+yaml/__pycache__/error.cpython-38.pyc,sha256=j6mkXgDmzV0y0lo6FeUrvZL2vHN6Vkc52k0_R0oOn6g,2300
+yaml/__pycache__/events.cpython-38.pyc,sha256=NFsoAO36pPL_uxoCO-xRxKndQ3vx47mkStOYjfoQVZ8,3974
+yaml/__pycache__/loader.cpython-38.pyc,sha256=lEMB2brjPrfMjXXTJpCEx6-ct4eI6LYovD4hW5ZuGsw,2164
+yaml/__pycache__/nodes.cpython-38.pyc,sha256=Kkxh_oL04gQg-YFWwnfjpIoYspsXO4GEqKTr3NbxOD8,1725
+yaml/__pycache__/parser.cpython-38.pyc,sha256=0R9Qx0cBMUoOLzMOWeXCyXsC4S4KJ7oPHdmTVPQ4FbQ,11924
+yaml/__pycache__/reader.cpython-38.pyc,sha256=ZpOMJ6rZDc8EWffI4vZR_Fhcu3WmhgT_GAkDrKkEtPo,4537
+yaml/__pycache__/representer.cpython-38.pyc,sha256=tR9wWffCThWXwQe47uYFdHg2bCkqNjBcwmG7RSHmWS4,10069
+yaml/__pycache__/resolver.cpython-38.pyc,sha256=zsLBuCKn8KAJPVGo5J_xZSytifJktdTtkUNnltOt__I,5498
+yaml/__pycache__/scanner.cpython-38.pyc,sha256=N8ubxRd6bZBjoRna6CU8wK1Imb_7TWOsudzPh9JDDkQ,25269
+yaml/__pycache__/serializer.cpython-38.pyc,sha256=9JDH7ONP5zFlep0f2yNWRoOSZr5Y28jL012O1EIbuug,3320
+yaml/__pycache__/tokens.cpython-38.pyc,sha256=haBW6UBDhVFog2xIe63OkrAP_9JRFyNKCROFPRJiyu0,4935
+yaml/_yaml.cpython-38-x86_64-linux-gnu.so,sha256=fxjEXaSdzion1SMwhu9Ikx-JOVNtcl6KvW_pyGBt-cU,2342916
+yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
+yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639
+yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851
+yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
+yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
+yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
+yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
+yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184
+yaml/resolver.py,sha256=Z1W8AOMA6Proy4gIO2OhUO4IPS_bFNAl0Ca3rwChpPg,8999
+yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277
+yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..69d594f055a5127401ebe017f8837cef4c76c020
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-manylinux1_x86_64
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/_yaml/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/_yaml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+    from sys import version_info
+
+    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+    raise exc("No module named '_yaml'")
+else:
+    from yaml._yaml import *
+    import warnings
+    warnings.warn(
+        'The _yaml extension module is now located at yaml._yaml'
+        ' and its location is subject to change.  To use the'
+        ' LibYAML-based parser and emitter, import from `yaml`:'
+        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+        DeprecationWarning
+    )
+    del warnings
+    # Don't `del yaml` here because yaml is actually an existing
+    # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b21b997f84c57165c6e89728ebe53da0b518e944
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA
@@ -0,0 +1,63 @@
+Metadata-Version: 2.1
+Name: oci-image
+Version: 1.0.0
+Summary: Helper for dealing with OCI Image resources in the charm operator framework
+Home-page: https://github.com/juju-solutions/resource-oci-image
+Author: Cory Johns
+Author-email: johnsca@gmail.com
+License: Apache License 2.0
+Platform: UNKNOWN
+
+# OCI Image Resource helper
+
+This is a helper for working with OCI image resources in the charm operator
+framework.
+
+## Installation
+
+Add it to your `requirements.txt`.  Since it's not in PyPI, you'll need to use
+the GitHub archive URL (or `git+` URL, if you want to pin to a specific commit):
+
+```
+https://github.com/juju-solutions/resource-oci-image/archive/master.zip
+```
+
+## Usage
+
+The `OCIImageResource` class will wrap the framework resource for the given
+resource name, and calling `fetch` on it will either return the image info
+or raise an `OCIImageResourceError` if it can't fetch or parse the image
+info. The exception will have a `status` attribute you can use directly,
+or a `status_message` attribute if you just want that.
+
+Example usage:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+from oci_image import OCIImageResource, OCIImageResourceError
+
+class MyCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.image = OCIImageResource(self, 'resource-name')
+        self.framework.observe(self.on.start, self.on_start)
+
+    def on_start(self, event):
+        try:
+            image_info = self.image.fetch()
+        except OCIImageResourceError as e:
+            self.model.unit.status = e.status
+            event.defer()
+            return
+
+        self.model.pod.set_spec({'containers': [{
+            'name': 'my-charm',
+            'imageDetails': image_info,
+        }]})
+
+if __name__ == "__main__":
+    main(MyCharm)
+```
+
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..c265196b2ce5ac15b3705d0bca4e6da2474d7556
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+__pycache__/oci_image.cpython-38.pyc,,
+oci_image-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+oci_image-1.0.0.dist-info/METADATA,sha256=QIpPa4JcSPa_Ci0n-DaCNp4PkKovZudFW8FnpnauJnQ,1808
+oci_image-1.0.0.dist-info/RECORD,,
+oci_image-1.0.0.dist-info/WHEEL,sha256=EVRjI69F5qVjm_YgqcTXPnTAv3BfSUr0WVAHuSP3Xoo,92
+oci_image-1.0.0.dist-info/top_level.txt,sha256=M4dLaObLx7irI4EO-A4_VJP_b-A6dDD7hB5QyVKdHOY,10
+oci_image.py,sha256=c75VR2vSmOp9pPTP2cnsxo23CqhhFbRtnIOtMjzDyXY,1794
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..83ff02e961fce5ad7befce746ff02635e1616315
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cd6962384eaf5e60f5976c60d221b84ba5561a1d
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+oci_image
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4d3818f47c3bde81c97dd43a702e2aa4d0dde7f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/oci_image.py
@@ -0,0 +1,53 @@
+from pathlib import Path
+
+import yaml
+from ops.framework import Object
+from ops.model import BlockedStatus, ModelError
+
+
+class OCIImageResource(Object):
+    def __init__(self, charm, resource_name):
+        super().__init__(charm, resource_name)
+        self.resource_name = resource_name
+
+    def fetch(self):
+        try:
+            resource_path = self.model.resources.fetch(self.resource_name)
+        except ModelError as e:
+            raise MissingResourceError(self.resource_name) from e
+        if not resource_path.exists():
+            raise MissingResourceError(self.resource_name)
+        resource_text = Path(resource_path).read_text()
+        if not resource_text:
+            raise MissingResourceError(self.resource_name)
+        try:
+            resource_data = yaml.safe_load(resource_text)
+        except yaml.YAMLError as e:
+            raise InvalidResourceError(self.resource_name) from e
+        else:
+            # Translate the data from the format used by the charm store to the
+            # format used by the Juju K8s pod spec, since that is how this is
+            # typically used.
+            return {
+                'imagePath': resource_data['registrypath'],
+                'username': resource_data['username'],
+                'password': resource_data['password'],
+            }
+
+
+class OCIImageResourceError(ModelError):
+    status_type = BlockedStatus
+    status_message = 'Resource error'
+
+    def __init__(self, resource_name):
+        super().__init__(resource_name)
+        self.status = self.status_type(
+            f'{self.status_message}: {resource_name}')
+
+
+class MissingResourceError(OCIImageResourceError):
+    status_message = 'Missing resource'
+
+
+class InvalidResourceError(OCIImageResourceError):
+    status_message = 'Invalid resource'
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/INSTALLER b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/METADATA b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..37ef931ae974796ba6d2f273f8f226916ff542c9
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/METADATA
@@ -0,0 +1,263 @@
+Metadata-Version: 2.1
+Name: ops
+Version: 1.2.0
+Summary: The Python library behind great charms
+Home-page: https://github.com/canonical/operator
+Author: The Charmcraft team at Canonical Ltd.
+Author-email: charmcraft@lists.launchpad.net
+License: Apache-2.0
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3.5
+Description-Content-Type: text/markdown
+Requires-Dist: PyYAML
+
+# The Charmed Operator Framework
+
+This Charmed Operator Framework simplifies [Kubernetes
+operator](https://charmhub.io/about) development for 
+[model-driven application
+management](https://juju.is/model-driven-operations).
+
+A Kubernetes operator is a container that drives lifecycle management,
+configuration, integration and daily actions for an application.
+Operators simplify software management and operations. They capture
+reusable app domain knowledge from experts in a software component that
+can be shared.
+
+This project extends the operator pattern to enable 
+[charmed operators](https://juju.is/universal-operators), not just
+for Kubernetes but also operators for traditional Linux or Windows
+application management.
+
+Operators use a [Charmed Operator Lifecycle Manager
+(Charmed OLM)](https://juju.is/operator-lifecycle-manager) to coordinate their
+work in a cluster. The system uses Golang for concurrent event
+processing under the hood, but enables the operators to be written in
+Python.
+
+## Simple, composable operators
+
+Operators should 'do one thing and do it well'. Each operator drives a
+single microservice and can be [composed with other 
+operators](https://juju.is/integration) to deliver a complex application.
+
+It is better to have small, reusable operators that each drive a single
+microservice very well. The operator handles instantiation, scaling,
+configuration, optimisation, networking, service mesh, observability,
+and day-2 operations specific to that microservice.
+
+Operator composition takes place through declarative integration in
+the OLM. Operators declare integration endpoints, and discover lines of
+integration between those endpoints dynamically at runtime.
+
+## Pure Python operators
+
+The framework provides a standard Python library and object model that
+represents the application graph, and an event distribution mechanism for
+distributed system coordination and communication.
+
+The OLM is written in Golang for efficient concurrency in event handling
+and distribution. Operators can be written in any language. We recommend
+this Python framework for ease of design, development and collaboration.
+
+## Better collaboration
+
+Operator developers publish Python libraries that make it easy to integrate
+your operator with their operator. The framework includes standard tools
+to distribute these integration libraries and keep them up to date.
+
+Development collaboration happens at [Charmhub.io](https://charmhub.io/) where
+operators are published along with integration libraries. Design and
+code review discussions are hosted in the
+[Charmhub forum](https://discourse.charmhub.io/). We recommend the
+[Open Operator Manifesto](https://charmhub.io/manifesto) as a guideline for
+high quality operator engineering.
+
+## Event serialization and operator services
+
+Distributed systems can be hard! So this framework exists to make it much
+simpler to reason about operator behaviour, especially in complex deployments.
+The Charmed OLM provides [operator services](https://juju.is/operator-services) such
+as provisioning, event delivery, leader election and model management.
+
+Coordination between operators is provided by a cluster-wide event
+distribution system. Events are serialized to avoid race conditions in any
+given container or machine. This greatly simplifies the development of
+operators for high availability, scale-out and integrated applications.
+
+## Model-driven Operator Lifecycle Manager
+
+A key goal of the project is to improve the user experience for admins
+working with multiple different operators.
+
+We embrace [model-driven operations](https://juju.is/model-driven-operations)
+in the Charmed Operator Lifecycle Manager. The model encompasses capacity,
+storage, networking, the application graph and administrative access.
+
+Admins describe the application graph of integrated microservices, and
+the OLM then drives instantiation. A change in the model is propagated
+to all affected operators, reducing the duplication of effort and
+repetition normally found in operating a complex topology of services.
+
+Administrative actions, updates, configuration and integration are all
+driven through the OLM.
+
+# Getting started
+
+A package of operator code is called a charmed operator or “charm”. You will use `charmcraft`
+to register your operator name, and publish it when you are ready.
+
+```
+$ sudo snap install charmcraft --beta
+charmcraft (beta) 0.6.0 from John Lenton (chipaca) installed
+```
+
+Charmed operators written using the Charmed Operator Framework are just Python code. The goal
+is to feel natural for somebody used to coding in Python, and reasonably
+easy to learn for somebody who is not a pythonista.
+
+The dependencies of the operator framework are kept as minimal as possible;
+currently that's Python 3.5 or greater, and `PyYAML` (both are included by
+default in Ubuntu's cloud images from 16.04 on).
+
+# A quick introduction
+
+Make an empty directory `my-charm` and cd into it. Then start a new charmed operator
+with:
+
+```
+$ charmcraft init
+All done.
+There are some notes about things we think you should do.
+These are marked with ‘TODO:’, as is customary. Namely:
+      README.md: fill out the description
+      README.md: explain how to use the charm
+  metadata.yaml: fill out the charm's description
+  metadata.yaml: fill out the charm's summary
+```
+
+Charmed operators are just Python code. The entry point to your charmed operator can
+be any filename; by default it is `src/charm.py`, which must be executable
+(and probably have `#!/usr/bin/env python3` on the first line).
+
+You need a `metadata.yaml` to describe your charmed operator, and if you will support
+configuration of your charmed operator then a `config.yaml` file is required too. The
+`requirements.txt` file specifies any Python dependencies.
+
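+A minimal `metadata.yaml` might look like this (a sketch; the names are illustrative,
+not taken from a real charm):
+
+```yaml
+name: my-charm
+summary: A one-line summary of the charm.
+description: |
+  A longer description of what the charm does and how to use it.
+```
+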
+```
+$ tree my-charm/
+my-charm/
+├── actions.yaml
+├── config.yaml
+├── LICENSE
+├── metadata.yaml
+├── README.md
+├── requirements-dev.txt
+├── requirements.txt
+├── run_tests
+├── src
+│   └── charm.py
+├── tests
+│   ├── __init__.py
+│   └── my_charm.py
+```
+
+`src/charm.py` here is the entry point to your charm code. At a minimum, it
+needs to define a subclass of `CharmBase` and pass that into the framework
+`main` function:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+
+class MyCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.start, self.on_start)
+
+    def on_start(self, event):
+        # Handle the start event here.
+        pass
+
+if __name__ == "__main__":
+    main(MyCharm)
+```
+
+That should be enough for you to be able to run
+
+```
+$ charmcraft build
+Done, charm left in 'my-charm.charm'
+$ juju deploy ./my-charm.charm
+```
+
+> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
+> also be found on its [github page](https://github.com/canonical/charmcraft).
+
+Happy charming!
+
+# Testing your charmed operators
+
+The operator framework provides a testing harness, so you can check your
+charmed operator does the right thing in different scenarios, without having to create
+a full deployment. `pydoc3 ops.testing` has the details, including this
+example:
+
+```python
+harness = Harness(MyCharm)
+# Do initial setup here
+relation_id = harness.add_relation('db', 'postgresql')
+# Now instantiate the charm to see events as the model changes
+harness.begin()
+harness.add_relation_unit(relation_id, 'postgresql/0')
+harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+# Check that charm has properly handled the relation_joined event for postgresql/0
+self.assertEqual(harness.charm. ...)
+```
+
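+Wrapped in a test function, a complete (if minimal) check might look like this
+(a sketch; it assumes a `MyCharm` importable from your `src/charm.py`):
+
+```python
+from ops.testing import Harness
+
+from charm import MyCharm  # hypothetical: your charm class from src/charm.py
+
+
+def test_db_relation_data():
+    harness = Harness(MyCharm)
+    relation_id = harness.add_relation('db', 'postgresql')
+    harness.begin()
+    harness.add_relation_unit(relation_id, 'postgresql/0')
+    harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+    # The harness records the relation data, so the test can read it back.
+    assert harness.get_relation_data(relation_id, 'postgresql/0') == {'key': 'val'}
+```
+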
+## Talk to us
+
+If you need help, have ideas, or would just like to chat with us, reach out on
+IRC: we're in [#smooth-operator] on freenode (or try the [webchat]).
+
+We also pay attention to [Charmhub discourse](https://discourse.charmhub.io/).
+
+You can also deep dive into the [API docs] if that's your thing.
+
+[webchat]: https://webchat.freenode.net/#smooth-operator
+[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator
+[discourse]: https://discourse.juju.is/c/charming
+[API docs]: https://ops.rtfd.io/
+
+## Operator Framework development
+
+To work in the framework itself you will need Python >= 3.5 and the
+dependencies in `requirements-dev.txt` installed in your system, or a
+virtualenv:
+
+    virtualenv --python=python3 env
+    source env/bin/activate
+    pip install -r requirements-dev.txt
+
+Then you can try `./run_tests`; it should all go green.
+
+For improved performance on the tests, ensure that you have PyYAML
+installed with the correct extensions:
+
+    apt-get install libyaml-dev
+    pip install --force-reinstall --no-cache-dir pyyaml
+
+If you want to build the documentation you'll need the requirements from
+`docs/requirements.txt`, or in your virtualenv:
+
+    pip install -r docs/requirements.txt
+
+and then you can run `./build_docs`.
+
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/RECORD b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..2bda5389548e6a163ff30ba606a90efb57dbd509
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/RECORD
@@ -0,0 +1,33 @@
+ops-1.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ops-1.2.0.dist-info/METADATA,sha256=A89r8Y4LYGI3fELqo6UHEL_-pffc7-4Uv5_Wn9x1cnI,9638
+ops-1.2.0.dist-info/RECORD,,
+ops-1.2.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+ops-1.2.0.dist-info/top_level.txt,sha256=enC05wWafSg8iDKIvj3gvtAtEP2kYCyN5Gmd689q-_I,4
+ops/__init__.py,sha256=jbUCTFsrtEpa6EBUy6wZm72pPvtMUt2lrWvxxKghtf8,2206
+ops/__pycache__/__init__.cpython-38.pyc,,
+ops/__pycache__/charm.cpython-38.pyc,,
+ops/__pycache__/framework.cpython-38.pyc,,
+ops/__pycache__/jujuversion.cpython-38.pyc,,
+ops/__pycache__/log.cpython-38.pyc,,
+ops/__pycache__/main.cpython-38.pyc,,
+ops/__pycache__/model.cpython-38.pyc,,
+ops/__pycache__/pebble.cpython-38.pyc,,
+ops/__pycache__/storage.cpython-38.pyc,,
+ops/__pycache__/testing.cpython-38.pyc,,
+ops/__pycache__/version.cpython-38.pyc,,
+ops/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ops/_private/__pycache__/__init__.cpython-38.pyc,,
+ops/_private/__pycache__/yaml.cpython-38.pyc,,
+ops/_private/yaml.py,sha256=-uxzjI7-micZH5v-C0wx4qhkHr8LyGImXYqCb6cTSiU,1105
+ops/charm.py,sha256=lsWZc9RgijRTrCIioIvafUstZqoHQ-QzCxLmv9d5pfU,35514
+ops/framework.py,sha256=1ByOtFKRR6kRzOEbfWnGEMNevixOYf18U0oZxKq8LsA,43769
+ops/jujuversion.py,sha256=9wMlUmngcAENV9RkgVVLWtZsyRQaf6XNrQQqUeY_fHA,4139
+ops/lib/__init__.py,sha256=QizPpuRWXjqbH5Gv7mnH8CcPR9BX7q2YNFnxyoSsA0g,9213
+ops/lib/__pycache__/__init__.cpython-38.pyc,,
+ops/log.py,sha256=JVpt_Vkf_lWO2cucUcJfXjAWVTattk4xBscSs65Sn3I,2155
+ops/main.py,sha256=PmCxEVK_3yjz1mmVuF7Qe9c_uAoYkaUM8t9D9Fl-wNY,15729
+ops/model.py,sha256=DpVDRs5_w7OqRYFmxugU8mVQQbu8LDfOdndP7_jrOko,59147
+ops/pebble.py,sha256=aA5ve3LNHYW0arGQ4JMHorbmNg4G8S3GDbcEEB3XA9Q,38963
+ops/storage.py,sha256=jEfszzQGYDrl5wa03I6txvea-7lI661Yq6n7sIPa0fU,14192
+ops/testing.py,sha256=yk5tCrPp0odgcGzNcLBmhsD46WICD14IVXn3Q0PnYUw,48531
+ops/version.py,sha256=icKh3kW5DQjMp40Qp8xmAZKIJkv-Pv34EzE_tl1nM6k,46
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/WHEEL b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/top_level.txt b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d81d3bb6fea804d1db7a1549d67244b513aa145
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops-1.2.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ops
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..44cb77a44b04b0d453e9534d28b3c3cc706e203b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/__init__.py
@@ -0,0 +1,44 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Charmed Operator Framework.
+
+The Charmed Operator Framework allows the development of operators in a simple
+and straightforward way, using standard Python structures to allow for clean,
+maintainable, and reusable code.
+
+A Kubernetes operator is a container that drives lifecycle management,
+configuration, integration and daily actions for an application. Operators
+simplify software management and operations. They capture reusable app domain
+knowledge from experts in a software component that can be shared.
+
+The Charmed Operator Framework extends the "operator pattern" to enable Charmed
+Operators, packaged as and often referred to as "charms". Charms are not just
+for Kubernetes but also for traditional Linux or Windows application
+management. Operators use an Operator Lifecycle Manager (OLM), like Juju, to
+coordinate their work in a cluster. The system uses Golang for concurrent event
+processing under the hood, but enables the operators to be written in Python.
+
+Operators should do one thing and do it well. Each operator drives a single
+application or service and can be composed with other operators to deliver a
+complex application or service. An operator handles instantiation, scaling,
+configuration, optimisation, networking, service mesh, observability,
+and day-2 operations specific to that application.
+
+Full developer documentation is available at https://juju.is/docs/sdk.
+"""
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm  # noqa: F401 (imported but unused)
+from .version import version as __version__  # noqa: F401 (imported but unused)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/_private/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/_private/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/_private/yaml.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/_private/yaml.py
new file mode 100644
index 0000000000000000000000000000000000000000..0540182802c1bb5f4f2fccf07e829acae9f51f2a
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/_private/yaml.py
@@ -0,0 +1,32 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Internal YAML helpers."""
+
+import yaml
+
+
+# Use C speedups if available
+_safe_loader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
+_safe_dumper = getattr(yaml, 'CSafeDumper', yaml.SafeDumper)
+
+
+def safe_load(stream):
+    """Same as yaml.safe_load, but use fast C loader if available."""
+    return yaml.load(stream, Loader=_safe_loader)
+
+
+def safe_dump(data, stream=None, **kwargs):
+    """Same as yaml.safe_dump, but use fast C dumper if available."""
+    return yaml.dump(data, stream=stream, Dumper=_safe_dumper, **kwargs)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/charm.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..41ddd258a7afb3bcb4c4018d6aee24bab11312af
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/charm.py
@@ -0,0 +1,893 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base objects for the Charm, events and metadata."""
+
+import enum
+import os
+import pathlib
+import typing
+
+from ops import model
+from ops._private import yaml
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+
+
+class HookEvent(EventBase):
+    """Events raised by Juju to progress a charm's lifecycle.
+
+    Hooks are callback methods of a charm class (a subclass of
+    :class:`CharmBase`) that are invoked in response to events raised
+    by Juju. These callback methods are the means by which a charm
+    governs the lifecycle of its application.
+
+    The :class:`HookEvent` class is the base of a type hierarchy of events
+    related to the charm's lifecycle.
+
+    :class:`HookEvent` subtypes are grouped into the following categories:
+
+    - Core lifecycle events
+    - Relation events
+    - Storage events
+    - Metric events
+    """
+
+
+class ActionEvent(EventBase):
+    """Events raised by Juju when an administrator invokes a Juju Action.
+
+    This class is the data type of events triggered when an administrator
+    invokes a Juju Action. Callbacks bound to these events may be used
+    for responding to the administrator's Juju Action request.
+
+    To read the parameters for the action, see the instance variable :attr:`params`.
+    To respond with the result of the action, call :meth:`set_results`. To add
+    progress messages that are visible as the action is progressing use
+    :meth:`log`.
+
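+    For example (a sketch; ``do-backup`` is a hypothetical action name)::
+
+        def _on_do_backup_action(self, event):
+            event.log('starting backup')
+            event.set_results({'status': 'ok'})
+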
+    Attributes:
+        params: The parameters passed to the action.
+    """
+
+    def defer(self):
+        """Action events are not deferable like other events.
+
+        This is because an action runs synchronously and the administrator
+        is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Event triggered when a charm is installed.
+
+    This event is triggered at the beginning of a charm's
+    lifecycle. Any associated callback method should be used to
+    perform one-time setup operations, such as installing prerequisite
+    software.
+    """
+
+
+class StartEvent(HookEvent):
+    """Event triggered immediately after first configuation change.
+
+    This event is triggered immediately after the first
+    :class:`ConfigChangedEvent`. Callback methods bound to the event should be
+    used to ensure that the charm’s software is in a running state. Note that
+    the charm’s software should be configured so as to persist in this state
+    through reboots without further intervention on Juju’s part.
+    """
+
+
+class StopEvent(HookEvent):
+    """Event triggered when a charm is shut down.
+
+    This event is triggered when an application's removal is requested
+    by the client. The event fires immediately before the end of the
+    unit’s destruction sequence. Callback methods bound to this event
+    should be used to ensure that the charm’s software is not running,
+    and that it will not start again on reboot.
+    """
+
+
+class RemoveEvent(HookEvent):
+    """Event triggered when a unit is about to be terminated.
+
+    This event fires prior to Juju removing the charm and terminating its unit.
+    """
+
+
+class ConfigChangedEvent(HookEvent):
+    """Event triggered when a configuration change is requested.
+
+    This event fires in several different situations.
+
+    - immediately after the :class:`install <InstallEvent>` event.
+    - after a :class:`relation is created <RelationCreatedEvent>`.
+    - after a :class:`leader is elected <LeaderElectedEvent>`.
+    - after changing charm configuration using the GUI or command line
+      interface.
+    - when the charm :class:`starts <StartEvent>`.
+    - when a new unit :class:`joins a relation <RelationJoinedEvent>`.
+    - when there is a :class:`change to an existing relation <RelationChangedEvent>`.
+
+    Any callback method bound to this event cannot assume that the
+    software has already been started; it should not start stopped
+    software, but should (if appropriate) restart running software to
+    take configuration changes into account.
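+
+    For example (a sketch; ``log-level`` is a hypothetical config option)::
+
+        def _on_config_changed(self, event):
+            level = self.model.config.get('log-level', 'info')
+            self._configure_logging(level)  # hypothetical helper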
+    """
+
+
+class UpdateStatusEvent(HookEvent):
+    """Event triggered by a status update request from Juju.
+
+    This event is periodically triggered by Juju so that it can
+    provide constant feedback to the administrator about the status of
+    the application the charm is modeling. Any callback method bound
+    to this event should determine the "health" of the application and
+    set the status appropriately.
+
+    The interval between :class:`update-status <UpdateStatusEvent>` events can
+    be configured model-wide, e.g.  ``juju model-config
+    update-status-hook-interval=1m``.
+    """
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Event triggered by request to upgrade the charm.
+
+    This event will be triggered when an administrator executes ``juju
+    upgrade-charm``. The event fires after Juju has unpacked the upgraded charm
+    code, and so this event will be handled by the callback method bound to the
+    event in the new codebase. The associated callback method is invoked
+    provided there is no existing error state. The callback method should be
+    used to reconcile current state written by an older version of the charm
+    into whatever form that is needed by the current charm version.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Event triggered to prepare a unit for series upgrade.
+
+    This event triggers when an administrator executes ``juju upgrade-series
+    MACHINE prepare``. The event will fire for each unit that is running on the
+    specified machine. Any callback method bound to this event must prepare the
+    charm for an upgrade to the series. This may include things like exporting
+    database content to a version neutral format, or evacuating running
+    instances to other machines.
+
+    It can be assumed that the administrator will only initiate the actual
+    series upgrade after all units on a machine have executed the callback
+    method associated with this event. After the upgrade has been completed,
+    the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Event triggered after a series upgrade.
+
+    This event is triggered after the administrator has done a distribution
+    upgrade (or rolled back and kept the same series). It is called in response
+    to ``juju upgrade-series MACHINE complete``. Associated charm callback
+    methods are expected to do whatever steps are necessary to reconfigure their
+    applications for the new series. This may include things like populating the
+    upgraded version of a database. Note, however, that charms are expected to
+    check whether the series has actually changed or whether it was rolled back
+    to the original series.
+    """
+
+
+class LeaderElectedEvent(HookEvent):
+    """Event triggered when a new leader has been elected.
+
+    Juju will trigger this event when a new leader unit is chosen for
+    a given application.
+
+    This event fires at least once after Juju selects a leader
+    unit. Callback methods bound to this event may take any action
+    required for the elected unit to assert leadership. Note that only
+    the elected leader unit will receive this event.
+    """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+    """Event triggered when leader changes any settings.
+
+    DEPRECATED NOTICE
+
+    This event has been deprecated in favor of using a Peer relation,
+    and having the leader set a value in the Application data bag for
+    that peer relation.  (see :class:`RelationChangedEvent`).
+    """
+
+
+class CollectMetricsEvent(HookEvent):
+    """Event triggered by Juju to collect metrics.
+
+    Juju fires this event every five minutes for the lifetime of the
+    unit. Callback methods bound to this event may use the :meth:`add_metrics`
+    method of this class to send measurements to Juju.
+
+    Note that associated callback methods are currently sandboxed in
+    how they can interact with Juju.
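+
+    For example (a sketch)::
+
+        def _on_collect_metrics(self, event):
+            event.add_metrics({'active-users': 1.0})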
+    """
+
+    def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
+        """Record metrics that have been gathered by the charm for this unit.
+
+        Args:
+            metrics: A collection of {key: float} pairs that contains the
+              metrics that have been gathered
+            labels: {key:value} strings that can be applied to the
+                metrics that are being gathered
+        """
+        self.framework.model._backend.add_metrics(metrics, labels)
+
+
+class RelationEvent(HookEvent):
+    """A base class representing the various relation lifecycle events.
+
+    Relation lifecycle events are generated when application units
+    participate in relations.  Units can only participate in relations
+    after they have been "started", and before they have been
+    "stopped". Within that time window, the unit may participate in
+    several different relations at a time, including multiple
+    relations with the same name.
+
+    Attributes:
+        relation: The :class:`~ops.model.Relation` involved in this event
+        app: The remote :class:`~ops.model.Application` that has triggered this
+             event
+        unit: The remote unit that has triggered this event. This may be
+              ``None`` if the relation event was triggered as an
+              :class:`~ops.model.Application` level event
+
+    """
+
+    def __init__(self, handle, relation, app=None, unit=None):
+        super().__init__(handle)
+
+        if unit is not None and unit.app != app:
+            raise RuntimeError(
+                'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
+
+        self.relation = relation
+        self.app = app
+        self.unit = unit
+
+    def snapshot(self) -> dict:
+        """Used by the framework to serialize the event to disk.
+
+        Not meant to be called by charm code.
+        """
+        snapshot = {
+            'relation_name': self.relation.name,
+            'relation_id': self.relation.id,
+        }
+        if self.app:
+            snapshot['app_name'] = self.app.name
+        if self.unit:
+            snapshot['unit_name'] = self.unit.name
+        return snapshot
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the framework to deserialize the event from disk.
+
+        Not meant to be called by charm code.
+        """
+        self.relation = self.framework.model.get_relation(
+            snapshot['relation_name'], snapshot['relation_id'])
+
+        app_name = snapshot.get('app_name')
+        if app_name:
+            self.app = self.framework.model.get_app(app_name)
+        else:
+            self.app = None
+
+        unit_name = snapshot.get('unit_name')
+        if unit_name:
+            self.unit = self.framework.model.get_unit(unit_name)
+        else:
+            self.unit = None
+
+
+class RelationCreatedEvent(RelationEvent):
+    """Event triggered when a new relation is created.
+
+    This is triggered when a new relation to another app is added in Juju. This
+    can occur before units for those applications have started. All existing
+    relations should be established before start.
+    """
+
+
+class RelationJoinedEvent(RelationEvent):
+    """Event triggered when a new unit joins a relation.
+
+    This event is triggered whenever a new unit of a related
+    application joins the relation.  The event fires only when that
+    remote unit is first observed by the unit. Callback methods bound
+    to this event may set any local unit settings that can be
+    determined using no more than the name of the joining unit and the
+    remote ``private-address`` setting, which is always available when
+    the relation is created and is by convention not deleted.
+    """
+
+
+class RelationChangedEvent(RelationEvent):
+    """Event triggered when relation data changes.
+
+    This event is triggered whenever there is a change to the data bucket for a
+    related application or unit. Look at ``event.relation.data[event.unit/app]``
+    to see the new information, where ``event`` is the event object passed to
+    the callback method bound to this event.
+
+    This event always fires once, after :class:`RelationJoinedEvent`, and
+    will subsequently fire whenever that remote unit changes its settings for
+    the relation. Callback methods bound to this event should be the only ones
+    that rely on remote relation settings. They should not error if the settings
+    are incomplete, since it can be guaranteed that when the remote unit or
+    application changes its settings, the event will fire again.
+
+    The settings that may be queried, or set, are determined by the relation’s
+    interface.
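+
+    For example (a sketch; ``db`` is a hypothetical relation name)::
+
+        def _on_db_relation_changed(self, event):
+            host = event.relation.data[event.unit].get('host')
+            if host is None:
+                return  # incomplete data; this event will fire again later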
+    """
+
+
+class RelationDepartedEvent(RelationEvent):
+    """Event triggered when a unit leaves a relation.
+
+    This is the inverse of the :class:`RelationJoinedEvent`, representing when a
+    unit is leaving the relation (the unit is being removed, the app is being
+    removed, the relation is being removed). It is fired once for each unit that
+    is going away.
+
+    When the remote unit is known to be leaving the relation, this will result
+    in the :class:`RelationChangedEvent` firing at least once, after which the
+    :class:`RelationDepartedEvent` will fire. The :class:`RelationDepartedEvent`
+    will fire once only. Once the :class:`RelationDepartedEvent` has fired no
+    further :class:`RelationChangedEvent` will fire.
+
+    Callback methods bound to this event may be used to remove all
+    references to the departing remote unit, because there’s no
+    guarantee that it’s still part of the system; it’s perfectly
+    probable (although not guaranteed) that the system running that
+    unit has already shut down.
+
+    Once all callback methods bound to this event have been run for such a
+    relation, the unit agent will fire the :class:`RelationBrokenEvent`.
+    """
+
+
+class RelationBrokenEvent(RelationEvent):
+    """Event triggered when a relation is removed.
+
+    If a relation is being removed (``juju remove-relation`` or ``juju
+    remove-application``), once all the units have been removed, this event will
+    fire to signal that the relationship has been fully terminated.
+
+    The event indicates that the current relation is no longer valid, and that
+    the charm’s software must be configured as though the relation had never
+    existed. It will only be called after every callback method bound to
+    :class:`RelationDepartedEvent` has been run. If a callback method
+    bound to this event is being executed, it is guaranteed that no remote units
+    are currently known locally.
+    """
+
+
+class StorageEvent(HookEvent):
+    """Base class representing storage-related events.
+
+    Juju can provide a variety of storage types to a charm. A charm
+    can define several different types of storage that are allocated
+    from Juju. Changes in the state of storage trigger sub-types
+    of :class:`StorageEvent`.
+    """
+
+
+class StorageAttachedEvent(StorageEvent):
+    """Event triggered when new storage becomes available.
+
+    This event is triggered when new storage is available for the
+    charm to use.
+
+    Callback methods bound to this event allow the charm to run code
+    when storage has been added. Such methods will be run before the
+    :class:`InstallEvent` fires, so that the installation routine may
+    use the storage. The name prefix of this hook will depend on the
+    storage key defined in the ``metadata.yaml`` file.
+    """
+
+
+class StorageDetachingEvent(StorageEvent):
+    """Event triggered prior to removal of storage.
+
+    This event is triggered when storage that a charm has been using
+    is going away.
+
+    Callback methods bound to this event allow the charm to run code
+    before storage is removed. Such methods will be run before storage
+    is detached, and always before the :class:`StopEvent` fires, thereby
+    allowing the charm to gracefully release resources before they are
+    removed and before the unit terminates. The name prefix of the
+    hook will depend on the storage key defined in the ``metadata.yaml``
+    file.
+    """
+
+
+class WorkloadEvent(HookEvent):
+    """Base class representing workload-related events.
+
+    Workload events are generated for all containers that the charm
+    expects in metadata. Workload containers currently only trigger
+    a :class:`PebbleReadyEvent`.
+
+    Attributes:
+        workload: The :class:`~ops.model.Container` involved in this event.
+                  Workload currently only can be a Container but in future may
+                  be other types that represent the specific workload type e.g.
+                  a Machine.
+    """
+
+    def __init__(self, handle, workload):
+        super().__init__(handle)
+
+        self.workload = workload
+
+    def snapshot(self) -> dict:
+        """Used by the framework to serialize the event to disk.
+
+        Not meant to be called by charm code.
+        """
+        snapshot = {}
+        if isinstance(self.workload, model.Container):
+            snapshot['container_name'] = self.workload.name
+        return snapshot
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the framework to deserialize the event from disk.
+
+        Not meant to be called by charm code.
+        """
+        container_name = snapshot.get('container_name')
+        if container_name:
+            self.workload = self.framework.model.unit.get_container(container_name)
+        else:
+            self.workload = None
+
+
+class PebbleReadyEvent(WorkloadEvent):
+    """Event triggered when pebble is ready for a workload.
+
+    This event is triggered when the Pebble process for a workload/container
+    starts up, allowing the charm to configure how services should be launched.
+
+    Callback methods bound to this event allow the charm to run code after
+    a workload has started its Pebble instance and is ready to receive instructions
+    regarding what services should be started. The name prefix of the hook
+    will depend on the container key defined in the ``metadata.yaml`` file.
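+
+    For example (a sketch; ``workload`` is a hypothetical container name)::
+
+        def _on_workload_pebble_ready(self, event):
+            container = event.workload  # an ops.model.Container
+            # ...configure and start services through the container here.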
+    """
+
+
+class CharmEvents(ObjectEvents):
+    """Events generated by Juju pertaining to application lifecycle.
+
+    This class is used to create an event descriptor (``self.on``) attribute for
+    a charm class that inherits from :class:`CharmBase`. The event descriptor
+    may be used to set up event handlers for corresponding events.
+
+    By default the following events will be provided through
+    :class:`CharmBase`::
+
+        self.on.install
+        self.on.start
+        self.on.remove
+        self.on.update_status
+        self.on.config_changed
+        self.on.upgrade_charm
+        self.on.pre_series_upgrade
+        self.on.post_series_upgrade
+        self.on.leader_elected
+        self.on.collect_metrics
+
+
+    In addition to these, depending on the charm's metadata (``metadata.yaml``),
+    named relation and storage events may also be defined.  These named events
+    are created by :class:`CharmBase` using charm metadata.  The named events may be
+    accessed as ``self.on[<name>].<relation_or_storage_event>``
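+
+    For example, with a relation named ``db`` in ``metadata.yaml`` (a sketch)::
+
+        self.framework.observe(self.on.db_relation_joined, self._on_db_relation_joined)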
+    """
+
+    install = EventSource(InstallEvent)
+    start = EventSource(StartEvent)
+    stop = EventSource(StopEvent)
+    remove = EventSource(RemoveEvent)
+    update_status = EventSource(UpdateStatusEvent)
+    config_changed = EventSource(ConfigChangedEvent)
+    upgrade_charm = EventSource(UpgradeCharmEvent)
+    pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
+    post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
+    leader_elected = EventSource(LeaderElectedEvent)
+    leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
+    collect_metrics = EventSource(CollectMetricsEvent)
+
+
+class CharmBase(Object):
+    """Base class that represents the charm overall.
+
+    :class:`CharmBase` is used to create a charm. This is done by inheriting
+    from :class:`CharmBase` and customising the sub class as required. So to
+    create your own charm, say ``MyCharm``, define a charm class and set up the
+    required event handlers (“hooks”) in its constructor::
+
+        import logging
+
+        from ops.charm import CharmBase
+        from ops.main import main
+
+        logger = logging.getLogger(__name__)
+
+        class MyCharm(CharmBase):
+            def __init__(self, *args):
+                logger.debug('Initializing Charm')
+
+                super().__init__(*args)
+
+                self.framework.observe(self.on.config_changed, self._on_config_changed)
+                self.framework.observe(self.on.stop, self._on_stop)
+                # ...
+
+        if __name__ == "__main__":
+            main(MyCharm)
+
+    As shown in the example above, a charm class is instantiated by
+    :func:`~ops.main.main` rather than charm authors directly instantiating a
+    charm.
+
+    Args:
+        framework: The framework responsible for managing the Model and events for this
+            charm.
+        key: Ignored; will be removed after the deprecation period of the signature change.
+
+    """
+
+    # note that without the #: below, sphinx will copy the whole of CharmEvents
+    # docstring inline which is less than ideal.
+    #: Used to set up event handlers; see :class:`CharmEvents`.
+    on = CharmEvents()
+
+    def __init__(self, framework: Framework, key: typing.Optional = None):
+        super().__init__(framework, None)
+
+        for relation_name in self.framework.meta.relations:
+            relation_name = relation_name.replace('-', '_')
+            self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+            self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+            self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+            self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+            self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+        for storage_name in self.framework.meta.storages:
+            storage_name = storage_name.replace('-', '_')
+            self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+            self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+        for action_name in self.framework.meta.actions:
+            action_name = action_name.replace('-', '_')
+            self.on.define_event(action_name + '_action', ActionEvent)
+
+        for container_name in self.framework.meta.containers:
+            container_name = container_name.replace('-', '_')
+            self.on.define_event(container_name + '_pebble_ready', PebbleReadyEvent)
+
+    @property
+    def app(self) -> model.Application:
+        """Application that this unit is part of."""
+        return self.framework.model.app
+
+    @property
+    def unit(self) -> model.Unit:
+        """Unit that this execution is responsible for."""
+        return self.framework.model.unit
+
+    @property
+    def meta(self) -> 'CharmMeta':
+        """Metadata of this charm."""
+        return self.framework.meta
+
+    @property
+    def charm_dir(self) -> pathlib.Path:
+        """Root directory of the charm as it is running."""
+        return self.framework.charm_dir
+
+    @property
+    def config(self) -> model.ConfigData:
+        """A mapping containing the charm's config and current values."""
+        return self.model.config
+
+
+class CharmMeta:
+    """Object containing the metadata for the charm.
+
+    This is read from ``metadata.yaml`` and/or ``actions.yaml``. Generally
+    charms will define this information, rather than reading it at runtime. This
+    class is mostly for the framework to understand what the charm has defined.
+
+    The :attr:`maintainers`, :attr:`tags`, :attr:`terms`, :attr:`series`, and
+    :attr:`extra_bindings` attributes are all lists of strings.  The
+    :attr:`requires`, :attr:`provides`, :attr:`peers`, :attr:`relations`,
+    :attr:`storages`, :attr:`resources`, and :attr:`payloads` attributes are all
+    mappings of names to instances of the respective :class:`RelationMeta`,
+    :class:`StorageMeta`, :class:`ResourceMeta`, or :class:`PayloadMeta`.
+
+    The :attr:`relations` attribute is a convenience accessor which includes all
+    of the ``requires``, ``provides``, and ``peers`` :class:`RelationMeta`
+    items.  If needed, the role of the relation definition can be obtained from
+    its :attr:`role <RelationMeta.role>` attribute.
+
+    Attributes:
+        name: The name of this charm
+        summary: Short description of what this charm does
+        description: Long description for this charm
+        maintainers: A list of strings of the email addresses of the maintainers
+                     of this charm.
+        tags: Charm store tag metadata for categories associated with this charm.
+        terms: Charm store terms that should be agreed to before this charm can
+               be deployed. (Used for things like licensing issues.)
+        series: The list of supported OS series that this charm can support.
+                The first entry in the list is the default series that will be
+                used by deploy if no other series is requested by the user.
+        subordinate: True/False whether this charm is intended to be used as a
+                     subordinate charm.
+        min_juju_version: If supplied, indicates this charm needs features that
+                          are not available in older versions of Juju.
+        requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+        provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+        peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+        relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+                   sections)
+        storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+        resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+        payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+        extra_bindings: A dict of additional named bindings that a charm can use
+                        for network configuration.
+        actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
+
+    Args:
+        raw: a mapping containing the contents of metadata.yaml
+        actions_raw: a mapping containing the contents of actions.yaml
+
+    """
+
+    def __init__(self, raw: typing.Optional[dict] = None,
+                 actions_raw: typing.Optional[dict] = None):
+        # Avoid mutable default arguments; treat None as an empty mapping.
+        raw = raw or {}
+        actions_raw = actions_raw or {}
+        self.name = raw.get('name', '')
+        self.summary = raw.get('summary', '')
+        self.description = raw.get('description', '')
+        self.maintainers = []
+        if 'maintainer' in raw:
+            self.maintainers.append(raw['maintainer'])
+        if 'maintainers' in raw:
+            self.maintainers.extend(raw['maintainers'])
+        self.tags = raw.get('tags', [])
+        self.terms = raw.get('terms', [])
+        self.series = raw.get('series', [])
+        self.subordinate = raw.get('subordinate', False)
+        self.min_juju_version = raw.get('min-juju-version')
+        self.requires = {name: RelationMeta(RelationRole.requires, name, rel)
+                         for name, rel in raw.get('requires', {}).items()}
+        self.provides = {name: RelationMeta(RelationRole.provides, name, rel)
+                         for name, rel in raw.get('provides', {}).items()}
+        self.peers = {name: RelationMeta(RelationRole.peer, name, rel)
+                      for name, rel in raw.get('peers', {}).items()}
+        self.relations = {}
+        self.relations.update(self.requires)
+        self.relations.update(self.provides)
+        self.relations.update(self.peers)
+        self.storages = {name: StorageMeta(name, storage)
+                         for name, storage in raw.get('storage', {}).items()}
+        self.resources = {name: ResourceMeta(name, res)
+                          for name, res in raw.get('resources', {}).items()}
+        self.payloads = {name: PayloadMeta(name, payload)
+                         for name, payload in raw.get('payloads', {}).items()}
+        self.extra_bindings = raw.get('extra-bindings', {})
+        self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
+        # This is taken from Charm Metadata v2, but only the "containers" and
+        # "containers.name" fields that we need right now for Pebble. See:
+        # https://discourse.charmhub.io/t/charm-metadata-v2/3674
+        self.containers = {name: ContainerMeta(name, container)
+                           for name, container in raw.get('containers', {}).items()}
+
+    @classmethod
+    def from_yaml(
+            cls, metadata: typing.Union[str, typing.TextIO],
+            actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
+        """Instantiate a CharmMeta from a YAML description of metadata.yaml.
+
+        Args:
+            metadata: A YAML description of charm metadata (name, relations, etc.)
+                This can be a simple string, or a file-like object. (passed to `yaml.safe_load`).
+            actions: YAML description of Actions for this charm (e.g. actions.yaml)
+        """
+        meta = yaml.safe_load(metadata)
+        raw_actions = {}
+        if actions is not None:
+            raw_actions = yaml.safe_load(actions)
+            if raw_actions is None:
+                raw_actions = {}
+        return cls(meta, raw_actions)
+
+
+class RelationRole(enum.Enum):
+    """An annotation for a charm's role in a relation.
+
+    For each relation a charm's role may be:
+
+    - A Peer
+    - A service consumer in the relation ('requires')
+    - A service provider in the relation ('provides')
+    """
+    peer = 'peer'
+    requires = 'requires'
+    provides = 'provides'
+
+    def is_peer(self) -> bool:
+        """Return whether the current role is peer.
+
+        A convenience to avoid having to import charm.
+        """
+        return self is RelationRole.peer
+
+
+class RelationMeta:
+    """Object containing metadata about a relation definition.
+
+    Should not be constructed directly by charm code. It is obtained from one of
+    :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
+    or :attr:`CharmMeta.relations`.
+
+    Attributes:
+        role: This is :class:`RelationRole`; one of peer/requires/provides
+        relation_name: Name of this relation from metadata.yaml
+        interface_name: Optional definition of the interface protocol.
+        scope: "global" or "container" scope based on how the relation should be used.
+    """
+
+    def __init__(self, role: RelationRole, relation_name: str, raw: dict):
+        if not isinstance(role, RelationRole):
+            raise TypeError("role should be a RelationRole, not {!r}".format(role))
+        self.role = role
+        self.relation_name = relation_name
+        self.interface_name = raw['interface']
+        self.scope = raw.get('scope')
+
+
+class StorageMeta:
+    """Object containing metadata about a storage definition.
+
+    Attributes:
+        storage_name: Name of storage
+        type: Storage type
+        description: A text description of the storage
+        shared: Whether the storage is shared across units of the application
+        read_only: Whether or not the storage is read only
+        minimum_size: Minimum size of storage
+        location: Mount point of storage
+        multiple_range: Range of numeric qualifiers when multiple storage units are used
+    """
+
+    def __init__(self, name, raw):
+        self.storage_name = name
+        self.type = raw['type']
+        self.description = raw.get('description', '')
+        self.shared = raw.get('shared', False)
+        self.read_only = raw.get('read-only', False)
+        self.minimum_size = raw.get('minimum-size')
+        self.location = raw.get('location')
+        self.multiple_range = None
+        if 'multiple' in raw:
+            # The range is either a single count ("3") or a "min-max" span
+            # ("1-3"); an open-ended span ("1-") leaves the maximum as None.
+            multiple_range = raw['multiple']['range']
+            if '-' not in multiple_range:
+                self.multiple_range = (int(multiple_range), int(multiple_range))
+            else:
+                multiple_range = multiple_range.split('-')
+                self.multiple_range = (
+                    int(multiple_range[0]),
+                    int(multiple_range[1]) if multiple_range[1] else None)
+
+
+class ResourceMeta:
+    """Object containing metadata about a resource definition.
+
+    Attributes:
+        resource_name: Name of resource
+        type: Type of the resource
+        filename: Name of file
+        description: A text description of resource
+    """
+
+    def __init__(self, name, raw):
+        self.resource_name = name
+        self.type = raw['type']
+        self.filename = raw.get('filename', None)
+        self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+    """Object containing metadata about a payload definition.
+
+    Attributes:
+        payload_name: Name of payload
+        type: Payload type
+    """
+
+    def __init__(self, name, raw):
+        self.payload_name = name
+        self.type = raw['type']
+
+
+class ActionMeta:
+    """Object containing metadata about an action's definition."""
+
+    def __init__(self, name, raw=None):
+        raw = raw or {}
+        self.name = name
+        self.title = raw.get('title', '')
+        self.description = raw.get('description', '')
+        self.parameters = raw.get('params', {})  # {<parameter name>: <JSON Schema definition>}
+        self.required = raw.get('required', [])  # [<parameter name>, ...]
+
+
+class ContainerMeta:
+    """Metadata about an individual container.
+
+    NOTE: this is extremely lightweight right now, and just includes the fields we need for
+    Pebble interaction.
+
+    Attributes:
+        name: Name of container (key in the YAML)
+    """
+
+    def __init__(self, name, raw):
+        self.name = name
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/framework.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/framework.py
new file mode 100644
index 0000000000000000000000000000000000000000..d20c0007ebcd58456a0bac90ae5dc0eaacb9a407
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/framework.py
@@ -0,0 +1,1199 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework infrastructure."""
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import logging
+import marshal
+import os
+import pathlib
+import pdb
+import re
+import sys
+import types
+import weakref
+
+from ops import charm
+from ops.storage import (
+    NoSnapshotError,
+    SQLiteStorage,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Handle:
+    """Handle defines a name for an object in the form of a hierarchical path.
+
+    The provided parent is the object (or that object's handle) that this handle
+    sits under, or None if the object identified by this handle stands by itself
+    as the root of its own hierarchy.
+
+    The handle kind is a string that defines a namespace so objects with the
+    same parent and kind will have unique keys.
+
+    The handle key is a string uniquely identifying the object. No other objects
+    under the same parent and kind may have the same key.
+    """
+
+    def __init__(self, parent, kind, key):
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        self._parent = parent
+        self._kind = kind
+        self._key = key
+        if parent:
+            if key:
+                self._path = "{}/{}[{}]".format(parent, kind, key)
+            else:
+                self._path = "{}/{}".format(parent, kind)
+        else:
+            if key:
+                self._path = "{}[{}]".format(kind, key)
+            else:
+                self._path = "{}".format(kind)
+
+    def nest(self, kind, key):
+        """Create a new handle as child of the current one."""
+        return Handle(self, kind, key)
+
+    def __hash__(self):
+        return hash((self.parent, self.kind, self.key))
+
+    def __eq__(self, other):
+        return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key)
+
+    def __str__(self):
+        return self.path
+
+    @property
+    def parent(self):
+        """Return own parent handle."""
+        return self._parent
+
+    @property
+    def kind(self):
+        """Return the handle's kind."""
+        return self._kind
+
+    @property
+    def key(self):
+        """Return the handle's key."""
+        return self._key
+
+    @property
+    def path(self):
+        """Return the handle's path."""
+        return self._path
+
+    @classmethod
+    def from_path(cls, path):
+        """Build a handle from the indicated path."""
+        handle = None
+        for pair in path.split("/"):
+            pair = pair.split("[")
+            good = False
+            if len(pair) == 1:
+                kind, key = pair[0], None
+                good = True
+            elif len(pair) == 2:
+                kind, key = pair
+                if key and key[-1] == ']':
+                    key = key[:-1]
+                    good = True
+            if not good:
+                raise RuntimeError("attempted to restore invalid handle path {}".format(path))
+            handle = Handle(handle, kind, key)
+        return handle
+
+
+class EventBase:
+    """The base for all the different Events.
+
+    Inherit this and override 'snapshot' and 'restore' methods to build a custom event.
+    """
+
+    def __init__(self, handle):
+        self.handle = handle
+        self.deferred = False
+
+    def __repr__(self):
+        return "<%s via %s>" % (self.__class__.__name__, self.handle)
+
+    def defer(self):
+        """Defer the event to the future.
+
+        Deferring an event from a handler puts that handler into a queue, to be
+        called again the next time the charm is invoked. This invocation may be
+        the result of an action, or any event other than metric events. The
+        queue of events will be dispatched before the new event is processed.
+
+        Some of this can be deduced from the above, but it is important to
+        point out explicitly:
+
+        * ``defer()`` does not interrupt the execution of the current event
+          handler. In almost all cases, a call to ``defer()`` should be followed
+          by an explicit ``return`` from the handler;
+
+        * the re-execution of the deferred event handler starts from the top of
+          the handler method (not where defer was called);
+
+        * only the handlers that actually called ``defer()`` are called again
+          (that is: despite talking about “deferring an event” it is actually
+          the handler/event combination that is deferred); and
+
+        * any deferred events get processed before the event (or action) that
+          caused the current invocation of the charm.
+
+        The typical reason to call ``defer()`` is that some precondition
+        isn't yet met. However, consider whether it is better to defer this
+        event so that you see it again, or to just wait for the event that
+        indicates the precondition has been met.
+
+        For example, if ``config-changed`` is fired and you are waiting for
+        different config, there is no reason to defer the event: a *different*
+        ``config-changed`` event will fire when the config actually changes,
+        whereas deferring means re-checking whether the config has changed
+        before every other event that occurs in the meantime.
+
+        Similarly, suppose you need two events to occur before you are ready
+        to proceed (say events A and B). When you see event A, you could
+        choose to ``defer()`` it because you haven't seen B yet. However, that
+        leads to:
+
+        1. event A fires, calls defer()
+
+        2. event B fires, the deferred event A handler is called first, still
+           hasn't seen B happen, so it is deferred again. Then the event B
+           handler runs, which progresses since it has seen A.
+
+        3. At some future time, event C happens; the deferred event A handler
+           is called again first, and this time it has seen B, so it can
+           proceed.
+
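+        As a minimal sketch of the common pattern (``self._is_ready()`` is a
+        hypothetical helper, not part of the framework)::
+
+            def _on_config_changed(self, event):
+                if not self._is_ready():
+                    event.defer()
+                    return
+                # ... proceed with the actual work
+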
+        """
+        logger.debug("Deferring %s.", self)
+        self.deferred = True
+
+    def snapshot(self):
+        """Return the snapshot data that should be persisted.
+
+        Subclasses must override to save any custom state.
+        """
+        return None
+
+    def restore(self, snapshot):
+        """Restore the value state from the given snapshot.
+
+        Subclasses must override to restore their custom state.
+        """
+        self.deferred = False
+
+
+class EventSource:
+    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+    It is generally used as:
+
+        class SomethingHappened(EventBase):
+            pass
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    With that, instances of that type will offer the someobj.something_happened
+    attribute which is a BoundEvent and may be used to emit and observe the event.
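+
+    For example (a sketch, assuming ``someobj`` is an instance of SomeObject
+    that is wired into a framework)::
+
+        someobj.something_happened.emit()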
+    """
+
+    def __init__(self, event_type):
+        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+            raise RuntimeError(
+                'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
+        self.event_type = event_type
+        self.event_kind = None
+        self.emitter_type = None
+
+    def _set_name(self, emitter_type, event_kind):
+        if self.event_kind is not None:
+            raise RuntimeError(
+                'EventSource({}) reused as {}.{} and {}.{}'.format(
+                    self.event_type.__name__,
+                    self.emitter_type.__name__,
+                    self.event_kind,
+                    emitter_type.__name__,
+                    event_kind,
+                ))
+        self.event_kind = event_kind
+        self.emitter_type = emitter_type
+
+    def __get__(self, emitter, emitter_type=None):
+        if emitter is None:
+            return self
+        # Framework might not be available if accessed as CharmClass.on.event
+        # rather than charm_instance.on.event, but in that case it couldn't be
+        # emitted anyway, so there's no point in registering it.
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+    """Event bound to an Object."""
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+          EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+    """Base class of all the charm-related objects."""
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        """Shortcut for more simple access the model."""
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data. Doing this unusual construct
+            # means people can subclass just this one class to have their own 'on'.
+            instance = self._cache[emitter] = type(self)(emitter)
+        return instance
+
+    @classmethod
+    def define_event(cls, event_kind, event_type):
+        """Define an event on this type at runtime.
+
+        cls: a type to define an event on.
+
+        event_kind: an attribute name that will be used to access the
+                    event. Must be a valid python identifier, and must not be
+                    a keyword or an existing attribute.
+
+        event_type: a type of the event to define.
+
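+        For example (a sketch; the event and class names are illustrative)::
+
+            class DatabaseReadyEvent(EventBase):
+                pass
+
+            class MyEvents(ObjectEvents):
+                pass
+
+            MyEvents.define_event("database_ready", DatabaseReadyEvent)
+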
+        """
+        prefix = 'unable to define an event with event_kind that '
+        if not event_kind.isidentifier():
+            raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
+        elif keyword.iskeyword(event_kind):
+            raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
+        try:
+            getattr(cls, event_kind)
+            raise RuntimeError(
+                prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
+        except AttributeError:
+            pass
+
+        event_descriptor = EventSource(event_type)
+        event_descriptor._set_name(cls, event_kind)
+        setattr(cls, event_kind, event_descriptor)
+
+    def _event_kinds(self):
+        event_kinds = []
+        # We have to iterate over the class rather than the instance: the instance may
+        # have properties that call this method (e.g., event views), which would lead
+        # to infinite recursion.
+        for attr_name, attr_value in inspect.getmembers(type(self)):
+            if isinstance(attr_value, EventSource):
+                # We actually care about the bound_event, however, since it
+                # provides the most info for users of this method.
+                event_kinds.append(attr_name)
+        return event_kinds
+
+    def events(self):
+        """Return a mapping of event_kinds to bound_events for all available events."""
+        return {event_kind: getattr(self, event_kind) for event_kind in self._event_kinds()}
+
+    def __getitem__(self, key):
+        return PrefixedEvents(self, key)
+
+    def __repr__(self):
+        k = type(self)
+        event_kinds = ', '.join(sorted(self._event_kinds()))
+        return '<{}.{}: {}>'.format(k.__module__, k.__qualname__, event_kinds)
+
+
+class PrefixedEvents:
+    """Events to be found in all events using a specific prefix."""
+
+    def __init__(self, emitter, key):
+        self._emitter = emitter
+        self._prefix = key.replace("-", "_") + '_'
+
+    def __getattr__(self, name):
+        return getattr(self._emitter, self._prefix + name)
+
+
+class PreCommitEvent(EventBase):
+    """Events that will be emited first on commit."""
+
+
+class CommitEvent(EventBase):
+    """Events that will be emited second on commit."""
+
+
+class FrameworkEvents(ObjectEvents):
+    """Manager of all framework events."""
+    pre_commit = EventSource(PreCommitEvent)
+    commit = EventSource(CommitEvent)
+
+
+class NoTypeError(Exception):
+    """No class to hold it was found when restoring an event."""
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return "cannot restore {} since no class was registered for it".format(self.handle_path)
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$'
+
+
+class Framework(Object):
+    """Main interface to from the Charm to the Operator Framework internals."""
+
+    on = FrameworkEvents()
+
+    # Override properties from Object so that we can set them in __init__.
+    model = None
+    meta = None
+    charm_dir = None
+
+    def __init__(self, storage, charm_dir, meta, model):
+
+        super().__init__(self, None)
+
+        self.charm_dir = charm_dir
+        self.meta = meta
+        self.model = model
+        self._observers = []      # [(observer_path, method_name, parent_path, event_key)]
+        self._observer = weakref.WeakValueDictionary()       # {observer_path: observer}
+        self._objects = weakref.WeakValueDictionary()
+        self._type_registry = {}  # {(parent_path, kind): cls}
+        self._type_known = set()  # {cls}
+
+        if isinstance(storage, (str, pathlib.Path)):
+            logger.warning(
+                "deprecated: Framework now takes a Storage not a path")
+            storage = SQLiteStorage(storage)
+        self._storage = storage
+
+        # We can't use the higher-level StoredState because it relies on events.
+        self.register_type(StoredStateData, None, StoredStateData.handle_kind)
+        stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
+        try:
+            self._stored = self.load_snapshot(stored_handle)
+        except NoSnapshotError:
+            self._stored = StoredStateData(self, '_stored')
+            self._stored['event_count'] = 0
+
+        # Flag to indicate that we already presented the welcome message in a debugger breakpoint
+        self._breakpoint_welcomed = False
+
+        # Parse the env var once, which may be used multiple times later
+        debug_at = os.environ.get('JUJU_DEBUG_AT')
+        self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
+    def set_breakpointhook(self):
+        """Hook into sys.breakpointhook so the builtin breakpoint() works as expected.
+
+        This method is called by ``main``, and is not intended to be
+        called by users of the framework itself outside of perhaps
+        some testing scenarios.
+
+        It returns the old value of sys.breakpointhook.
+
+        The breakpoint function is a Python >= 3.7 feature.
+
+        This method was added in ops 1.0; before that, it was done as
+        part of the Framework's __init__.
+        """
+        old_breakpointhook = getattr(sys, 'breakpointhook', None)
+        if old_breakpointhook is not None:
+            # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
+            # breakpoint()
+            sys.breakpointhook = self.breakpoint
+        return old_breakpointhook
+
+    def close(self):
+        """Close the underlying backends."""
+        self._storage.close()
+
+    def _track(self, obj):
+        """Track object and ensure it is the only object created using its handle path."""
+        if obj is self:
+            # Framework objects don't track themselves
+            return
+        if obj.handle.path in self.framework._objects:
+            raise RuntimeError(
+                'two objects claiming to be {} have been created'.format(obj.handle.path))
+        self._objects[obj.handle.path] = obj
+
+    def _forget(self, obj):
+        """Stop tracking the given object. See also _track."""
+        self._objects.pop(obj.handle.path, None)
+
+    def commit(self):
+        """Save changes to the underlying backends."""
+        # Give a chance for objects to persist data they want to before a commit is made.
+        self.on.pre_commit.emit()
+        # Make sure snapshots are saved by instances of StoredStateData. Any possible state
+        # modifications in on_commit handlers of instances of other classes will not be persisted.
+        self.on.commit.emit()
+        # Save our event count after all events have been emitted.
+        self.save_snapshot(self._stored)
+        self._storage.commit()
+
+    def register_type(self, cls, parent, kind=None):
+        """Register a type to a handle."""
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        if parent:
+            parent_path = parent.path
+        else:
+            parent_path = None
+        if not kind:
+            kind = cls.handle_kind
+        self._type_registry[(parent_path, kind)] = cls
+        self._type_known.add(cls)
+
+    def save_snapshot(self, value):
+        """Save a persistent snapshot of the provided value.
+
+        The provided value must implement the following interface:
+
+        value.handle = Handle(...)
+        value.snapshot() => {...}  # Simple builtin types only.
+        value.restore(snapshot)    # Restore custom state from prior snapshot.
+        """
+        if type(value) not in self._type_known:
+            raise RuntimeError(
+                'cannot save {} values before registering that type'.format(type(value).__name__))
+        data = value.snapshot()
+
+        # Use marshal as a validator, enforcing the use of simple types, as the
+        # information is later really pickled, which is too error prone for future
+        # evolution of the stored data (e.g. if the developer stores a custom object
+        # and later changes its class name; when unpickling, the original class will
+        # not be there and event data loading will fail).
+        try:
+            marshal.dumps(data)
+        except ValueError:
+            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+            raise ValueError(msg.format(value.__class__.__name__, data))
+
+        self._storage.save_snapshot(value.handle.path, data)
+
+    def load_snapshot(self, handle):
+        """Load a persistent snapshot."""
+        parent_path = None
+        if handle.parent:
+            parent_path = handle.parent.path
+        cls = self._type_registry.get((parent_path, handle.kind))
+        if not cls:
+            raise NoTypeError(handle.path)
+        data = self._storage.load_snapshot(handle.path)
+        obj = cls.__new__(cls)
+        obj.framework = self
+        obj.handle = handle
+        obj.restore(data)
+        self._track(obj)
+        return obj
+
+    def drop_snapshot(self, handle):
+        """Discard a persistent snapshot."""
+        self._storage.drop_snapshot(handle.path)
+
+    def observe(self, bound_event: BoundEvent, observer: types.MethodType):
+        """Register observer to be called when bound_event is emitted.
+
+        The bound_event is generally provided as an attribute of the object that emits
+        the event, and is created in this style::
+
+            class SomeObject:
+                something_happened = EventSource(SomethingHappened)
+
+        That event may be observed as::
+
+            framework.observe(someobj.something_happened, self._on_something_happened)
+
+        Raises:
+            RuntimeError: if bound_event or observer are the wrong type.
+        """
+        if not isinstance(bound_event, BoundEvent):
+            raise RuntimeError(
+                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+                    bound_event))
+        if not isinstance(observer, types.MethodType):
+            # help users of older versions of the framework
+            if isinstance(observer, charm.CharmBase):
+                raise TypeError(
+                    'observer methods must now be explicitly provided;'
+                    ' please replace observe(self.on.{0}, self)'
+                    ' with e.g. observe(self.on.{0}, self._on_{0})'.format(
+                        bound_event.event_kind))
+            raise RuntimeError(
+                'Framework.observe requires a method as third parameter, got {}'.format(observer))
+
+        event_type = bound_event.event_type
+        event_kind = bound_event.event_kind
+        emitter = bound_event.emitter
+
+        self.register_type(event_type, emitter, event_kind)
+
+        if hasattr(emitter, "handle"):
+            emitter_path = emitter.handle.path
+        else:
+            raise RuntimeError(
+                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+        # Validate that the method has an acceptable call signature.
+        sig = inspect.signature(observer)
+        # Self isn't included in the params list, so the first arg will be the event.
+        extra_params = list(sig.parameters.values())[1:]
+
+        method_name = observer.__name__
+        observer = observer.__self__
+        if not sig.parameters:
+            raise TypeError(
+                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+        elif any(param.default is inspect.Parameter.empty for param in extra_params):
+            # Allow for additional optional params, since there's no reason to exclude them, but
+            # required params will break.
+            raise TypeError(
+                '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))
+
+        # TODO Prevent the exact same parameters from being registered more than once.
+
+        self._observer[observer.handle.path] = observer
+        self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
+    def _next_event_key(self):
+        """Return the next event key that should be used, incrementing the internal counter."""
+        # Increment the count first; this means the keys will start at 1, and 0
+        # means no events have been emitted.
+        self._stored['event_count'] += 1
+        return str(self._stored['event_count'])
+
+    def _emit(self, event):
+        """See BoundEvent.emit for the public way to call this."""
+        saved = False
+        event_path = event.handle.path
+        event_kind = event.handle.kind
+        parent_path = event.handle.parent.path
+        # TODO Track observers by (parent_path, event_kind) rather than as a list of
+        # all observers, avoiding a linear search through all observers for every event.
+        for observer_path, method_name, _parent_path, _event_kind in self._observers:
+            if _parent_path != parent_path:
+                continue
+            if _event_kind and _event_kind != event_kind:
+                continue
+            if not saved:
+                # Save the event for all known observers before the first notification
+                # takes place, so that either everyone interested sees it, or nobody does.
+                self.save_snapshot(event)
+                saved = True
+            # Again, only commit this after all notices are saved.
+            self._storage.save_notice(event_path, observer_path, method_name)
+        if saved:
+            self._reemit(event_path)
+
+    def reemit(self):
+        """Reemit previously deferred events to the observers that deferred them.
+
+        Only the specific observers that have previously deferred the event will be
+        notified again. Observers that only asked to be notified about an event after
+        it was first emitted won't be notified, as that would mean potentially
+        observing events out of order.
+        """
+        self._reemit()
+
+    def _reemit(self, single_event_path=None):
+        last_event_path = None
+        deferred = True
+        for event_path, observer_path, method_name in self._storage.notices(single_event_path):
+            event_handle = Handle.from_path(event_path)
+
+            if last_event_path != event_path:
+                if not deferred and last_event_path is not None:
+                    self._storage.drop_snapshot(last_event_path)
+                last_event_path = event_path
+                deferred = False
+
+            try:
+                event = self.load_snapshot(event_handle)
+            except NoTypeError:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+                continue
+
+            event.deferred = False
+            observer = self._observer.get(observer_path)
+            if observer:
+                if single_event_path is None:
+                    logger.debug("Re-emitting %s.", event)
+                custom_handler = getattr(observer, method_name, None)
+                if custom_handler:
+                    event_is_from_juju = isinstance(event, charm.HookEvent)
+                    event_is_action = isinstance(event, charm.ActionEvent)
+                    if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at:
+                        # Present the welcome message and run under PDB.
+                        self._show_debug_code_message()
+                        pdb.runcall(custom_handler, event)
+                    else:
+                        # Regular call to the registered method.
+                        custom_handler(event)
+
+            if event.deferred:
+                deferred = True
+            else:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+            # We intentionally consider this event to be dead and reload it from
+            # scratch in the next pass.
+            self.framework._forget(event)
+
+        if not deferred and last_event_path is not None:
+            self._storage.drop_snapshot(last_event_path)
+
+    def _show_debug_code_message(self):
+        """Present the welcome message (only once!) when using debugger functionality."""
+        if not self._breakpoint_welcomed:
+            self._breakpoint_welcomed = True
+            print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
+    def breakpoint(self, name=None):
+        """Add breakpoint, optionally named, at the place where this method is called.
+
+        For the breakpoint to be activated, the JUJU_DEBUG_AT environment variable
+        must contain "all" or the specific name parameter provided, if any. In every
+        other situation, calling this method does nothing.
+
+        The framework also provides a standard breakpoint named "hook", that will
+        stop execution when a hook event is about to be handled.
+
+        For those reasons, the "all" and "hook" breakpoint names are reserved.
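+
+        For example, with ``JUJU_DEBUG_AT=db-init`` set for the unit (the
+        breakpoint name here is illustrative), execution stops in pdb at this
+        line::
+
+            self.framework.breakpoint("db-init")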
+        """
+        # If given, validate that the name complies with all the rules
+        if name is not None:
+            if not isinstance(name, str):
+                raise TypeError('breakpoint names must be strings')
+            if name in ('hook', 'all'):
+                raise ValueError('breakpoint names "all" and "hook" are reserved')
+            if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
+                raise ValueError('breakpoint names must look like "foo" or "foo-bar"')
+
+        indicated_breakpoints = self._juju_debug_at
+        if not indicated_breakpoints:
+            return
+
+        if 'all' in indicated_breakpoints or name in indicated_breakpoints:
+            self._show_debug_code_message()
+
+            # If we called set_trace() directly it would open the debugger *here*, so
+            # instruct it to use our caller's frame instead.
+            code_frame = inspect.currentframe().f_back
+            pdb.Pdb().set_trace(code_frame)
+        else:
+            logger.warning(
+                "Breakpoint %r skipped (not found in the requested breakpoints: %s)",
+                name, indicated_breakpoints)
+
+    def remove_unreferenced_events(self):
+        """Remove events from storage that are not referenced.
+
+        In older versions of the framework, events that had no observers would get recorded but
+        never deleted. This makes a best effort to find these events and remove them from the
+        database.
+        """
+        event_regex = re.compile(_event_regex)
+        to_remove = []
+        for handle_path in self._storage.list_snapshots():
+            if event_regex.match(handle_path):
+                notices = self._storage.notices(handle_path)
+                if next(notices, None) is None:
+                    # There are no notices for this handle_path, it is valid to remove it
+                    to_remove.append(handle_path)
+        for handle_path in to_remove:
+            self._storage.drop_snapshot(handle_path)
+
+
+class StoredStateData(Object):
+    """Manager of the stored data."""
+
+    def __init__(self, parent, attr_name):
+        super().__init__(parent, attr_name)
+        self._cache = {}
+        self.dirty = False
+
+    def __getitem__(self, key):
+        return self._cache.get(key)
+
+    def __setitem__(self, key, value):
+        self._cache[key] = value
+        self.dirty = True
+
+    def __contains__(self, key):
+        return key in self._cache
+
+    def snapshot(self):
+        """Return the current state."""
+        return self._cache
+
+    def restore(self, snapshot):
+        """Restore current state to the given snapshot."""
+        self._cache = snapshot
+        self.dirty = False
+
+    def on_commit(self, event):
+        """Save changes to the storage backend."""
+        if self.dirty:
+            self.framework.save_snapshot(self)
+            self.dirty = False
+
+
+class BoundStoredState:
+    """Stored state data bound to a specific Object."""
+
+    def __init__(self, parent, attr_name):
+        parent.framework.register_type(StoredStateData, parent)
+
+        handle = Handle(parent, StoredStateData.handle_kind, attr_name)
+        try:
+            data = parent.framework.load_snapshot(handle)
+        except NoSnapshotError:
+            data = StoredStateData(parent, attr_name)
+
+        # __dict__ is used to avoid infinite recursion.
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """Set the value of any given key if it has not already been set."""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is exepcted of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+                if bound is not None:
+                    # the StoredState instance is being stored in two different
+                    # attributes -> unclear what is expected of us -> bail out
+                    raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
+                        cls.__name__, self.attr_name, attr_name))
+                # we've found ourselves for the first time; save where, and bind the object
+                self.attr_name = attr_name
+                self.parent_type = cls
+                bound = BoundStoredState(parent, attr_name)
+
+        if bound is not None:
+            # cache the bound object to avoid the expensive lookup the next time
+            # (don't use setattr, to keep things symmetric with the fast-path lookup above)
+            parent.__dict__[self.attr_name] = bound
+            return bound
+
+        raise AttributeError(
+            'cannot find {} attribute in type {}'.format(
+                self.__class__.__name__, parent_type.__name__))
+
+
+def _wrap_stored(parent_data, value):
+    t = type(value)
+    if t is dict:
+        return StoredDict(parent_data, value)
+    if t is list:
+        return StoredList(parent_data, value)
+    if t is set:
+        return StoredSet(parent_data, value)
+    return value
+
+
+def _unwrap_stored(parent_data, value):
+    t = type(value)
+    if t is StoredDict or t is StoredList or t is StoredSet:
+        return value._under
+    return value
+
+
+def _wrapped_repr(obj):
+    t = type(obj)
+    if obj._under:
+        return "{}.{}({!r})".format(t.__module__, t.__name__, obj._under)
+    else:
+        return "{}.{}()".format(t.__module__, t.__name__)
+
+
+class StoredDict(collections.abc.MutableMapping):
+    """A dict-like object that uses the StoredState as backend."""
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, key):
+        return _wrap_stored(self._stored_data, self._under[key])
+
+    def __setitem__(self, key, value):
+        self._under[key] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, key):
+        del self._under[key]
+        self._stored_data.dirty = True
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    def __eq__(self, other):
+        if isinstance(other, StoredDict):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Mapping):
+            return self._under == other
+        else:
+            return NotImplemented
+
+    __repr__ = _wrapped_repr
+
+
+class StoredList(collections.abc.MutableSequence):
+    """A list-like object that uses the StoredState as backend."""
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, index):
+        return _wrap_stored(self._stored_data, self._under[index])
+
+    def __setitem__(self, index, value):
+        self._under[index] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, index):
+        del self._under[index]
+        self._stored_data.dirty = True
+
+    def __len__(self):
+        return len(self._under)
+
+    def insert(self, index, value):
+        """Insert value before index."""
+        self._under.insert(index, value)
+        self._stored_data.dirty = True
+
+    def append(self, value):
+        """Append value to the end of the list."""
+        self._under.append(value)
+        self._stored_data.dirty = True
+
+    def __eq__(self, other):
+        if isinstance(other, StoredList):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under == other
+        else:
+            return NotImplemented
+
+    def __lt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under < other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under < other
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, StoredList):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under > other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under > other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    __repr__ = _wrapped_repr
+
+
+class StoredSet(collections.abc.MutableSet):
+    """A set-like object that uses the StoredState as backend."""
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        """Add a key to a set.
+
+        This has no effect if the key is already present.
+        """
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        """Remove a key from a set if it is a member.
+
+        If the key is not a member, do nothing.
+        """
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
+
+    __repr__ = _wrapped_repr
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/jujuversion.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/jujuversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..61d420d369d9b0e75b9c2c242574ddcd4b89be51
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/jujuversion.py
@@ -0,0 +1,114 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A helper to work with the Juju version."""
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+    """Helper to work with the Juju version.
+
+    It knows how to parse the ``JUJU_VERSION`` environment variable, exposes different
+    capabilities according to the specific version, and can be compared with other
+    versions.
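+
+    For example (a sketch)::
+
+        v = JujuVersion('2.8.1')
+        assert v.has_app_data()
+        assert v > JujuVersion('2.7.6')
+        assert str(JujuVersion('2.9-rc1.2')) == '2.9-rc1.2'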
+    """
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+    '''
+
+    def __init__(self, version):
+        m = re.match(self.PATTERN, version, re.VERBOSE)
+        if not m:
+            raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+        d = m.groupdict()
+        self.major = int(m.group('major'))
+        self.minor = int(m.group('minor'))
+        self.tag = d['tag'] or ''
+        self.patch = int(d['patch'] or 0)
+        self.build = int(d['build'] or 0)
+
+    def __repr__(self):
+        if self.tag:
+            s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+        else:
+            s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+        if self.build > 0:
+            s += '.{}'.format(self.build)
+        return s
+
+    def __eq__(self, other):
+        if self is other:
+            return True
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+        return (
+            self.major == other.major
+            and self.minor == other.minor
+            and self.tag == other.tag
+            and self.build == other.build
+            and self.patch == other.patch)
+
+    def __lt__(self, other):
+        if self is other:
+            return False
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+        if self.major != other.major:
+            return self.major < other.major
+        elif self.minor != other.minor:
+            return self.minor < other.minor
+        elif self.tag != other.tag:
+            if not self.tag:
+                return False
+            elif not other.tag:
+                return True
+            return self.tag < other.tag
+        elif self.patch != other.patch:
+            return self.patch < other.patch
+        elif self.build != other.build:
+            return self.build < other.build
+        return False
+
+    @classmethod
+    def from_environ(cls) -> 'JujuVersion':
+        """Build a JujuVersion from JUJU_VERSION."""
+        v = os.environ.get('JUJU_VERSION')
+        if v is None:
+            v = '0.0.0'
+        return cls(v)
+
+    def has_app_data(self) -> bool:
+        """Determine whether this juju version knows about app data."""
+        return (self.major, self.minor, self.patch) >= (2, 7, 0)
+
+    def is_dispatch_aware(self) -> bool:
+        """Determine whether this juju version knows about dispatch."""
+        return (self.major, self.minor, self.patch) >= (2, 8, 0)
+
+    def has_controller_storage(self) -> bool:
+        """Determine whether this juju version supports controller-side storage."""
+        return (self.major, self.minor, self.patch) >= (2, 8, 0)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/lib/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..22b5a84e517df8a061b7ca2742678536a481b616
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/lib/__init__.py
@@ -0,0 +1,264 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Infrastructure for the opslib functionality."""
+
+import logging
+import os
+import re
+import sys
+
+from ast import literal_eval
+from importlib.util import module_from_spec
+from importlib.machinery import ModuleSpec
+from pkgutil import get_importer
+from types import ModuleType
+from typing import List
+
+__all__ = ('use', 'autoimport')
+
+logger = logging.getLogger(__name__)
+
+_libraries = None
+
+_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''')
+_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''')
+
+# Not perfect, but should do for now.
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
+
+
+def use(name: str, api: int, author: str) -> ModuleType:
+    """Use a library from the ops libraries.
+
+    Args:
+        name: the name of the library requested.
+        api: the API version of the library.
+        author: the author of the library, given as an email address.
+
+    Raises:
+        ImportError: if the library cannot be found.
+        TypeError: if the name, api, or author are the wrong type.
+        ValueError: if the name, api, or author are invalid.
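+
+    For example (a sketch; the library name and author are illustrative)::
+
+        import ops.lib
+        pgsql = ops.lib.use('pgsql', 1, 'postgresql-charmers@lists.launchpad.net')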
+    """
+    if not isinstance(name, str):
+        raise TypeError("invalid library name: {!r} (must be a str)".format(name))
+    if not isinstance(author, str):
+        raise TypeError("invalid library author: {!r} (must be a str)".format(author))
+    if not isinstance(api, int):
+        raise TypeError("invalid library API: {!r} (must be an int)".format(api))
+    if api < 0:
+        raise ValueError('invalid library api: {} (must be ≥0)'.format(api))
+    if not _libname_re.match(name):
+        raise ValueError("invalid library name: {!r} (chars and digits only)".format(name))
+    if not _libauthor_re.match(author):
+        raise ValueError("invalid library author email: {!r}".format(author))
+
+    if _libraries is None:
+        autoimport()
+
+    versions = _libraries.get((name, author), ())
+    for lib in versions:
+        if lib.api == api:
+            return lib.import_module()
+
+    others = ', '.join(str(lib.api) for lib in versions)
+    if others:
+        msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format(
+            name, author, api, others)
+    else:
+        msg = 'cannot find library "{}" from "{}"'.format(name, author)
+
+    raise ImportError(msg, name=name)
+
+
+def autoimport():
+    """Find all libs in the path and enable use of them.
+
+    You only need to call this if you've installed a package or
+    otherwise changed sys.path in the current run, and need to see the
+    changes. Otherwise libraries are found on first call of `use`.
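+
+    For example (a sketch, assuming sys.path was changed mid-run)::
+
+        sys.path.append('/path/to/new/libs')  # hypothetical path change
+        ops.lib.autoimport()  # rescan so subsequent `use` calls see it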
+    """
+    global _libraries
+    _libraries = {}
+    for spec in _find_all_specs(sys.path):
+        lib = _parse_lib(spec)
+        if lib is None:
+            continue
+
+        versions = _libraries.setdefault((lib.name, lib.author), [])
+        versions.append(lib)
+        versions.sort(reverse=True)
+
+
+def _find_all_specs(path):
+    for sys_dir in path:
+        if sys_dir == "":
+            sys_dir = "."
+        try:
+            top_dirs = os.listdir(sys_dir)
+        except (FileNotFoundError, NotADirectoryError):
+            continue
+        except OSError as e:
+            logger.debug("Tried to look for ops.lib packages under '%s': %s", sys_dir, e)
+            continue
+        logger.debug("Looking for ops.lib packages under '%s'", sys_dir)
+        for top_dir in top_dirs:
+            opslib = os.path.join(sys_dir, top_dir, 'opslib')
+            try:
+                lib_dirs = os.listdir(opslib)
+            except (FileNotFoundError, NotADirectoryError):
+                continue
+            except OSError as e:
+                logger.debug("  Tried '%s': %s", opslib, e)  # *lots* of things checked here
+                continue
+            else:
+                logger.debug("  Trying '%s'", opslib)
+            finder = get_importer(opslib)
+            if finder is None:
+                logger.debug("  Finder for '%s' is None", opslib)
+                continue
+            if not hasattr(finder, 'find_spec'):
+                logger.debug("  Finder for '%s' has no find_spec", opslib)
+                continue
+            for lib_dir in lib_dirs:
+                spec_name = "{}.opslib.{}".format(top_dir, lib_dir)
+                spec = finder.find_spec(spec_name)
+                if spec is None:
+                    logger.debug("    No spec for %r", spec_name)
+                    continue
+                if spec.loader is None:
+                    # a namespace package; not supported
+                    logger.debug("    No loader for %r (probably a namespace package)", spec_name)
+                    continue
+
+                logger.debug("    Found %r", spec_name)
+                yield spec
+
+
+# at most this many leading lines of a file are looked at for the LIB* constants
+_MAX_LIB_LINES = 99
+# these keys, with these types, are needed to have an opslib
+_NEEDED_KEYS = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int}
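+
+# For reference, a module is recognised as an opslib library when its leading
+# lines define all four constants, e.g. (the values are illustrative):
+#
+#   LIBNAME = "mylib"
+#   LIBAUTHOR = "author@example.com"
+#   LIBAPI = 1
+#   LIBPATCH = 3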
+
+
+def _join_and(keys: List[str]) -> str:
+    if len(keys) == 0:
+        return ""
+    if len(keys) == 1:
+        return keys[0]
+    return ", ".join(keys[:-1]) + ", and " + keys[-1]
+
+
+class _Missing:
+    """Helper to get the difference between what was found and what was needed when logging."""
+
+    def __init__(self, found):
+        self._found = found
+
+    def __str__(self):
+        exp = set(_NEEDED_KEYS)
+        got = set(self._found)
+        if len(got) == 0:
+            return "missing {}".format(_join_and(sorted(exp)))
+        return "got {}, but missing {}".format(
+            _join_and(sorted(got)),
+            _join_and(sorted(exp - got)))
+
+
+def _parse_lib(spec):
+    if spec.origin is None:
+        # "can't happen"
+        logger.warning("No origin for %r (no idea why; please report)", spec.name)
+        return None
+
+    logger.debug("    Parsing %r", spec.name)
+
+    try:
+        with open(spec.origin, 'rt', encoding='utf-8') as f:
+            libinfo = {}
+            for n, line in enumerate(f):
+                if len(libinfo) == len(_NEEDED_KEYS):
+                    break
+                if n > _MAX_LIB_LINES:
+                    logger.debug(
+                        "      Missing opslib metadata after reading to line %d: %s",
+                        _MAX_LIB_LINES, _Missing(libinfo))
+                    return None
+                m = _libline_re.match(line)
+                if m is None:
+                    continue
+                key, value = m.groups()
+                if key in _NEEDED_KEYS:
+                    value = literal_eval(value)
+                    if not isinstance(value, _NEEDED_KEYS[key]):
+                        logger.debug(
+                            "      Bad type for %s: expected %s, got %s",
+                            key, _NEEDED_KEYS[key].__name__, type(value).__name__)
+                        return None
+                    libinfo[key] = value
+            else:
+                if len(libinfo) != len(_NEEDED_KEYS):
+                    logger.debug(
+                        "      Missing opslib metadata after reading to end of file: %s",
+                        _Missing(libinfo))
+                    return None
+    except Exception as e:
+        logger.debug("      Failed: %s", e)
+        return None
+
+    lib = _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH'])
+    logger.debug("    Success: found library %s", lib)
+
+    return lib
+
+
+class _Lib:
+
+    def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int):
+        self.spec = spec
+        self.name = name
+        self.author = author
+        self.api = api
+        self.patch = patch
+
+        self._module = None
+
+    def __repr__(self):
+        return "<_Lib {}>".format(self)
+
+    def __str__(self):
+        return "{0.name} by {0.author}, API {0.api}, patch {0.patch}".format(self)
+
+    def import_module(self) -> ModuleType:
+        if self._module is None:
+            module = module_from_spec(self.spec)
+            self.spec.loader.exec_module(module)
+            self._module = module
+        return self._module
+
+    def __eq__(self, other):
+        if not isinstance(other, _Lib):
+            return NotImplemented
+        a = (self.name, self.author, self.api, self.patch)
+        b = (other.name, other.author, other.api, other.patch)
+        return a == b
+
+    def __lt__(self, other):
+        if not isinstance(other, _Lib):
+            return NotImplemented
+        a = (self.name, self.author, self.api, self.patch)
+        b = (other.name, other.author, other.api, other.patch)
+        return a < b
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/log.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..b47013dd597c5500ea763d9e4beada10e6f2ca87
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/log.py
@@ -0,0 +1,58 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interface to emit messages to the Juju logging system."""
+
+import sys
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+    """A handler for sending logs to Juju via juju-log."""
+
+    def __init__(self, model_backend, level=logging.DEBUG):
+        super().__init__(level)
+        self.model_backend = model_backend
+
+    def emit(self, record):
+        """Send the specified logging record to the Juju backend.
+
+        This method is not used directly by the Operator Framework code, but by
+        :class:`logging.Handler` itself as part of the logging machinery.
+        """
+        self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+    """Setup python logging to forward messages to juju-log.
+
+    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+    Charmers can also set their own default log level with::
+
+      logging.getLogger().setLevel(logging.INFO)
+
+    model_backend -- a ModelBackend to use for juju-log
+    debug -- if True, write logs to stderr as well as to juju-log.
+    """
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(JujuLogHandler(model_backend))
+    if debug:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+
+    sys.excepthook = lambda etype, value, tb: logger.error(
+        "Uncaught exception while in charm code:", exc_info=(etype, value, tb))
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/main.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbfb9220d6bc1f48642912889e6f4084343ad767
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/main.py
@@ -0,0 +1,410 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Main entry point to the Operator Framework."""
+
+import inspect
+import logging
+import os
+import shutil
+import subprocess
+import sys
+import typing
+import warnings
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import ops.storage
+
+from ops.log import setup_root_logging
+from ops.jujuversion import JujuVersion
+
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _exe_path(path: Path) -> typing.Optional[Path]:
+    """Find and return the full path to the given binary.
+
+    Here path is the absolute path to a binary, but might be missing an extension.
+    """
+    p = shutil.which(path.name, mode=os.F_OK, path=str(path.parent))
+    if p is None:
+        return None
+    return Path(p)
+
+
+def _get_charm_dir():
+    charm_dir = os.environ.get("JUJU_CHARM_DIR")
+    if charm_dir is None:
+        # Assume $JUJU_CHARM_DIR/lib/ops/main.py structure.
+        charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+    else:
+        charm_dir = Path(charm_dir).resolve()
+    return charm_dir
+
+
+def _create_event_link(charm, bound_event, link_to):
+    """Create a symlink for a particular event.
+
+    charm -- A charm object.
+    bound_event -- An event for which to create a symlink.
+    link_to -- What the event link should point to
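+
+    For example (paths are illustrative), a bound 'install' hook event results
+    in a symlink hooks/install -> ../src/charm.py, and a 'do-backup' action
+    event results in actions/do-backup -> ../src/charm.py.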
+    """
+    if issubclass(bound_event.event_type, ops.charm.HookEvent):
+        event_dir = charm.framework.charm_dir / 'hooks'
+        event_path = event_dir / bound_event.event_kind.replace('_', '-')
+    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+        if not bound_event.event_kind.endswith("_action"):
+            raise RuntimeError(
+                'action event name {} needs _action suffix'.format(bound_event.event_kind))
+        event_dir = charm.framework.charm_dir / 'actions'
+        # The event_kind is suffixed with "_action" while the executable is not.
+        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+    else:
+        raise RuntimeError(
+            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+    event_dir.mkdir(exist_ok=True)
+    if not event_path.exists():
+        target_path = os.path.relpath(link_to, str(event_dir))
+
+        # Ignore the non-symlink files or directories
+        # assuming the charm author knows what they are doing.
+        logger.debug(
+            'Creating a new relative symlink at %s pointing to %s',
+            event_path, target_path)
+        event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+    """Set up links for supported events that originate from Juju.
+
+    Whether a charm can handle an event or not can be determined by
+    introspecting which events are defined on it.
+
+    Hooks and actions are created as symlinks to the charm code file,
+    which is located by resolving the symlink the charm author provides
+    at hooks/install or hooks/start.
+
+    charm_dir -- A root directory of the charm.
+    charm -- An instance of the Charm class.
+
+    """
+    # XXX: on windows this function does not accomplish what it wants to:
+    #      it creates symlinks with no extension pointing to a .py
+    #      and juju only knows how to handle .exe, .bat, .cmd, and .ps1
+    #      so it does its job, but does not accomplish anything as the
+    #      hooks aren't 'callable'.
+    link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0]))
+    for bound_event in charm.on.events().values():
+        # Only events that originate from Juju need symlinks.
+        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+            _create_event_link(charm, bound_event, link_to)
+
+
+def _emit_charm_event(charm, event_name):
+    """Emits a charm event based on a Juju event name.
+
+    charm -- A charm instance to emit an event from.
+    event_name -- A Juju event name to emit on a charm.
+    """
+    event_to_emit = None
+    try:
+        event_to_emit = getattr(charm.on, event_name)
+    except AttributeError:
+        logger.debug("Event %s not defined for %s.", event_name, charm)
+
+    # If the event is not supported by the charm implementation, do
+    # not error out or try to emit it. This is to support rollbacks.
+    if event_to_emit is not None:
+        args, kwargs = _get_event_args(charm, event_to_emit)
+        logger.debug('Emitting Juju event %s.', event_name)
+        event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+    event_type = bound_event.event_type
+    model = charm.framework.model
+
+    if issubclass(event_type, ops.charm.WorkloadEvent):
+        workload_name = os.environ['JUJU_WORKLOAD_NAME']
+        container = model.unit.get_container(workload_name)
+        return [container], {}
+    elif issubclass(event_type, ops.charm.RelationEvent):
+        relation_name = os.environ['JUJU_RELATION']
+        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+        relation = model.get_relation(relation_name, relation_id)
+    else:
+        relation = None
+
+    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+    if remote_app_name or remote_unit_name:
+        if not remote_app_name:
+            if '/' not in remote_unit_name:
+                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+            remote_app_name = remote_unit_name.split('/')[0]
+        args = [relation, model.get_app(remote_app_name)]
+        if remote_unit_name:
+            args.append(model.get_unit(remote_unit_name))
+        return args, {}
+    elif relation:
+        return [relation], {}
+    return [], {}
+
+
+class _Dispatcher:
+    """Encapsulate how to figure out what event Juju wants us to run.
+
+    Also knows how to run “legacy” hooks when Juju called us via a top-level
+    ``dispatch`` binary.
+
+    Args:
+        charm_dir: the toplevel directory of the charm
+
+    Attributes:
+        event_name: the name of the event to run
+        is_dispatch_aware: are we running under a Juju that knows about the
+            dispatch binary, and is that binary present?
+
+    """
+
+    def __init__(self, charm_dir: Path):
+        self._charm_dir = charm_dir
+        self._exec_path = Path(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0]))
+
+        dispatch = charm_dir / 'dispatch'
+        if JujuVersion.from_environ().is_dispatch_aware() and _exe_path(dispatch) is not None:
+            self._init_dispatch()
+        else:
+            self._init_legacy()
+
+    def ensure_event_links(self, charm):
+        """Make sure necessary symlinks are present on disk."""
+        if self.is_dispatch_aware:
+            # links aren't needed
+            return
+
+        # When a charm is force-upgraded and a unit is in an error state Juju
+        # does not run upgrade-charm and instead runs the failed hook followed
+        # by config-changed. Given the nature of force-upgrading the hook setup
+        # code is not triggered on config-changed.
+        #
+        # 'start' event is included as Juju does not fire the install event for
+        # K8s charms (see LP: #1854635).
+        if (self.event_name in ('install', 'start', 'upgrade_charm')
+                or self.event_name.endswith('_storage_attached')):
+            _setup_event_links(self._charm_dir, charm)
+
+    def run_any_legacy_hook(self):
+        """Run any extant legacy hook.
+
+        If there is both a dispatch file and a legacy hook for the
+        current event, run the wanted legacy hook.
+        """
+        if not self.is_dispatch_aware:
+            # we *are* the legacy hook
+            return
+
+        dispatch_path = _exe_path(self._charm_dir / self._dispatch_path)
+        if dispatch_path is None:
+            logger.debug("Legacy %s does not exist.", self._dispatch_path)
+            return
+
+        # super strange that there isn't an is_executable
+        if not os.access(str(dispatch_path), os.X_OK):
+            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+            return
+
+        if dispatch_path.resolve() == Path(sys.argv[0]).resolve():
+            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+            return
+
+        argv = sys.argv.copy()
+        argv[0] = str(dispatch_path)
+        logger.info("Running legacy %s.", self._dispatch_path)
+        try:
+            subprocess.run(argv, check=True)
+        except subprocess.CalledProcessError as e:
+            logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode)
+            sys.exit(e.returncode)
+        except OSError as e:
+            logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e)
+            sys.exit(1)
+        else:
+            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+    def _set_name_from_path(self, path: Path):
+        """Sets the name attribute to that which can be inferred from the given path."""
+        name = path.name.replace('-', '_')
+        if path.parent.name == 'actions':
+            name = '{}_action'.format(name)
+        self.event_name = name
+
+    def _init_legacy(self):
+        """Set up the 'legacy' dispatcher.
+
+        The current Juju doesn't know about 'dispatch' and calls hooks
+        explicitly.
+        """
+        self.is_dispatch_aware = False
+        self._set_name_from_path(self._exec_path)
+
+    def _init_dispatch(self):
+        """Set up the new 'dispatch' dispatcher.
+
+        The current Juju will run 'dispatch' if it exists, and otherwise fall
+        back to the old behaviour.
+
+        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+        in both cases.
+        """
+        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+        if 'OPERATOR_DISPATCH' in os.environ:
+            logger.debug("Charm called itself via %s.", self._dispatch_path)
+            sys.exit(0)
+        os.environ['OPERATOR_DISPATCH'] = '1'
+
+        self.is_dispatch_aware = True
+        self._set_name_from_path(self._dispatch_path)
+
+    def is_restricted_context(self):
+        """Return True if we are running in a restricted Juju context.
+
+        When in a restricted context, most commands (relation-get, config-get,
+        state-get) are not available. As such, we change how we interact with
+        Juju.
+        """
+        return self.event_name in ('collect_metrics',)
+
+
+def _should_use_controller_storage(db_path: Path, meta: ops.charm.CharmMeta) -> bool:
+    """Figure out whether we want to use controller storage or not."""
+    # if you've previously used local state, carry on using that
+    if db_path.exists():
+        logger.debug("Using local storage: %s already exists", db_path)
+        return False
+
+    # if you're not in k8s you don't need controller storage
+    if 'kubernetes' not in meta.series:
+        logger.debug("Using local storage: not a kubernetes charm")
+        return False
+
+    # are we in a new enough Juju?
+    cur_version = JujuVersion.from_environ()
+
+    if cur_version.has_controller_storage():
+        logger.debug("Using controller storage: JUJU_VERSION=%s", cur_version)
+        return True
+    else:
+        logger.debug("Using local storage: JUJU_VERSION=%s", cur_version)
+        return False
+
+
+def main(charm_class: typing.Type[ops.charm.CharmBase],
+         use_juju_for_storage: typing.Optional[bool] = None):
+    """Setup the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+
+    Args:
+        charm_class: your charm class.
+        use_juju_for_storage: whether to use controller-side storage. If not specified
+            then kubernetes charms that haven't previously used local storage and that
+            are running on a new enough Juju default to controller-side storage,
+            otherwise local storage is used.
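+
+    Example (a typical charm entry point; ``MyCharm`` is illustrative)::
+
+        if __name__ == '__main__':
+            main(MyCharm)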
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model._ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+    logger.debug("Operator Framework %s up and running.", ops.__version__)
+
+    dispatcher = _Dispatcher(charm_dir)
+    dispatcher.run_any_legacy_hook()
+
+    metadata = (charm_dir / 'metadata.yaml').read_text()
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = actions_meta.read_text()
+    else:
+        actions_metadata = None
+
+    if not yaml.__with_libyaml__:
+        logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
+    meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
+    model = ops.model.Model(meta, model_backend)
+
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+
+    if use_juju_for_storage and not ops.storage.juju_backend_available():
+        # raise an exception; the charm is broken and needs fixing.
+        msg = 'charm set use_juju_for_storage=True, but Juju version {} does not support it'
+        raise RuntimeError(msg.format(JujuVersion.from_environ()))
+
+    if use_juju_for_storage is None:
+        use_juju_for_storage = _should_use_controller_storage(charm_state_path, meta)
+
+    if use_juju_for_storage:
+        if dispatcher.is_restricted_context():
+            # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
+            #  Though we eventually expect that juju will run collect-metrics in a
+            #  non-restricted context. Once we can determine that we are running collect-metrics
+            #  in a non-restricted context, we should fire the event as normal.
+            logger.debug('"%s" is not supported when using Juju for storage\n'
+                         'see: https://github.com/canonical/operator/issues/348',
+                         dispatcher.event_name)
+            # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
+            return
+        store = ops.storage.JujuStorage()
+    else:
+        store = ops.storage.SQLiteStorage(charm_state_path)
+    framework = ops.framework.Framework(store, charm_dir, meta, model)
+    framework.set_breakpointhook()
+    try:
+        sig = inspect.signature(charm_class)
+        try:
+            sig.bind(framework)
+        except TypeError:
+            msg = (
+                "the second argument, 'key', has been deprecated and will be "
+                "removed after the 0.7 release")
+            warnings.warn(msg, DeprecationWarning)
+            charm = charm_class(framework, None)
+        else:
+            charm = charm_class(framework)
+        dispatcher.ensure_event_links(charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        #       Juju changes are made. Also adjust the docstring on
+        #       EventBase.defer().
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if not dispatcher.is_restricted_context():
+            framework.reemit()
+
+        _emit_charm_event(charm, dispatcher.event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/model.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4e9b08c65993756a758e771e85bfcfecb2fb05b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/model.py
@@ -0,0 +1,1579 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Representations of Juju's model, application, unit, and other entities."""
+
+import datetime
+import decimal
+import ipaddress
+import json
+import os
+import re
+import shutil
+import tempfile
+import time
+import typing
+import weakref
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from pathlib import Path
+from subprocess import run, PIPE, CalledProcessError
+
+from ops._private import yaml
+from ops.jujuversion import JujuVersion
+import ops
+import ops.pebble as pebble
+
+
+class Model:
+    """Represents the Juju Model as seen from this unit.
+
+    This should not be instantiated directly by Charmers, but can be accessed as `self.model`
+    from any class that derives from Object.
+    """
+
+    def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'):
+        self._cache = _ModelCache(meta, backend)
+        self._backend = backend
+        self._unit = self.get_unit(self._backend.unit_name)
+        self._relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache)
+        self._config = ConfigData(self._backend)
+        self._resources = Resources(list(meta.resources), self._backend)
+        self._pod = Pod(self._backend)
+        self._storages = StorageMapping(list(meta.storages), self._backend)
+        self._bindings = BindingMapping(self._backend)
+
+    @property
+    def unit(self) -> 'Unit':
+        """A :class:`Unit` that represents the unit that is running this code (eg yourself)."""
+        return self._unit
+
+    @property
+    def app(self):
+        """A :class:`Application` that represents the application this unit is a part of."""
+        return self._unit.app
+
+    @property
+    def relations(self) -> 'RelationMapping':
+        """Mapping of endpoint to list of :class:`Relation`.
+
+        Answers the question "what am I currently related to".
+        See also :meth:`.get_relation`.
+        """
+        return self._relations
+
+    @property
+    def config(self) -> 'ConfigData':
+        """Return a mapping of config for the current application."""
+        return self._config
+
+    @property
+    def resources(self) -> 'Resources':
+        """Access to resources for this charm.
+
+        Use ``model.resources.fetch(resource_name)`` to get the path on disk
+        where the resource can be found.
+        """
+        return self._resources
+
+    @property
+    def storages(self) -> 'StorageMapping':
+        """Mapping of storage_name to :class:`Storage` as defined in metadata.yaml."""
+        return self._storages
+
+    @property
+    def pod(self) -> 'Pod':
+        """Use ``model.pod.set_spec`` to set the container specification for Kubernetes charms."""
+        return self._pod
+
+    @property
+    def name(self) -> str:
+        """Return the name of the Model that this unit is running in.
+
+        This is read from the environment variable ``JUJU_MODEL_NAME``.
+        """
+        return self._backend.model_name
+
+    def get_unit(self, unit_name: str) -> 'Unit':
+        """Get an arbitrary unit by name.
+
+        Internally this uses a cache, so asking for the same unit two times will
+        return the same object.
+        """
+        return self._cache.get(Unit, unit_name)
+
+    def get_app(self, app_name: str) -> 'Application':
+        """Get an application by name.
+
+        Internally this uses a cache, so asking for the same application two times will
+        return the same object.
+        """
+        return self._cache.get(Application, app_name)
+
+    def get_relation(
+            self, relation_name: str,
+            relation_id: typing.Optional[int] = None) -> 'Relation':
+        """Get a specific Relation instance.
+
+        If relation_id is not given, this will return the Relation instance if the
+        relation is established only once, or None if it is not established. If the
+        relation is established multiple times, TooManyRelatedAppsError is raised.
+
+        Args:
+            relation_name: The name of the endpoint for this charm
+            relation_id: An identifier for a specific relation. Used to disambiguate when a
+                given application has more than one relation on a given endpoint.
+
+        Raises:
+            TooManyRelatedAppsError: if there is more than one relation to the
+                supplied relation_name and no relation_id was supplied
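+
+        Example (endpoint name is illustrative)::
+
+            db_relation = self.model.get_relation('db')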
+        """
+        return self.relations._get_unique(relation_name, relation_id)
+
+    def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a network space binding.
+
+        Args:
+            binding_key: The relation name or instance to obtain bindings for.
+
+        Returns:
+            If ``binding_key`` is a relation name, the method returns the default binding
+            for that relation. If a relation instance is provided, the method first looks
+            up a more specific binding for that specific relation ID, and if none is found
+            falls back to the default binding for the relation name.
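+
+        Example (endpoint name is illustrative)::
+
+            binding = self.model.get_binding('db')
+            bind_address = binding.network.bind_address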
+        """
+        return self._bindings.get(binding_key)
+
+
+class _ModelCache:
+
+    def __init__(self, meta, backend):
+        self._meta = meta
+        self._backend = backend
+        self._weakrefs = weakref.WeakValueDictionary()
+
+    def get(self, entity_type, *args):
+        key = (entity_type,) + args
+        entity = self._weakrefs.get(key)
+        if entity is None:
+            entity = entity_type(*args, meta=self._meta, backend=self._backend, cache=self)
+            self._weakrefs[key] = entity
+        return entity
+
+
+class Application:
+    """Represents a named application in the model.
+
+    This might be your application, or might be an application that you are related to.
+    Charmers should not instantiate Application objects directly, but should use
+    :meth:`Model.get_app` if they need a reference to a given application.
+
+    Attributes:
+        name: The name of this application (eg, 'mysql'). This name may differ from the name of
+            the charm, if the user has deployed it to a different name.
+    """
+
+    def __init__(self, name, meta, backend, cache):
+        self.name = name
+        self._backend = backend
+        self._cache = cache
+        self._is_our_app = self.name == self._backend.app_name
+        self._status = None
+
+    def _invalidate(self):
+        self._status = None
+
+    @property
+    def status(self) -> 'StatusBase':
+        """Used to report or read the status of the overall application.
+
+        Can only be read and set by the lead unit of the application.
+
+        The status of remote applications is always Unknown.
+
+        Raises:
+            RuntimeError: if you try to set the status of another application, or if you try to
+                set the status of this application as a unit that is not the leader.
+            InvalidStatusError: if you try to set the status to something that is not a
+                :class:`StatusBase`
+
+        Example::
+
+            self.model.app.status = BlockedStatus('I need a human to come help me')
+        """
+        if not self._is_our_app:
+            return UnknownStatus()
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot get application status as a non-leader unit')
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=True)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for application {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_app:
+            raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot set application status as a non-leader unit')
+
+        self._backend.status_set(value.name, value.message, is_app=True)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+    """Represents a named unit in the model.
+
+    This might be your unit, another unit of your application, or a unit of another application
+    that you are related to.
+
+    Attributes:
+        name: The name of the unit (eg, 'mysql/0')
+        app: The Application the unit is a part of.
+    """
+
+    def __init__(self, name, meta, backend, cache):
+        self.name = name
+
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        self._status = None
+
+        if self._is_our_unit:
+            self._containers = ContainerMapping(meta.containers, backend)
+
+    def _invalidate(self):
+        self._status = None
+
+    @property
+    def status(self) -> 'StatusBase':
+        """Used to report or read the status of a specific unit.
+
+        The status of any unit other than yourself is always Unknown.
+
+        Raises:
+            RuntimeError: if you try to set the status of a unit other than yourself.
+            InvalidStatusError: if you try to set the status to something other than
+                a :class:`StatusBase`
+        Example::
+
+            self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+        """
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self) -> bool:
+        """Return whether this unit is the leader of its application.
+
+        This can only be called for your own unit.
+
+        Returns:
+            True if you are the leader, False otherwise
+        Raises:
+            RuntimeError: if called for a unit that is not yourself
+        """
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+            # of a hook execution.
+            return self._backend.is_leader()
+        else:
+            raise RuntimeError(
+                'leadership status of remote units ({}) is not visible to other'
+                ' applications'.format(self)
+            )
+
+    def set_workload_version(self, version: str) -> None:
+        """Record the version of the software running as the workload.
+
+        This shouldn't be confused with the revision of the charm. It is informative only,
+        and is shown in the output of 'juju status'.
+        """
+        if not isinstance(version, str):
+            raise TypeError("workload version must be a str, not {}: {!r}".format(
+                type(version).__name__, version))
+        self._backend.application_version_set(version)
+
+    @property
+    def containers(self) -> 'ContainerMapping':
+        """Return a mapping of containers indexed by name."""
+        if not self._is_our_unit:
+            raise RuntimeError('cannot get container for a remote unit {}'.format(self))
+        return self._containers
+
+    def get_container(self, container_name: str) -> 'Container':
+        """Get a single container by name.
+
+        Raises:
+            ModelError: if the named container doesn't exist
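+
+        Example (container name is illustrative)::
+
+            container = self.model.unit.get_container('workload')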
+        """
+        try:
+            return self.containers[container_name]
+        except KeyError:
+            raise ModelError('container {!r} not found'.format(container_name))
+
+
+class LazyMapping(Mapping, ABC):
+    """Represents a dict that isn't populated until it is accessed.
+
+    Charm authors should generally never need to use this directly, but it forms
+    the basis for many of the dicts that the framework tracks.
+    """
+
+    _lazy_data = None
+
+    @abstractmethod
+    def _load(self):
+        raise NotImplementedError()
+
+    @property
+    def _data(self):
+        data = self._lazy_data
+        if data is None:
+            data = self._lazy_data = self._load()
+        return data
+
+    def _invalidate(self):
+        self._lazy_data = None
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def __repr__(self):
+        return repr(self._data)
+
+
+class RelationMapping(Mapping):
+    """Map of relation names to lists of :class:`Relation` instances."""
+
+    def __init__(self, relations_meta, our_unit, backend, cache):
+        self._peers = set()
+        for name, relation_meta in relations_meta.items():
+            if relation_meta.role.is_peer():
+                self._peers.add(name)
+        self._our_unit = our_unit
+        self._backend = backend
+        self._cache = cache
+        self._data = {relation_name: None for relation_name in relations_meta}
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, relation_name):
+        is_peer = relation_name in self._peers
+        relation_list = self._data[relation_name]
+        if relation_list is None:
+            relation_list = self._data[relation_name] = []
+            for rid in self._backend.relation_ids(relation_name):
+                relation = Relation(relation_name, rid, is_peer,
+                                    self._our_unit, self._backend, self._cache)
+                relation_list.append(relation)
+        return relation_list
+
+    def _invalidate(self, relation_name):
+        """Used to wipe the cache of a given relation_name.
+
+        Not meant to be used by Charm authors. The content of relation data is
+        static for the lifetime of a hook, so it is safe to cache in memory once
+        accessed.
+        """
+        self._data[relation_name] = None
+
+    def _get_unique(self, relation_name, relation_id=None):
+        if relation_id is not None:
+            if not isinstance(relation_id, int):
+                raise ModelError('relation id {} must be int or None not {}'.format(
+                    relation_id,
+                    type(relation_id).__name__))
+            for relation in self[relation_name]:
+                if relation.id == relation_id:
+                    return relation
+            # The loop has no break, so reaching this point means no relation
+            # with that id was found. The relation may be dead, but it is not
+            # forgotten.
+            is_peer = relation_name in self._peers
+            return Relation(relation_name, relation_id, is_peer,
+                            self._our_unit, self._backend, self._cache)
+        num_related = len(self[relation_name])
+        if num_related == 0:
+            return None
+        elif num_related == 1:
+            return self[relation_name][0]
+        else:
+            # TODO: We need something in the framework to catch and gracefully handle
+            # errors, ideally integrating the error catching with Juju's mechanisms.
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+    """Mapping of endpoints to network bindings.
+
+    Charm authors should not instantiate this directly, but access it via
+    :meth:`Model.get_binding`
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a specific Binding for an endpoint/relation.
+
+        Not used directly by Charm authors. See :meth:`Model.get_binding`
+        """
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space.
+
+    Attributes:
+        name: The name of the endpoint this binding represents (eg, 'db')
+    """
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self) -> 'Network':
+        """The network information for this binding."""
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details.
+
+    Charm authors should not instantiate this directly, but should get access to the Network
+    definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+    Attributes:
+        interfaces: A list of :class:`NetworkInterface` details. This includes the
+            information about how your application should be configured (eg, what
+            IP addresses should you bind to.)
+            Note that multiple addresses for a single interface are represented as multiple
+            interfaces. (eg, ``[NetworkInfo('ens1', '10.1.1.1/32'),
+            NetworkInfo('ens1', '10.1.2.1/32')]``)
+        ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+            addresses that other units should use to get in touch with you.
+        egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+            other units will see you connecting from. Due to things like NAT it isn't always
+            possible to narrow it down to a single address, but when it is clear, the CIDRs
+            will be constrained to a single address. (eg, 10.0.0.1/32)
+    Args:
+        network_info: A dict of network information as returned by ``network-get``.
+    """
+
+    def __init__(self, network_info: dict):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
+        for interface_info in network_info.get('bind-addresses', []):
+            interface_name = interface_info.get('interface-name')
+            addrs = interface_info.get('addresses')
+            if addrs is not None:
+                for address_info in addrs:
+                    self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info.get('ingress-addresses', []):
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info.get('egress-subnets', []):
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        """A single address that your application should bind() to.
+
+        This covers the common case where there is a single answer. It is the first
+        address from :attr:`.interfaces` and can be used to configure where your
+        application should bind() and listen().
+        """
+        if self.interfaces:
+            return self.interfaces[0].address
+        else:
+            return None
+
+    @property
+    def ingress_address(self):
+        """The address other applications should use to connect to your unit.
+
+        Due to things like public/private addresses, NAT and tunneling, the address you bind()
+        to is not always the address other people can use to connect() to you.
+        This is just the first address from :attr:`.ingress_addresses`.
+        """
+        if self.ingress_addresses:
+            return self.ingress_addresses[0]
+        else:
+            return None
+
+
+class NetworkInterface:
+    """Represents a single network interface that the charm needs to know about.
+
+    Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding`
+    to get the network information for a given endpoint.
+
+    Attributes:
+        name: The name of the interface (eg. 'eth0', or 'ens1')
+        subnet: An :class:`ipaddress.ip_network` representation of the IP for the network
+            interface. This may be a single address (eg '10.0.1.2/32')
+    """
+
+    def __init__(self, name: str, address_info: dict):
+        self.name = name
+        # TODO: expose a hardware address here, see LP: #1864070.
+        address = address_info.get('value')
+        # The value field may be empty.
+        if address:
+            self.address = ipaddress.ip_address(address)
+        else:
+            self.address = None
+        cidr = address_info.get('cidr')
+        # The cidr field may be empty, see LP: #1864102.
+        if cidr:
+            self.subnet = ipaddress.ip_network(cidr)
+        elif address:
+            # If we have an address, convert it to a /32 or /128 IP network.
+            self.subnet = ipaddress.ip_network(address)
+        else:
+            self.subnet = None
+        # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+    """Represents an established relation between this application and another application.
+
+    This class should not be instantiated directly, instead use :meth:`Model.get_relation`
+    or :attr:`ops.charm.RelationEvent.relation`.
+
+    Attributes:
+        name: The name of the local endpoint of the relation (eg 'db')
+        id: The identifier for a particular relation (integer)
+        app: An :class:`Application` representing the remote application of this relation.
+            For peer relations this will be the local application.
+        units: A set of :class:`Unit` for units that have started and joined this relation.
+        data: A :class:`RelationData` holding the data buckets for each entity
+            of a relation. Accessed via eg Relation.data[unit]['foo']
+    """
+
+    def __init__(
+            self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit,
+            backend: '_ModelBackend', cache: '_ModelCache'):
+        self.name = relation_name
+        self.id = relation_id
+        self.app = None
+        self.units = set()
+
+        if is_peer:
+            # For peer relations, both the remote and the local app are the same.
+            self.app = our_unit.app
+
+        try:
+            for unit_name in backend.relation_list(self.id):
+                unit = cache.get(Unit, unit_name)
+                self.units.add(unit)
+                if self.app is None:
+                    # Use the app of one of the units if available.
+                    self.app = unit.app
+        except RelationNotFoundError:
+            # If the relation is dead, just treat it as if it has no remote units.
+            pass
+
+        # If we didn't get the remote app via our_unit.app or the units list,
+        # look it up via JUJU_REMOTE_APP or "relation-list --app".
+        if self.app is None:
+            app_name = backend.relation_remote_app_name(relation_id)
+            if app_name is not None:
+                self.app = cache.get(Application, app_name)
+
+        self.data = RelationData(self, our_unit, backend)
+
+    def __repr__(self):
+        return '<{}.{} {}:{}>'.format(type(self).__module__,
+                                      type(self).__name__,
+                                      self.name,
+                                      self.id)
+
+
+class RelationData(Mapping):
+    """Represents the various data buckets of a given relation.
+
+    Each unit and application involved in a relation has their own data bucket.
+    Eg: ``{entity: RelationDataContent}``
+    where entity can be either a :class:`Unit` or a :class:`Application`.
+
+    Units can read and write their own data, and if they are the leader,
+    they can read and write their application data. They are allowed to read
+    remote unit and application data.
+
+    This class should not be created directly. It should be accessed via
+    :attr:`Relation.data`
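+
+    Example (endpoint, key, and value are illustrative)::
+
+        relation = self.model.get_relation('db')
+        relation.data[self.unit]['hostname'] = 'myhost.example.com'
+        remote_value = relation.data[relation.app].get('endpoint')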
+    """
+
+    def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'):
+        self.relation = weakref.proxy(relation)
+        self._data = {
+            our_unit: RelationDataContent(self.relation, our_unit, backend),
+            our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+        }
+        self._data.update({
+            unit: RelationDataContent(self.relation, unit, backend)
+            for unit in self.relation.units})
+        # The relation might be dead so avoid a None key here.
+        if self.relation.app is not None:
+            self._data.update({
+                self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+            })
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def __repr__(self):
+        return repr(self._data)
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether the
+# content is actually mutable is decided by the _is_mutable() check below.
+class RelationDataContent(LazyMapping, MutableMapping):
+    """Data content of a unit or application in a relation."""
+
+    def __init__(self, relation, entity, backend):
+        self.relation = relation
+        self._entity = entity
+        self._backend = backend
+        self._is_app = isinstance(entity, Application)
+
+    def _load(self):
+        """Load the data from the current entity / relation."""
+        try:
+            return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+        except RelationNotFoundError:
+            # Dead relations tell no tales (and have no data).
+            return {}
+
+    def _is_mutable(self):
+        """Return if the data content can be modified."""
+        if self._is_app:
+            is_our_app = self._backend.app_name == self._entity.name
+            if not is_our_app:
+                return False
+            # Whether the application data bag is mutable or not depends on
+            # whether this unit is a leader or not, but this is not guaranteed
+            # to be always true during the same hook execution.
+            return self._backend.is_leader()
+        else:
+            is_our_unit = self._backend.unit_name == self._entity.name
+            if is_our_unit:
+                return True
+        return False
+
+    def __setitem__(self, key, value):
+        if not self._is_mutable():
+            raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+        if not isinstance(value, str):
+            raise RelationDataError('relation data values must be strings')
+
+        self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+        # Don't load data unnecessarily if we're only updating.
+        if self._lazy_data is not None:
+            if value == '':
+                # Match the behavior of Juju, which is that setting the value to an
+                # empty string will remove the key entirely from the relation data.
+                self._data.pop(key, None)
+            else:
+                self._data[key] = value
+
+    def __delitem__(self, key):
+        # Match the behavior of Juju, which is that setting the value to an empty
+        # string will remove the key entirely from the relation data.
+        self.__setitem__(key, '')
+
+
+class ConfigData(LazyMapping):
+    """Configuration data.
+
+    This class should not be created directly. It should be accessed via :attr:`Model.config`.
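+
+    Example (option name is illustrative)::
+
+        log_level = self.model.config.get('log-level', 'info')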
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+
+    def _load(self):
+        return self._backend.config_get()
+
+
+class StatusBase:
+    """Status values specific to applications and units.
+
+    To access a status by name, see :meth:`StatusBase.from_name`; most use cases will
+    simply use the appropriate child class directly to indicate their status.
+    """
+
+    _statuses = {}
+    name = None
+
+    def __init__(self, message: str):
+        self.message = message
+
+    def __new__(cls, *args, **kwargs):
+        """Forbid the usage of StatusBase directly."""
+        if cls is StatusBase:
+            raise TypeError("cannot instantiate a base class")
+        return super().__new__(cls)
+
+    def __eq__(self, other):
+        if not isinstance(self, type(other)):
+            return False
+        return self.message == other.message
+
+    def __repr__(self):
+        return "{.__class__.__name__}({!r})".format(self, self.message)
+
+    @classmethod
+    def from_name(cls, name: str, message: str):
+        """Get the specific Status for the name (or UnknownStatus if not registered)."""
+        if name == 'unknown':
+            # unknown is special
+            return UnknownStatus()
+        else:
+            return cls._statuses[name](message)
+
+    @classmethod
+    def register(cls, child):
+        """Register a Status for the child's name."""
+        if child.name is None:
+            raise AttributeError('cannot register a Status which has no name')
+        cls._statuses[child.name] = child
+        return child
+
+
+@StatusBase.register
+class UnknownStatus(StatusBase):
+    """The unit status is unknown.
+
+    A unit-agent has finished calling install, config-changed and start, but the
+    charm has not called status-set yet.
+
+    """
+    name = 'unknown'
+
+    def __init__(self):
+        # Unknown status cannot be set and does not have a message associated with it.
+        super().__init__('')
+
+    def __repr__(self):
+        return "UnknownStatus()"
+
+
+@StatusBase.register
+class ActiveStatus(StatusBase):
+    """The unit is ready.
+
+    The unit believes it is correctly offering all the services it has been asked to offer.
+    """
+    name = 'active'
+
+    def __init__(self, message: str = ''):
+        super().__init__(message)
+
+
+@StatusBase.register
+class BlockedStatus(StatusBase):
+    """The unit requires manual intervention.
+
+    An operator has to manually intervene to unblock the unit and let it proceed.
+    """
+    name = 'blocked'
+
+
+@StatusBase.register
+class MaintenanceStatus(StatusBase):
+    """The unit is performing maintenance tasks.
+
+    The unit is not yet providing services, but is actively doing work in preparation
+    for providing those services.  This is a "spinning" state, not an error state. It
+    reflects activity on the unit itself, not on peers or related units.
+
+    """
+    name = 'maintenance'
+
+
+@StatusBase.register
+class WaitingStatus(StatusBase):
+    """A unit is unable to progress.
+
+    The unit is unable to progress to an active state because an application to which
+    it is related is not running.
+
+    """
+    name = 'waiting'
+
+
+class Resources:
+    """Object representing resources for the charm."""
+
+    def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._paths = {name: None for name in names}
+
+    def fetch(self, name: str) -> Path:
+        """Fetch the resource from the controller or store.
+
+        If successfully fetched, this returns a Path object to where the resource is stored
+        on disk, otherwise it raises a ModelError.
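+
+        Example (resource name is illustrative)::
+
+            image_path = self.model.resources.fetch('software-image')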
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
+
+
+class Pod:
+    """Represents the definition of a pod spec in Kubernetes models.
+
+    Currently only supports simple access to setting the Juju pod spec via :meth:`.set_spec`.
+    """
+
+    def __init__(self, backend: '_ModelBackend'):
+        self._backend = backend
+
+    def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None):
+        """Set the specification for pods that Juju should start in kubernetes.
+
+        See `juju help-tool pod-spec-set` for details of what should be passed.
+
+        Args:
+            spec: The mapping defining the pod specification
+            k8s_resources: Additional kubernetes specific specification.
+
+        Returns:
+            None
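+
+        Example (a minimal, illustrative v3 pod spec)::
+
+            self.model.pod.set_spec({
+                'version': 3,
+                'containers': [{
+                    'name': 'app',
+                    'image': 'example/app:latest',
+                    'ports': [{'containerPort': 8080, 'protocol': 'TCP'}],
+                }],
+            })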
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Requests new storage instances of a given name.
+
+        Uses storage-add tool to request additional storage. Juju will notify the unit
+        via <storage-name>-storage-attached events when it becomes available.
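+
+        Example (storage name is illustrative)::
+
+            self.model.storages.request('data', count=2)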
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage as defined in metadata.yaml.
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        """Return the location of the storage."""
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class Container:
+    """Represents a named container in a unit.
+
+    This class should not be instantiated directly, instead use :meth:`Unit.get_container`
+    or :attr:`Unit.containers`.
+
+    Attributes:
+        name: The name of the container from metadata.yaml (eg, 'postgres').
+    """
+
+    def __init__(self, name, backend, pebble_client=None):
+        self.name = name
+
+        if pebble_client is None:
+            socket_path = '/charm/containers/{}/pebble.socket'.format(name)
+            pebble_client = backend.get_pebble(socket_path)
+        self._pebble = pebble_client
+
+    @property
+    def pebble(self) -> 'pebble.Client':
+        """Return the low-level Pebble client instance for this container."""
+        return self._pebble
+
+    def autostart(self):
+        """Autostart all services marked as startup: enabled."""
+        self._pebble.autostart_services()
+
+    def start(self, *service_names: str):
+        """Start given service(s) by name."""
+        self._pebble.start_services(service_names)
+
+    def stop(self, *service_names: str):
+        """Stop given service(s) by name."""
+        self._pebble.stop_services(service_names)
+
+    def add_layer(self, label: str, layer: typing.Union[str, typing.Dict, 'pebble.Layer'], *,
+                  combine: bool = False):
+        """Dynamically add a new layer onto the Pebble configuration layers.
+
+        Args:
+            label: Label for new layer (and label of layer to merge with if
+                combining).
+            layer: A YAML string, configuration layer dict, or pebble.Layer
+                object containing the Pebble layer to add.
+            combine: If combine is False (the default), append the new layer
+                as the top layer with the given label (must be unique). If
+                combine is True and the label already exists, the two layers
+                are combined into a single one considering the layer override
+                rules; if the layer doesn't exist, it is added as usual.
+        """
+        self._pebble.add_layer(label, layer, combine=combine)
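+
+    # Example (illustrative sketch, not part of the vendored library): define and
+    # start a hypothetical 'myapp' service, with `container` obtained via
+    # Unit.get_container('myapp'):
+    #
+    #     container.add_layer('myapp', {
+    #         'summary': 'myapp layer',
+    #         'services': {
+    #             'myapp': {
+    #                 'override': 'replace',
+    #                 'command': '/usr/bin/myapp --serve',
+    #                 'startup': 'enabled',
+    #             },
+    #         },
+    #     }, combine=True)
+    #     container.autostart()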
+
+    def get_plan(self) -> 'pebble.Plan':
+        """Get the current effective pebble configuration."""
+        return self._pebble.get_plan()
+
+    def get_services(self, *service_names: str) -> 'ServiceInfoMapping':
+        """Fetch and return a mapping of status information indexed by service name.
+
+        If no service names are specified, return status information for all
+        services, otherwise return information for only the given services.
+        """
+        services = self._pebble.get_services(service_names)
+        return ServiceInfoMapping(services)
+
+    def get_service(self, service_name: str) -> 'pebble.ServiceInfo':
+        """Get status information for a single named service.
+
+        Raises :class:`ModelError` if service_name is not found.
+        """
+        services = self.get_services(service_name)
+        if not services:
+            raise ModelError('service {!r} not found'.format(service_name))
+        if len(services) > 1:
+            raise RuntimeError('expected 1 service, got {}'.format(len(services)))
+        return services[service_name]
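+
+    # Example (illustrative sketch, not part of the vendored library): check
+    # whether a hypothetical 'myapp' service is currently running:
+    #
+    #     if container.get_service('myapp').is_running():
+    #         ...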
+
+    def pull(self, path: str, *, encoding: str = 'utf-8') -> typing.Union[typing.BinaryIO,
+                                                                          typing.TextIO]:
+        """Read a file's content from the remote system.
+
+        Args:
+            path: Path of the file to read from the remote system.
+            encoding: Encoding to use for decoding the file's bytes to str,
+                or None to specify no decoding.
+
+        Returns:
+            A readable file-like object, whose read() method will return str
+            objects decoded according to the specified encoding, or bytes if
+            encoding is None.
+        """
+        return self._pebble.pull(path, encoding=encoding)
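+
+    # Example (illustrative sketch, not part of the vendored library): read a
+    # remote text file into a str (the path is hypothetical):
+    #
+    #     config = container.pull('/etc/myapp/myapp.conf').read()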
+
+    def push(
+            self, path: str, source: typing.Union[bytes, str, typing.BinaryIO, typing.TextIO], *,
+            encoding: str = 'utf-8', make_dirs: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        """Write content to a given file path on the remote system.
+
+        Args:
+            path: Path of the file to write to on the remote system.
+            source: Source of data to write. This is either a concrete str or
+                bytes instance, or a readable file-like object.
+            encoding: Encoding to use for encoding source str to bytes, or
+                strings read from source if it is a TextIO type. Ignored if
+                source is bytes or BinaryIO.
+            make_dirs: If True, create parent directories if they don't exist.
+            permissions: Permissions (mode) to create file with (Pebble default
+                is 0o644).
+            user_id: UID for file.
+            user: Username for file (user_id takes precedence).
+            group_id: GID for file.
+            group: Group name for file (group_id takes precedence).
+        """
+        self._pebble.push(path, source, encoding=encoding, make_dirs=make_dirs,
+                          permissions=permissions, user_id=user_id, user=user,
+                          group_id=group_id, group=group)
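+
+    # Example (illustrative sketch, not part of the vendored library): write a
+    # config file, creating parent directories (path and content hypothetical):
+    #
+    #     container.push('/etc/myapp/myapp.conf', 'port: 8080\n',
+    #                    make_dirs=True, permissions=0o600)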
+
+    def list_files(self, path: str, *, pattern: str = None,
+                   itself: bool = False) -> typing.List['pebble.FileInfo']:
+        """Return list of file information from given path on remote system.
+
+        Args:
+            path: Path of the directory to list, or path of the file to return
+                information about.
+            pattern: If specified, filter the list to just the files that match,
+                for example "*.txt".
+            itself: If path refers to a directory, return information about the
+                directory itself, rather than its contents.
+        """
+        return self._pebble.list_files(path, pattern=pattern, itself=itself)
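+
+    # Example (illustrative sketch, not part of the vendored library): list YAML
+    # files in a hypothetical directory:
+    #
+    #     for info in container.list_files('/etc/myapp', pattern='*.yaml'):
+    #         print(info.path, info.size)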
+
+    def make_dir(
+            self, path: str, *, make_parents: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        """Create a directory on the remote system with the given attributes.
+
+        Args:
+            path: Path of the directory to create on the remote system.
+            make_parents: If True, create parent directories if they don't exist.
+            permissions: Permissions (mode) to create directory with (Pebble
+                default is 0o755).
+            user_id: UID for directory.
+            user: Username for directory (user_id takes precedence).
+            group_id: GID for directory.
+            group: Group name for directory (group_id takes precedence).
+        """
+        self._pebble.make_dir(path, make_parents=make_parents, permissions=permissions,
+                              user_id=user_id, user=user, group_id=group_id, group=group)
+
+    def remove_path(self, path: str, *, recursive: bool = False):
+        """Remove a file or directory on the remote system.
+
+        Args:
+            path: Path of the file or directory to delete from the remote system.
+            recursive: If True, recursively delete path and everything under it.
+        """
+        self._pebble.remove_path(path, recursive=recursive)
+
+
+class ContainerMapping(Mapping):
+    """Map of container names to Container objects.
+
+    This is done as a mapping object rather than a plain dictionary so that we
+    can extend it later, and so it's not mutable.
+    """
+
+    def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._containers = {name: Container(name, backend) for name in names}
+
+    def __getitem__(self, key: str):
+        return self._containers[key]
+
+    def __iter__(self):
+        return iter(self._containers)
+
+    def __len__(self):
+        return len(self._containers)
+
+    def __repr__(self):
+        return repr(self._containers)
+
+
+class ServiceInfoMapping(Mapping):
+    """Map of service names to pebble.ServiceInfo objects.
+
+    This is done as a mapping object rather than a plain dictionary so that we
+    can extend it later, and so it's not mutable.
+    """
+
+    def __init__(self, services: typing.Iterable['pebble.ServiceInfo']):
+        self._services = {s.name: s for s in services}
+
+    def __getitem__(self, key: str):
+        return self._services[key]
+
+    def __iter__(self):
+        return iter(self._services)
+
+    def __len__(self):
+        return len(self._services)
+
+    def __repr__(self):
+        return repr(self._services)
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model."""
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+
+    This is raised if you're either trying to set a value to something that isn't a string,
+    or if you are trying to set a value in a bucket that you don't have access to. (eg,
+    another application/unit or setting your application data but you aren't the leader.)
+    """
+
+
+class RelationNotFoundError(ModelError):
+    """Backend error when querying juju for a given relation and that relation doesn't exist."""
+
+
+class InvalidStatusError(ModelError):
+    """Raised if trying to set an Application or Unit status to something invalid."""
+
+
+class _ModelBackend:
+    """Represents the connection between the Model representation and talking to Juju.
+
+    Charm authors should not directly interact with the ModelBackend, it is a private
+    implementation of Model.
+    """
+
+    LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30)
+
+    def __init__(self, unit_name=None, model_name=None):
+        if unit_name is None:
+            self.unit_name = os.environ['JUJU_UNIT_NAME']
+        else:
+            self.unit_name = unit_name
+        if model_name is None:
+            model_name = os.environ.get('JUJU_MODEL_NAME')
+        self.model_name = model_name
+        self.app_name = self.unit_name.split('/')[0]
+
+        self._is_leader = None
+        self._leader_check_time = None
+
+    def _run(self, *args, return_output=False, use_json=False):
+        kwargs = dict(stdout=PIPE, stderr=PIPE, check=True)
+        args = (shutil.which(args[0]),) + args[1:]
+        if use_json:
+            args += ('--format=json',)
+        try:
+            result = run(args, **kwargs)
+        except CalledProcessError as e:
+            raise ModelError(e.stderr)
+        if return_output:
+            if result.stdout is None:
+                return ''
+            else:
+                text = result.stdout.decode('utf8')
+                if use_json:
+                    return json.loads(text)
+                else:
+                    return text
+
+    @staticmethod
+    def _is_relation_not_found(model_error):
+        return 'relation not found' in str(model_error)
+
+    def relation_ids(self, relation_name):
+        relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
+        return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
+
+    def relation_list(self, relation_id):
+        try:
+            return self._run('relation-list', '-r', str(relation_id),
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if self._is_relation_not_found(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_remote_app_name(self, relation_id: int) -> typing.Optional[str]:
+        """Return remote app name for given relation ID, or None if not known."""
+        if 'JUJU_RELATION_ID' in os.environ and 'JUJU_REMOTE_APP' in os.environ:
+            event_relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+            if relation_id == event_relation_id:
+                # JUJU_RELATION_ID is this relation, use JUJU_REMOTE_APP.
+                return os.environ['JUJU_REMOTE_APP']
+
+        # If caller is asking for information about another relation, use
+        # "relation-list --app" to get it.
+        try:
+            return self._run('relation-list', '-r', str(relation_id), '--app',
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if self._is_relation_not_found(e):
+                return None
+            if 'option provided but not defined: --app' in str(e):
+                # "--app" was introduced to relation-list in Juju 2.8.1, so
+                # handle previous versions of Juju gracefully
+                return None
+            raise
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_get must be a boolean')
+
+        if is_app:
+            version = JujuVersion.from_environ()
+            if not version.has_app_data():
+                raise RuntimeError(
+                    'getting application data is not supported on Juju version {}'.format(version))
+
+        args = ['relation-get', '-r', str(relation_id), '-', member_name]
+        if is_app:
+            args.append('--app')
+
+        try:
+            return self._run(*args, return_output=True, use_json=True)
+        except ModelError as e:
+            if self._is_relation_not_found(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_set(self, relation_id, key, value, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_set must be a boolean')
+
+        if is_app:
+            version = JujuVersion.from_environ()
+            if not version.has_app_data():
+                raise RuntimeError(
+                    'setting application data is not supported on Juju version {}'.format(version))
+
+        args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)]
+        if is_app:
+            args.append('--app')
+
+        try:
+            return self._run(*args)
+        except ModelError as e:
+            if self._is_relation_not_found(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def config_get(self):
+        return self._run('config-get', return_output=True, use_json=True)
+
+    def is_leader(self):
+        """Obtain the current leadership status for the unit the charm code is executing on.
+
+        The value is cached for the duration of a lease, which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.yaml'
+            with spec_path.open("wt", encoding="utf8") as f:
+                yaml.safe_dump(spec, stream=f)
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.yaml'
+                with k8s_res_path.open("wt", encoding="utf8") as f:
+                    yaml.safe_dump(k8s_resources, stream=f)
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be retrieved for a unit
+                or an application.
+        """
+        content = self._run(
+            'status-get', '--include-data', '--application={}'.format(is_app),
+            use_json=True,
+            return_output=True)
+        # Unit status looks like (in YAML):
+        # message: 'load: 0.28 0.26 0.26'
+        # status: active
+        # status-data: {}
+        # Application status looks like (in YAML):
+        # application-status:
+        #   message: 'load: 0.28 0.26 0.26'
+        #   status: active
+        #   status-data: {}
+        #   units:
+        #     uo/0:
+        #       message: 'load: 0.28 0.26 0.26'
+        #       status: active
+        #       status-data: {}
+
+        if is_app:
+            return {'status': content['application-status']['status'],
+                    'message': content['application-status']['message']}
+        else:
+            return content
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        Args:
+            status: The status to set.
+            message: The message to set in the status.
+            is_app: A boolean indicating whether the status should be set for a unit or an
+                    application.
+        """
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter must be boolean')
+        return self._run('status-set', '--application={}'.format(is_app), status, message)
+
+    def storage_list(self, name):
+        return [int(s.split('/')[1]) for s in self._run('storage-list', name,
+                                                        return_output=True, use_json=True)]
+
+    def storage_get(self, storage_name_id, attribute):
+        return self._run('storage-get', '-s', storage_name_id, attribute,
+                         return_output=True, use_json=True)
+
+    def storage_add(self, name, count=1):
+        if not isinstance(count, int) or isinstance(count, bool):
+            raise TypeError('storage count must be integer, got: {} ({})'.format(count,
+                                                                                 type(count)))
+        self._run('storage-add', '{}={}'.format(name, count))
+
+    def action_get(self):
+        return self._run('action-get', return_output=True, use_json=True)
+
+    def action_set(self, results):
+        self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])
+
+    def action_log(self, message):
+        self._run('action-log', message)
+
+    def action_fail(self, message=''):
+        self._run('action-fail', message)
+
+    def application_version_set(self, version):
+        self._run('application-version-set', '--', version)
+
+    def juju_log(self, level, message):
+        self._run('juju-log', '--log-level', level, "--", message)
+
+    def network_get(self, binding_name, relation_id=None):
+        """Return network info provided by network-get for a given binding.
+
+        Args:
+            binding_name: A name of a binding (relation name or extra-binding name).
+            relation_id: An optional relation id to get network info for.
+        """
+        cmd = ['network-get', binding_name]
+        if relation_id is not None:
+            cmd.extend(['-r', str(relation_id)])
+        try:
+            return self._run(*cmd, return_output=True, use_json=True)
+        except ModelError as e:
+            if self._is_relation_not_found(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def add_metrics(self, metrics, labels=None):
+        cmd = ['add-metric']
+
+        if labels:
+            label_args = []
+            for k, v in labels.items():
+                _ModelBackendValidator.validate_metric_label(k)
+                _ModelBackendValidator.validate_label_value(k, v)
+                label_args.append('{}={}'.format(k, v))
+            cmd.extend(['--labels', ','.join(label_args)])
+
+        metric_args = []
+        for k, v in metrics.items():
+            _ModelBackendValidator.validate_metric_key(k)
+            metric_value = _ModelBackendValidator.format_metric_value(v)
+            metric_args.append('{}={}'.format(k, metric_value))
+        cmd.extend(metric_args)
+        self._run(*cmd)
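+
+    # Example (illustrative, not part of the vendored library): a call such as
+    # add_metrics({'users': 5}, labels={'region': 'us'}) runs roughly:
+    #
+    #     add-metric --labels region=us users=5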
+
+    def get_pebble(self, socket_path: str) -> 'pebble.Client':
+        """Create a pebble.Client instance from given socket path."""
+        return pebble.Client(socket_path=socket_path)
+
+
+class _ModelBackendValidator:
+    """Provides facilities for validating inputs and formatting them for model backends."""
+
+    METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$')
+
+    @classmethod
+    def validate_metric_key(cls, key):
+        if cls.METRIC_KEY_REGEX.match(key) is None:
+            raise ModelError(
+                'invalid metric key {!r}: must match {}'.format(
+                    key, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def validate_metric_label(cls, label_name):
+        if cls.METRIC_KEY_REGEX.match(label_name) is None:
+            raise ModelError(
+                'invalid metric label name {!r}: must match {}'.format(
+                    label_name, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def format_metric_value(cls, value):
+        try:
+            decimal_value = decimal.Decimal.from_float(value)
+        except TypeError as e:
+            e2 = ModelError('invalid metric value {!r} provided:'
+                            ' must be a positive finite float'.format(value))
+            raise e2 from e
+        if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0:
+            raise ModelError('invalid metric value {!r} provided:'
+                             ' must be a positive finite float'.format(value))
+        return str(decimal_value)
+
+    @classmethod
+    def validate_label_value(cls, label, value):
+        # Label values cannot be empty, contain commas or equal signs as those are
+        # used by add-metric as separators.
+        if not value:
+            raise ModelError(
+                'metric label {} has an empty value, which is not allowed'.format(label))
+        v = str(value)
+        if re.search('[,=]', v) is not None:
+            raise ModelError(
+                'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/pebble.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/pebble.py
new file mode 100644
index 0000000000000000000000000000000000000000..a34d901aec54096c7ffd150bff2d72822c45605d
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/pebble.py
@@ -0,0 +1,1112 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for the Pebble API (HTTP over Unix socket).
+
+For a command-line interface for local testing, see test/pebble_cli.py.
+"""
+
+from email.mime.base import MIMEBase
+from email.mime.multipart import MIMEMultipart
+import cgi
+import datetime
+import email.parser
+import enum
+import http.client
+import io
+import json
+import re
+import socket
+import sys
+import time
+import typing
+import urllib.error
+import urllib.parse
+import urllib.request
+
+from ops._private import yaml
+
+
+_not_provided = object()
+
+
+class _UnixSocketConnection(http.client.HTTPConnection):
+    """Implementation of HTTPConnection that connects to a named Unix socket."""
+
+    def __init__(self, host, timeout=_not_provided, socket_path=None):
+        if timeout is _not_provided:
+            super().__init__(host)
+        else:
+            super().__init__(host, timeout=timeout)
+        self.socket_path = socket_path
+
+    def connect(self):
+        """Override connect to use Unix socket (instead of TCP socket)."""
+        if not hasattr(socket, 'AF_UNIX'):
+            raise NotImplementedError('Unix sockets not supported on {}'.format(sys.platform))
+        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        self.sock.connect(self.socket_path)
+        if self.timeout is not _not_provided:
+            self.sock.settimeout(self.timeout)
+
+
+class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
+    """Implementation of HTTPHandler that uses a named Unix socket."""
+
+    def __init__(self, socket_path):
+        super().__init__()
+        self.socket_path = socket_path
+
+    def http_open(self, req):
+        """Override http_open to use a Unix socket connection (instead of TCP)."""
+        return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)
+
+
+# Matches yyyy-mm-ddTHH:MM:SS(.sss)ZZZ
+_TIMESTAMP_RE = re.compile(
+    r'(\d{4})-(\d{2})-(\d{2})[Tt](\d{2}):(\d{2}):(\d{2})(\.\d+)?(.*)')
+
+# Matches [-+]HH:MM
+_TIMEOFFSET_RE = re.compile(r'([-+])(\d{2}):(\d{2})')
+
+
+def _parse_timestamp(s):
+    """Parse timestamp from Go-encoded JSON.
+
+    This parses RFC3339 timestamps (which are a subset of ISO8601 timestamps)
+    that Go's encoding/json package produces for time.Time values.
+
+    Unfortunately we can't use datetime.fromisoformat(), as that does not
+    support more than 6 digits for the fractional second, nor the 'Z' for UTC.
+    Also, it was only introduced in Python 3.7.
+    """
+    match = _TIMESTAMP_RE.match(s)
+    if not match:
+        raise ValueError('invalid timestamp {!r}'.format(s))
+    y, m, d, hh, mm, ss, sfrac, zone = match.groups()
+
+    if zone in ('Z', 'z'):
+        tz = datetime.timezone.utc
+    else:
+        match = _TIMEOFFSET_RE.match(zone)
+        if not match:
+            raise ValueError('invalid timestamp {!r}'.format(s))
+        sign, zh, zm = match.groups()
+        tz_delta = datetime.timedelta(hours=int(zh), minutes=int(zm))
+        tz = datetime.timezone(tz_delta if sign == '+' else -tz_delta)
+
+    microsecond = round(float(sfrac or '0') * 1000000)
+
+    return datetime.datetime(int(y), int(m), int(d), int(hh), int(mm), int(ss),
+                             microsecond=microsecond, tzinfo=tz)
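+
+# Example (illustrative): this handles the nanosecond fractions Go emits, which
+# datetime.fromisoformat() cannot parse; for instance
+# _parse_timestamp('2021-01-28T14:37:02.247202105+13:00') yields a datetime
+# with microsecond=247202 and a UTC+13:00 timezone.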
+
+
+def _json_loads(s: typing.Union[str, bytes]) -> typing.Dict:
+    """Like json.loads(), but handle str or bytes.
+
+    This is needed because an HTTP response's read() method returns bytes on
+    Python 3.5, and json.load doesn't handle bytes.
+    """
+    if isinstance(s, bytes):
+        s = s.decode('utf-8')
+    return json.loads(s)
+
+
+class Error(Exception):
+    """Base class of most errors raised by the Pebble client."""
+
+
+class TimeoutError(TimeoutError, Error):
+    """Raised when a polling timeout occurs."""
+
+
+class ConnectionError(Error):
+    """Raised when the Pebble client can't connect to the socket."""
+
+
+class ProtocolError(Error):
+    """Raised when there's a higher-level protocol error talking to Pebble."""
+
+
+class PathError(Error):
+    """Raised when there's an error with a specific path."""
+
+    def __init__(self, kind: str, message: str):
+        """This shouldn't be instantiated directly."""
+        self.kind = kind
+        self.message = message
+
+    def __str__(self):
+        return '{} - {}'.format(self.kind, self.message)
+
+    def __repr__(self):
+        return 'PathError({!r}, {!r})'.format(self.kind, self.message)
+
+
+class APIError(Error):
+    """Raised when an HTTP API error occurs talking to the Pebble server."""
+
+    def __init__(self, body: typing.Dict, code: int, status: str, message: str):
+        """This shouldn't be instantiated directly."""
+        super().__init__(message)  # Makes str(e) return message
+        self.body = body
+        self.code = code
+        self.status = status
+        self.message = message
+
+    def __repr__(self):
+        return 'APIError({!r}, {!r}, {!r}, {!r})'.format(
+            self.body, self.code, self.status, self.message)
+
+
+class ChangeError(Error):
+    """Raised by actions when a change is ready but has an error.
+
+    For example, this happens when you attempt to start an already-started
+    service:
+
+    cannot perform the following tasks:
+    - Start service "test" (service "test" was previously started)
+    """
+
+    def __init__(self, err: str, change: 'Change'):
+        """This shouldn't be instantiated directly."""
+        super().__init__(err)  # Makes str(e) return err
+        self.err = err
+        self.change = change
+
+    def __repr__(self):
+        return 'ChangeError({!r}, {!r})'.format(self.err, self.change)
+
+
+class WarningState(enum.Enum):
+    """Enum of states for get_warnings() select parameter."""
+
+    ALL = 'all'
+    PENDING = 'pending'
+
+
+class ChangeState(enum.Enum):
+    """Enum of states for get_changes() select parameter."""
+
+    ALL = 'all'
+    IN_PROGRESS = 'in-progress'
+    READY = 'ready'
+
+
+class SystemInfo:
+    """System information object."""
+
+    def __init__(self, version: str):
+        self.version = version
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'SystemInfo':
+        """Create new SystemInfo object from dict parsed from JSON."""
+        return cls(version=d['version'])
+
+    def __repr__(self):
+        return 'SystemInfo(version={self.version!r})'.format(self=self)
+
+
+class Warning:
+    """Warning object."""
+
+    def __init__(
+        self,
+        message: str,
+        first_added: datetime.datetime,
+        last_added: datetime.datetime,
+        last_shown: typing.Optional[datetime.datetime],
+        expire_after: str,
+        repeat_after: str,
+    ):
+        self.message = message
+        self.first_added = first_added
+        self.last_added = last_added
+        self.last_shown = last_shown
+        self.expire_after = expire_after
+        self.repeat_after = repeat_after
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'Warning':
+        """Create new Warning object from dict parsed from JSON."""
+        return cls(
+            message=d['message'],
+            first_added=_parse_timestamp(d['first-added']),
+            last_added=_parse_timestamp(d['last-added']),
+            last_shown=_parse_timestamp(d['last-shown']) if d.get('last-shown') else None,
+            expire_after=d['expire-after'],
+            repeat_after=d['repeat-after'],
+        )
+
+    def __repr__(self):
+        return ('Warning('
+                'message={self.message!r}, '
+                'first_added={self.first_added!r}, '
+                'last_added={self.last_added!r}, '
+                'last_shown={self.last_shown!r}, '
+                'expire_after={self.expire_after!r}, '
+                'repeat_after={self.repeat_after!r})'
+                ).format(self=self)
+
+
+class TaskProgress:
+    """Task progress object."""
+
+    def __init__(
+        self,
+        label: str,
+        done: int,
+        total: int,
+    ):
+        self.label = label
+        self.done = done
+        self.total = total
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'TaskProgress':
+        """Create new TaskProgress object from dict parsed from JSON."""
+        return cls(
+            label=d['label'],
+            done=d['done'],
+            total=d['total'],
+        )
+
+    def __repr__(self):
+        return ('TaskProgress('
+                'label={self.label!r}, '
+                'done={self.done!r}, '
+                'total={self.total!r})'
+                ).format(self=self)
+
+
+class TaskID(str):
+    """Task ID (a more strongly-typed string)."""
+
+    def __repr__(self):
+        return 'TaskID({!r})'.format(str(self))
+
+
+class Task:
+    """Task object."""
+
+    def __init__(
+        self,
+        id: TaskID,
+        kind: str,
+        summary: str,
+        status: str,
+        log: typing.List[str],
+        progress: TaskProgress,
+        spawn_time: datetime.datetime,
+        ready_time: typing.Optional[datetime.datetime],
+    ):
+        self.id = id
+        self.kind = kind
+        self.summary = summary
+        self.status = status
+        self.log = log
+        self.progress = progress
+        self.spawn_time = spawn_time
+        self.ready_time = ready_time
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'Task':
+        """Create new Task object from dict parsed from JSON."""
+        return cls(
+            id=TaskID(d['id']),
+            kind=d['kind'],
+            summary=d['summary'],
+            status=d['status'],
+            log=d.get('log') or [],
+            progress=TaskProgress.from_dict(d['progress']),
+            spawn_time=_parse_timestamp(d['spawn-time']),
+            ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
+        )
+
+    def __repr__(self):
+        return ('Task('
+                'id={self.id!r}, '
+                'kind={self.kind!r}, '
+                'summary={self.summary!r}, '
+                'status={self.status!r}, '
+                'log={self.log!r}, '
+                'progress={self.progress!r}, '
+                'spawn_time={self.spawn_time!r}, '
+                'ready_time={self.ready_time!r})'
+                ).format(self=self)
+
+
+class ChangeID(str):
+    """Change ID (a more strongly-typed string)."""
+
+    def __repr__(self):
+        return 'ChangeID({!r})'.format(str(self))
+
+
+class Change:
+    """Change object."""
+
+    def __init__(
+        self,
+        id: ChangeID,
+        kind: str,
+        summary: str,
+        status: str,
+        tasks: typing.List[Task],
+        ready: bool,
+        err: typing.Optional[str],
+        spawn_time: datetime.datetime,
+        ready_time: typing.Optional[datetime.datetime],
+    ):
+        self.id = id
+        self.kind = kind
+        self.summary = summary
+        self.status = status
+        self.tasks = tasks
+        self.ready = ready
+        self.err = err
+        self.spawn_time = spawn_time
+        self.ready_time = ready_time
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'Change':
+        """Create new Change object from dict parsed from JSON."""
+        return cls(
+            id=ChangeID(d['id']),
+            kind=d['kind'],
+            summary=d['summary'],
+            status=d['status'],
+            tasks=[Task.from_dict(t) for t in d.get('tasks') or []],
+            ready=d['ready'],
+            err=d.get('err'),
+            spawn_time=_parse_timestamp(d['spawn-time']),
+            ready_time=_parse_timestamp(d['ready-time']) if d.get('ready-time') else None,
+        )
+
+    def __repr__(self):
+        return ('Change('
+                'id={self.id!r}, '
+                'kind={self.kind!r}, '
+                'summary={self.summary!r}, '
+                'status={self.status!r}, '
+                'tasks={self.tasks!r}, '
+                'ready={self.ready!r}, '
+                'err={self.err!r}, '
+                'spawn_time={self.spawn_time!r}, '
+                'ready_time={self.ready_time!r})'
+                ).format(self=self)
+
+
+class Plan:
+    """Represents the effective Pebble configuration."""
+
+    def __init__(self, raw: str):
+        d = yaml.safe_load(raw) or {}
+        self._raw = raw
+        self._services = {name: Service(name, service)
+                          for name, service in d.get('services', {}).items()}
+
+    @property
+    def services(self):
+        """This plan's services mapping (maps service name to Service).
+
+        This property is currently read-only.
+        """
+        return self._services
+
+    def to_dict(self) -> typing.Dict[str, typing.Any]:
+        """Convert this plan to its dict representation."""
+        as_dicts = {name: service.to_dict() for name, service in self._services.items()}
+        if not as_dicts:
+            return {}
+        return {
+            'services': as_dicts,
+        }
+
+    def to_yaml(self) -> str:
+        """Return this plan's YAML representation."""
+        return yaml.safe_dump(self.to_dict())
+
+    __str__ = to_yaml
+
+
+class Layer:
+    """Represents a Pebble configuration layer.
+
+    The format of this is not documented, but is captured in code here:
+    https://github.com/canonical/pebble/blob/master/internal/plan/plan.go
+
+    Attributes:
+        summary: A summary of the purpose of this layer
+        description: A long form description of this layer
+        services: A mapping of name: :class:`Service` defined by this layer
+    """
+
+    # This is how you would declare type annotations, but they are not supported by Python 3.5
+    # summary: str
+    # description: str
+    # services: typing.Mapping[str, 'Service']
+
+    def __init__(self, raw: typing.Union[str, typing.Dict] = None):
+        if isinstance(raw, str):
+            d = yaml.safe_load(raw) or {}
+        else:
+            d = raw or {}
+        self.summary = d.get('summary', '')
+        self.description = d.get('description', '')
+        self.services = {name: Service(name, service)
+                         for name, service in d.get('services', {}).items()}
+
+    def to_yaml(self) -> str:
+        """Convert this layer to its YAML representation."""
+        return yaml.safe_dump(self.to_dict())
+
+    def to_dict(self) -> typing.Dict[str, typing.Any]:
+        """Convert this layer to its dict representation."""
+        fields = [
+            ('summary', self.summary),
+            ('description', self.description),
+            ('services', {name: service.to_dict() for name, service in self.services.items()})
+        ]
+        return {name: value for name, value in fields if value}
+
+    def __repr__(self) -> str:
+        return 'Layer({!r})'.format(self.to_dict())
+
+    __str__ = to_yaml
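+
+# Example (illustrative): a layer round-trips between dict and YAML form, e.g.
+#
+#     layer = Layer({'summary': 'base', 'services': {
+#         'srv': {'override': 'replace', 'command': '/bin/srv'}}})
+#     assert Layer(layer.to_yaml()).to_dict() == layer.to_dict()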
+
+
+class Service:
+    """Represents a service description in a Pebble configuration layer."""
+
+    def __init__(self, name: str, raw: typing.Dict = None):
+        self.name = name
+        raw = raw or {}
+        self.summary = raw.get('summary', '')
+        self.description = raw.get('description', '')
+        self.startup = raw.get('startup', '')
+        self.override = raw.get('override', '')
+        self.command = raw.get('command', '')
+        self.after = list(raw.get('after', []))
+        self.before = list(raw.get('before', []))
+        self.requires = list(raw.get('requires', []))
+        self.environment = dict(raw.get('environment', {}))
+
+    def to_dict(self) -> typing.Dict:
+        """Convert this service object to its dict representation."""
+        fields = [
+            ('summary', self.summary),
+            ('description', self.description),
+            ('startup', self.startup),
+            ('override', self.override),
+            ('command', self.command),
+            ('after', self.after),
+            ('before', self.before),
+            ('requires', self.requires),
+            ('environment', self.environment),
+        ]
+        return {name: value for name, value in fields if value}
+
+    def __repr__(self) -> str:
+        return 'Service({!r})'.format(self.to_dict())
+
+
+class ServiceStartup(enum.Enum):
+    """Enum of service startup options."""
+
+    ENABLED = 'enabled'
+    DISABLED = 'disabled'
+
+
+class ServiceStatus(enum.Enum):
+    """Enum of service statuses."""
+
+    ACTIVE = 'active'
+    INACTIVE = 'inactive'
+    ERROR = 'error'
+
+
+class ServiceInfo:
+    """Service status information."""
+
+    def __init__(
+        self,
+        name: str,
+        startup: typing.Union[ServiceStartup, str],
+        current: typing.Union[ServiceStatus, str],
+    ):
+        self.name = name
+        self.startup = startup
+        self.current = current
+
+    def is_running(self) -> bool:
+        """Return True if this service is running (in the active state)."""
+        return self.current == ServiceStatus.ACTIVE
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'ServiceInfo':
+        """Create new ServiceInfo object from dict parsed from JSON."""
+        try:
+            startup = ServiceStartup(d['startup'])
+        except ValueError:
+            startup = d['startup']
+        try:
+            current = ServiceStatus(d['current'])
+        except ValueError:
+            current = d['current']
+        return cls(
+            name=d['name'],
+            startup=startup,
+            current=current,
+        )
+
+    def __repr__(self):
+        return ('ServiceInfo('
+                'name={self.name!r}, '
+                'startup={self.startup}, '
+                'current={self.current})'
+                ).format(self=self)
+
+
+class FileType(enum.Enum):
+    """Enum of file types."""
+
+    FILE = 'file'
+    DIRECTORY = 'directory'
+    SYMLINK = 'symlink'
+    SOCKET = 'socket'
+    NAMED_PIPE = 'named-pipe'
+    DEVICE = 'device'
+    UNKNOWN = 'unknown'
+
+
+class FileInfo:
+    """Stat-like information about a single file."""
+
+    def __init__(
+        self,
+        path: str,
+        name: str,
+        type: typing.Union['FileType', str],
+        size: typing.Optional[int],
+        permissions: int,
+        last_modified: datetime.datetime,
+        user_id: typing.Optional[int],
+        user: typing.Optional[str],
+        group_id: typing.Optional[int],
+        group: typing.Optional[str],
+    ):
+        self.path = path
+        self.name = name
+        self.type = type
+        self.size = size
+        self.permissions = permissions
+        self.last_modified = last_modified
+        self.user_id = user_id
+        self.user = user
+        self.group_id = group_id
+        self.group = group
+
+    @classmethod
+    def from_dict(cls, d: typing.Dict) -> 'FileInfo':
+        """Create new FileInfo object from dict parsed from JSON."""
+        try:
+            file_type = FileType(d['type'])
+        except ValueError:
+            file_type = d['type']
+        return cls(
+            path=d['path'],
+            name=d['name'],
+            type=file_type,
+            size=d.get('size'),
+            permissions=int(d['permissions'], 8),
+            last_modified=_parse_timestamp(d['last-modified']),
+            user_id=d.get('user-id'),
+            user=d.get('user'),
+            group_id=d.get('group-id'),
+            group=d.get('group'),
+        )
+
+    def __repr__(self):
+        return ('FileInfo('
+                'path={self.path!r}, '
+                'name={self.name!r}, '
+                'type={self.type}, '
+                'size={self.size}, '
+                'permissions=0o{self.permissions:o}, '
+                'last_modified={self.last_modified!r}, '
+                'user_id={self.user_id}, '
+                'user={self.user!r}, '
+                'group_id={self.group_id}, '
+                'group={self.group!r})'
+                ).format(self=self)
+
+
+class Client:
+    """Pebble API client."""
+
+    def __init__(self, socket_path=None, opener=None, base_url='http://localhost', timeout=5.0):
+        """Initialize a client instance.
+
+        Defaults to using a Unix socket at socket_path (which must be specified
+        unless a custom opener is provided).
+        """
+        if opener is None:
+            if socket_path is None:
+                raise ValueError('no socket path provided')
+            opener = self._get_default_opener(socket_path)
+        self.opener = opener
+        self.base_url = base_url
+        self.timeout = timeout
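+
+    # Example (illustrative sketch, not part of the vendored library): talk to
+    # Pebble over the per-container socket described in ops.model.Container:
+    #
+    #     client = Client(socket_path='/charm/containers/myapp/pebble.socket')
+    #     print(client.get_system_info().version)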
+
+    @classmethod
+    def _get_default_opener(cls, socket_path):
+        """Build the default opener to use for requests (HTTP over Unix socket)."""
+        opener = urllib.request.OpenerDirector()
+        opener.add_handler(_UnixSocketHandler(socket_path))
+        opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
+        opener.add_handler(urllib.request.HTTPRedirectHandler())
+        opener.add_handler(urllib.request.HTTPErrorProcessor())
+        return opener
+
+    def _request(
+        self, method: str, path: str, query: typing.Dict = None, body: typing.Dict = None,
+    ) -> typing.Dict:
+        """Make a JSON request to the Pebble server with the given HTTP method and path.
+
+        If query dict is provided, it is encoded and appended as a query string
+        to the URL. If body dict is provided, it is serialized as JSON and used
+        as the HTTP body (with Content-Type: "application/json"). The response
+        body is decoded from JSON and returned.
+        """
+        headers = {'Accept': 'application/json'}
+        data = None
+        if body is not None:
+            data = json.dumps(body).encode('utf-8')
+            headers['Content-Type'] = 'application/json'
+
+        response = self._request_raw(method, path, query, headers, data)
+        self._ensure_content_type(response.headers, 'application/json')
+        return _json_loads(response.read())
+
+    @staticmethod
+    def _ensure_content_type(headers, expected):
+        """Parse Content-Type header from headers and ensure it's equal to expected.
+
+        Return a dict of any options in the header, e.g., {'boundary': ...}.
+        """
+        ctype, options = cgi.parse_header(headers.get('Content-Type', ''))
+        if ctype != expected:
+            raise ProtocolError('expected Content-Type {!r}, got {!r}'.format(expected, ctype))
+        return options
+
+    def _request_raw(
+        self, method: str, path: str, query: typing.Dict = None, headers: typing.Dict = None,
+        data: bytes = None,
+    ) -> http.client.HTTPResponse:
+        """Make a request to the Pebble server; return the raw HTTPResponse object."""
+        url = self.base_url + path
+        if query:
+            url = url + '?' + urllib.parse.urlencode(query)
+
+        if headers is None:
+            headers = {}
+        request = urllib.request.Request(url, method=method, data=data, headers=headers)
+
+        try:
+            response = self.opener.open(request, timeout=self.timeout)
+        except urllib.error.HTTPError as e:
+            code = e.code
+            status = e.reason
+            try:
+                body = _json_loads(e.read())
+                message = body['result']['message']
+            except (IOError, ValueError, KeyError) as e2:
+                # Will only happen on read error or if Pebble sends invalid JSON.
+                body = {}
+                message = '{} - {}'.format(type(e2).__name__, e2)
+            raise APIError(body, code, status, message)
+        except urllib.error.URLError as e:
+            raise ConnectionError(e.reason)
+
+        return response
+
+    def get_system_info(self) -> SystemInfo:
+        """Get system info."""
+        resp = self._request('GET', '/v1/system-info')
+        return SystemInfo.from_dict(resp['result'])
+
+    def get_warnings(self, select: WarningState = WarningState.PENDING) -> typing.List[Warning]:
+        """Get list of warnings in given state (pending or all)."""
+        query = {'select': select.value}
+        resp = self._request('GET', '/v1/warnings', query)
+        return [Warning.from_dict(w) for w in resp['result']]
+
+    def ack_warnings(self, timestamp: datetime.datetime) -> int:
+        """Acknowledge warnings up to given timestamp, return number acknowledged."""
+        body = {'action': 'okay', 'timestamp': timestamp.isoformat()}
+        resp = self._request('POST', '/v1/warnings', body=body)
+        return resp['result']
+
+    def get_changes(
+        self, select: ChangeState = ChangeState.IN_PROGRESS, service: str = None,
+    ) -> typing.List[Change]:
+        """Get list of changes in given state, filter by service name if given."""
+        query = {'select': select.value}
+        if service is not None:
+            query['for'] = service
+        resp = self._request('GET', '/v1/changes', query)
+        return [Change.from_dict(c) for c in resp['result']]
+
+    def get_change(self, change_id: ChangeID) -> Change:
+        """Get single change by ID."""
+        resp = self._request('GET', '/v1/changes/{}'.format(change_id))
+        return Change.from_dict(resp['result'])
+
+    def abort_change(self, change_id: ChangeID) -> Change:
+        """Abort change with given ID."""
+        body = {'action': 'abort'}
+        resp = self._request('POST', '/v1/changes/{}'.format(change_id), body=body)
+        return Change.from_dict(resp['result'])
+
+    def autostart_services(self, timeout: float = 30.0, delay: float = 0.1) -> ChangeID:
+        """Start the startup-enabled services and wait (poll) for them to be started.
+
+        Raises ChangeError if one or more of the services didn't start. If
+        timeout is 0, submit the action but don't wait; just return the change
+        ID immediately.
+        """
+        return self._services_action('autostart', [], timeout, delay)
+
+    def start_services(
+        self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
+    ) -> ChangeID:
+        """Start services by name and wait (poll) for them to be started.
+
+        Raises ChangeError if one or more of the services didn't start. If
+        timeout is 0, submit the action but don't wait; just return the change
+        ID immediately.
+        """
+        return self._services_action('start', services, timeout, delay)
+
+    def stop_services(
+        self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
+    ) -> ChangeID:
+        """Stop services by name and wait (poll) for them to be started.
+
+        Raises ChangeError if one or more of the services didn't stop. If
+        timeout is 0, submit the action but don't wait; just return the change
+        ID immediately.
+        """
+        return self._services_action('stop', services, timeout, delay)
+
+    def _services_action(
+        self, action: str, services: typing.Iterable[str], timeout: float, delay: float,
+    ) -> ChangeID:
+        if not isinstance(services, (list, tuple)):
+            raise TypeError('services must be a list of str, not {}'.format(
+                type(services).__name__))
+        for s in services:
+            if not isinstance(s, str):
+                raise TypeError('service names must be str, not {}'.format(type(s).__name__))
+
+        body = {'action': action, 'services': services}
+        resp = self._request('POST', '/v1/services', body=body)
+        change_id = ChangeID(resp['change'])
+        if timeout:
+            change = self.wait_change(change_id, timeout=timeout, delay=delay)
+            if change.err:
+                raise ChangeError(change.err, change)
+        return change_id
+
+    def wait_change(
+        self, change_id: ChangeID, timeout: float = 30.0, delay: float = 0.1,
+    ) -> Change:
+        """Poll change every delay seconds (up to timeout) for it to be ready."""
+        deadline = time.time() + timeout
+
+        while time.time() < deadline:
+            change = self.get_change(change_id)
+            if change.ready:
+                return change
+
+            time.sleep(delay)
+
+        raise TimeoutError(
+            'timed out waiting for change {} ({} seconds)'.format(change_id, timeout))
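+
+    # Example (illustrative sketch, not part of the vendored library): start a
+    # hypothetical service and report task details if the change errored:
+    #
+    #     try:
+    #         client.start_services(['srv'])
+    #     except ChangeError as e:
+    #         for task in e.change.tasks:
+    #             print(task.summary, task.status)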
+
+    def add_layer(
+            self, label: str, layer: typing.Union[str, dict, Layer], *, combine: bool = False):
+        """Dynamically add a new layer onto the Pebble configuration layers.
+
+        If combine is False (the default), append the new layer as the top
+        layer with the given label. If combine is True and the label already
+        exists, the two layers are combined into a single one considering the
+        layer override rules; if the layer doesn't exist, it is added as usual.
+        """
+        if not isinstance(label, str):
+            raise TypeError('label must be a str, not {}'.format(type(label).__name__))
+
+        if isinstance(layer, str):
+            layer_yaml = layer
+        elif isinstance(layer, dict):
+            layer_yaml = Layer(layer).to_yaml()
+        elif isinstance(layer, Layer):
+            layer_yaml = layer.to_yaml()
+        else:
+            raise TypeError('layer must be str, dict, or pebble.Layer, not {}'.format(
+                type(layer).__name__))
+
+        body = {
+            'action': 'add',
+            'combine': combine,
+            'label': label,
+            'format': 'yaml',
+            'layer': layer_yaml,
+        }
+        self._request('POST', '/v1/layers', body=body)
+
+    def get_plan(self) -> Plan:
+        """Get the Pebble plan (currently contains only combined services)."""
+        resp = self._request('GET', '/v1/plan', {'format': 'yaml'})
+        return Plan(resp['result'])
+
+    def get_services(self, names: typing.List[str] = None) -> typing.List[ServiceInfo]:
+        """Get the service status for the configured services.
+
+        If names is specified, only fetch the service status for the services
+        named.
+        """
+        query = None
+        if names is not None:
+            query = {'names': ','.join(names)}
+        resp = self._request('GET', '/v1/services', query)
+        return [ServiceInfo.from_dict(info) for info in resp['result']]
+
+    def pull(self, path: str, *, encoding: str = 'utf-8') -> typing.Union[typing.BinaryIO,
+                                                                          typing.TextIO]:
+        """Read a file's content from the remote system.
+
+        Args:
+            path: Path of the file to read from the remote system.
+            encoding: Encoding to use for decoding the file's bytes to str,
+                or None to specify no decoding.
+
+        Returns:
+            A readable file-like object, whose read() method will return str
+            objects decoded according to the specified encoding, or bytes if
+            encoding is None.
+        """
+        query = {
+            'action': 'read',
+            'path': path,
+        }
+        headers = {'Accept': 'multipart/form-data'}
+        response = self._request_raw('GET', '/v1/files', query, headers)
+
+        options = self._ensure_content_type(response.headers, 'multipart/form-data')
+        boundary = options.get('boundary', '')
+        if not boundary:
+            raise ProtocolError('invalid boundary {!r}'.format(boundary))
+
+        # We have to manually write the Content-Type with boundary, because
+        # email.parser expects the entire multipart message with headers.
+        parser = email.parser.BytesFeedParser()
+        parser.feed(b'Content-Type: multipart/form-data; boundary=' +
+                    boundary.encode('utf-8') + b'\r\n\r\n')
+
+        # Then read the rest of the response and feed it to the parser.
+        while True:
+            chunk = response.read(8192)
+            if not chunk:
+                break
+            parser.feed(chunk)
+        message = parser.close()
+
+        # Walk over the multipart parts and read content and metadata.
+        resp = None
+        content = None
+        for part in message.walk():
+            name = part.get_param('name', header='Content-Disposition')
+            if name == 'response':
+                resp = _json_loads(part.get_payload())
+            elif name == 'files':
+                filename = part.get_filename()
+                if filename != path:
+                    raise ProtocolError('path not expected: {}'.format(filename))
+                # decode=True, ironically, avoids decoding bytes to str
+                content = part.get_payload(decode=True)
+
+        if resp is None:
+            raise ProtocolError('no "response" field in multipart body')
+        self._raise_on_path_error(resp, path)
+
+        if content is None:
+            raise ProtocolError('no file content in multipart response')
+        if encoding is not None:
+            reader = io.StringIO(content.decode(encoding))
+        else:
+            reader = io.BytesIO(content)
+        return reader
+
+    @staticmethod
+    def _raise_on_path_error(resp, path):
+        result = resp['result'] or []  # in case it's null instead of []
+        paths = {item['path']: item for item in result}
+        if path not in paths:
+            raise ProtocolError('path not found in response metadata: {}'.format(resp))
+        error = paths[path].get('error')
+        if error:
+            raise PathError(error['kind'], error['message'])
+
+    def push(
+            self, path: str, source: typing.Union[bytes, str, typing.BinaryIO, typing.TextIO], *,
+            encoding: str = 'utf-8', make_dirs: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        """Write content to a given file path on the remote system.
+
+        Args:
+            path: Path of the file to write to on the remote system.
+            source: Source of data to write. This is either a concrete str or
+                bytes instance, or a readable file-like object.
+            encoding: Encoding to use for encoding source str to bytes, or
+                strings read from source if it is a TextIO type. Ignored if
+                source is bytes or BinaryIO.
+            make_dirs: If True, create parent directories if they don't exist.
+            permissions: Permissions (mode) to create file with (Pebble default
+                is 0o644).
+            user_id: UID for file.
+            user: Username for file (user_id takes precedence).
+            group_id: GID for file.
+            group: Group name for file (group_id takes precedence).
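+
+        Example (an illustrative sketch)::
+
+            client.push('/etc/demo.conf', 'port: 8080', make_dirs=True,
+                        permissions=0o600)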
+        """
+        info = self._make_auth_dict(permissions, user_id, user, group_id, group)
+        info['path'] = path
+        if make_dirs:
+            info['make-dirs'] = True
+        metadata = {
+            'action': 'write',
+            'files': [info],
+        }
+
+        multipart = MIMEMultipart('form-data')
+
+        part = MIMEBase('application', 'json')
+        part.add_header('Content-Disposition', 'form-data', name='request')
+        part.set_payload(json.dumps(metadata))
+        multipart.attach(part)
+
+        part = MIMEBase('application', 'octet-stream')
+        part.add_header('Content-Disposition', 'form-data', name='files', filename=path)
+        if hasattr(source, 'read'):
+            content = source.read()
+        else:
+            content = source
+        if isinstance(content, str):
+            content = content.encode(encoding)
+        part.set_payload(content)
+        multipart.attach(part)
+
+        data = multipart.as_bytes()  # must be called before accessing multipart['Content-Type']
+        headers = {
+            'Accept': 'application/json',
+            'Content-Type': multipart['Content-Type'],
+        }
+        response = self._request_raw('POST', '/v1/files', None, headers, data)
+        self._ensure_content_type(response.headers, 'application/json')
+        resp = _json_loads(response.read())
+        self._raise_on_path_error(resp, path)
+
+    @staticmethod
+    def _make_auth_dict(permissions, user_id, user, group_id, group) -> typing.Dict:
+        d = {}
+        if permissions is not None:
+            d['permissions'] = format(permissions, '03o')
+        if user_id is not None:
+            d['user-id'] = user_id
+        if user is not None:
+            d['user'] = user
+        if group_id is not None:
+            d['group-id'] = group_id
+        if group is not None:
+            d['group'] = group
+        return d
+
+    def list_files(self, path: str, *, pattern: str = None,
+                   itself: bool = False) -> typing.List[FileInfo]:
+        """Return list of file information from given path on remote system.
+
+        Args:
+            path: Path of the directory to list, or path of the file to return
+                information about.
+            pattern: If specified, filter the list to just the files that match,
+                for example "*.txt".
+            itself: If path refers to a directory, return information about the
+                directory itself, rather than its contents.
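+
+        Example (an illustrative sketch)::
+
+            for info in client.list_files('/etc', pattern='*.conf'):
+                print(info.path, info.size)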
+        """
+        query = {
+            'action': 'list',
+            'path': path,
+        }
+        if pattern:
+            query['pattern'] = pattern
+        if itself:
+            query['itself'] = 'true'
+        resp = self._request('GET', '/v1/files', query)
+        result = resp['result'] or []  # in case it's null instead of []
+        return [FileInfo.from_dict(d) for d in result]
+
+    def make_dir(
+            self, path: str, *, make_parents: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        """Create a directory on the remote system with the given attributes.
+
+        Args:
+            path: Path of the directory to create on the remote system.
+            make_parents: If True, create parent directories if they don't exist.
+            permissions: Permissions (mode) to create directory with (Pebble
+                default is 0o755).
+            user_id: UID for directory.
+            user: Username for directory (user_id takes precedence).
+            group_id: GID for directory.
+            group: Group name for directory (group_id takes precedence).
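+
+        Example (an illustrative sketch)::
+
+            client.make_dir('/var/lib/demo/cache', make_parents=True, permissions=0o700)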
+        """
+        info = self._make_auth_dict(permissions, user_id, user, group_id, group)
+        info['path'] = path
+        if make_parents:
+            info['make-parents'] = True
+        body = {
+            'action': 'make-dirs',
+            'dirs': [info],
+        }
+        resp = self._request('POST', '/v1/files', None, body)
+        self._raise_on_path_error(resp, path)
+
+    def remove_path(self, path: str, *, recursive: bool = False):
+        """Remove a file or directory on the remote system.
+
+        Args:
+            path: Path of the file or directory to delete from the remote system.
+            recursive: If True, recursively delete path and everything under it.
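+
+        Example (an illustrative sketch)::
+
+            client.remove_path('/var/lib/demo/cache', recursive=True)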
+        """
+        info = {'path': path}
+        if recursive:
+            info['recursive'] = True
+        body = {
+            'action': 'remove',
+            'paths': [info],
+        }
+        resp = self._request('POST', '/v1/files', None, body)
+        self._raise_on_path_error(resp, path)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/storage.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..562cde770bcc3b5961aa6086372f0a2529bbd317
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/storage.py
@@ -0,0 +1,374 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Structures to offer storage to the charm (through Juju or locally)."""
+
+from datetime import timedelta
+import pickle
+import shutil
+import subprocess
+import sqlite3
+import typing
+
+import yaml
+
+
+def _run(args, **kw):
+    cmd = shutil.which(args[0])
+    if cmd is None:
+        raise FileNotFoundError(args[0])
+    return subprocess.run([cmd, *args[1:]], **kw)
+
+
+class SQLiteStorage:
+    """Storage using SQLite backend."""
+
+    DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+    def __init__(self, filename):
+        # The isolation_level argument is set to None such that the implicit
+        # transaction management behavior of the sqlite3 module is disabled.
+        self._db = sqlite3.connect(str(filename),
+                                   isolation_level=None,
+                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+        self._setup()
+
+    def _setup(self):
+        """Make the database ready to be used as storage."""
+        # Make sure that the database is locked until the connection is closed,
+        # not until the transaction ends.
+        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+        c = self._db.execute("BEGIN")
+        c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+        if c.fetchone()[0] == 0:
+            # Keep in mind what might happen if the process dies somewhere below.
+            # The system must not be rendered permanently broken by that.
+            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+            self._db.execute('''
+                CREATE TABLE notice (
+                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+                  event_path TEXT,
+                  observer_path TEXT,
+                  method_name TEXT)
+                ''')
+            self._db.commit()
+
+    def close(self):
+        """Part of the Storage API, close the storage backend."""
+        self._db.close()
+
+    def commit(self):
+        """Part of the Storage API, commit latest changes in the storage backend."""
+        self._db.commit()
+
+    # There's commit but no rollback. For abort to be supported, we'll need logic that
+    # can rollback decisions made by third-party code in terms of the internal state
+    # of objects that have been snapshotted, and hooks to let them know about it and
+    # take the needed actions to undo their logic until the last snapshot.
+    # This is doable but will significantly increase the chances of mistakes.
+
+    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+        """Part of the Storage API, persist a snapshot data under the given handle.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+            snapshot_data: The data to be persisted (as returned by Object.snapshot()). This
+                might be a dict/tuple/int, but must only contain 'simple' python types.
+        """
+        # Use pickle for serialization, so the value remains portable.
+        raw_data = pickle.dumps(snapshot_data)
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))
+
+    def load_snapshot(self, handle_path: str) -> typing.Any:
+        """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+
+        Raises:
+            NoSnapshotError: if there is no snapshot for the given handle_path.
+        """
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return pickle.loads(row[0])
+        raise NoSnapshotError(handle_path)
+
+    def drop_snapshot(self, handle_path: str):
+        """Part of the Storage API, remove a snapshot that was previously saved.
+
+        Dropping a snapshot that doesn't exist is treated as a no-op.
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the name of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record an notice (event and observer)."""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+             WHERE event_path=?
+               AND observer_path=?
+               AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: str = None) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 WHERE event_path=?
+                 ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
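+
+
+# Example usage of SQLiteStorage (an illustrative sketch):
+#
+#     store = SQLiteStorage(':memory:')
+#     store.save_snapshot('MyObject[1]', {'count': 1})
+#     assert store.load_snapshot('MyObject[1]') == {'count': 1}
+#     store.save_notice('MyEvent[7]', 'MyCharm[0]', '_on_my_event')
+#     assert next(store.notices()) == ('MyEvent[7]', 'MyCharm[0]', '_on_my_event')
+#     store.commit()
+#     store.close()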
+
+
+class JujuStorage:
+    """Storing the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
+    """
+
+    NOTICE_KEY = "#notices#"
+
+    def __init__(self, backend: '_JujuStorageBackend' = None):
+        self._backend = backend
+        if backend is None:
+            self._backend = _JujuStorageBackend()
+
+    def close(self):
+        """Part of the Storage API, close the storage backend.
+
+        Nothing to be done for Juju backend, as it's transactional.
+        """
+
+    def commit(self):
+        """Part of the Storage API, commit latest changes in the storage backend.
+
+        Nothing to be done for Juju backend, as it's transactional.
+        """
+
+    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+        """Part of the Storage API, persist a snapshot data under the given handle.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+            snapshot_data: The data to be persisted (as returned by Object.snapshot()). This
+                might be a dict/tuple/int, but must only contain 'simple' python types.
+        """
+        self._backend.set(handle_path, snapshot_data)
+
+    def load_snapshot(self, handle_path):
+        """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+
+        Raises:
+            NoSnapshotError: if there is no snapshot for the given handle_path.
+        """
+        try:
+            content = self._backend.get(handle_path)
+        except KeyError:
+            raise NoSnapshotError(handle_path)
+        return content
+
+    def drop_snapshot(self, handle_path):
+        """Part of the Storage API, remove a snapshot that was previously saved.
+
+        Dropping a snapshot that doesn't exist is treated as a no-op.
+        """
+        self._backend.delete(handle_path)
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str):
+        """Part of the Storage API, record an notice (event and observer)."""
+        notice_list = self._load_notice_list()
+        notice_list.append([event_path, observer_path, method_name])
+        self._save_notice_list(notice_list)
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str):
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        notice_list = self._load_notice_list()
+        notice_list.remove([event_path, observer_path, method_name])
+        self._save_notice_list(notice_list)
+
+    def notices(self, event_path: str = None):
+        """Part of the Storage API, return all notices that begin with event_path.
+
+        Args:
+            event_path: If supplied, will only yield events that match event_path. If not
+                supplied (or None/'') will return all events.
+
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples
+        """
+        notice_list = self._load_notice_list()
+        for row in notice_list:
+            if event_path and row[0] != event_path:
+                continue
+            yield tuple(row)
+
+    def _load_notice_list(self) -> typing.List[typing.Tuple[str]]:
+        """Load a notice list from current key.
+
+        Returns:
+            List of (event_path, observer_path, method_name) tuples; empty if the key is
+                missing or its value is None.
+        """
+        try:
+            notice_list = self._backend.get(self.NOTICE_KEY)
+        except KeyError:
+            return []
+        if notice_list is None:
+            return []
+        return notice_list
+
+    def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None:
+        """Save a notice list under current key.
+
+        Args:
+            notices: List of (event_path, observer_path, method_name) tuples.
+        """
+        self._backend.set(self.NOTICE_KEY, notices)
+
+
+class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)):
+    """Handle a couple basic python types.
+
+    yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one
+    that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just
+    subclass SafeLoader and add tuples back in.
+    """
+    # Taken from the example at:
+    # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml
+
+    construct_python_tuple = yaml.Loader.construct_python_tuple
+
+
+_SimpleLoader.add_constructor(
+    u'tag:yaml.org,2002:python/tuple',
+    _SimpleLoader.construct_python_tuple)
+
+
+class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)):
+    """Add types supported by 'marshal'.
+
+    YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So
+    we want to only support dumping out types that are safe to load.
+    """
+
+
+_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple
+_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple)
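+
+# Round-trip sketch (illustrative only): tuples survive a dump/load cycle with
+# these classes, while plain SafeDumper/SafeLoader would reject them.
+#
+#     text = yaml.dump((1, 2), Dumper=_SimpleDumper)
+#     assert yaml.load(text, Loader=_SimpleLoader) == (1, 2)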
+
+
+def juju_backend_available() -> bool:
+    """Check if Juju state storage is available."""
+    p = shutil.which('state-get')
+    return p is not None
+
+
+class _JujuStorageBackend:
+    """Implements the interface from the Operator framework to Juju's state-get/set/etc."""
+
+    def set(self, key: str, value: typing.Any) -> None:
+        """Set a key to a given value.
+
+        Args:
+            key: The string key that will be used to find the value later
+            value: Arbitrary content that will be returned by get().
+
+        Raises:
+            CalledProcessError: if 'state-set' returns an error code.
+        """
+        # default_flow_style=None means that it can use Block for
+        # complex types (types that have nested types) but use flow
+        # for simple types (like an array). Not all versions of PyYAML
+        # have the same default style.
+        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+        content = yaml.dump(
+            {key: encoded_value}, encoding='utf8', default_style='|',
+            default_flow_style=False,
+            Dumper=_SimpleDumper)
+        _run(["state-set", "--file", "-"], input=content, check=True)
+
+    def get(self, key: str) -> typing.Any:
+        """Get the bytes value associated with a given key.
+
+        Args:
+            key: The string key that will be used to find the value
+        Raises:
+            CalledProcessError: if 'state-get' returns an error code.
+        """
+        # We don't capture stderr here so it can end up in debug logs.
+        p = _run(["state-get", key], stdout=subprocess.PIPE, check=True, universal_newlines=True)
+        if p.stdout == '' or p.stdout == '\n':
+            raise KeyError(key)
+        return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+    def delete(self, key: str) -> None:
+        """Remove a key from being tracked.
+
+        Args:
+            key: The key to stop storing
+        Raises:
+            CalledProcessError: if 'state-delete' returns an error code.
+        """
+        _run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+    """Exception to flag that there is no snapshot for the given handle_path."""
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/testing.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a4048ef0e6e97f76a3693f428897a01229faabb
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/testing.py
@@ -0,0 +1,1111 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Infrastructure to build unittests for Charms using the Operator Framework."""
+
+import datetime
+import inspect
+import pathlib
+import random
+import tempfile
+import typing
+from contextlib import contextmanager
+from textwrap import dedent
+
+from ops import (
+    charm,
+    framework,
+    model,
+    pebble,
+    storage,
+)
+from ops._private import yaml
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+            parent directory of the Charm, and if not found fall back to a trivial
+            'name: test-charm' metadata.
+        actions: A string or file-like object containing the contents of
+            actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+            parent directory of the Charm.
+        config: A string or file-like object containing the contents of
+            config.yaml. If not supplied, we will look for a 'config.yaml' file in the
+            parent directory of the Charm.
+    """
+
+    def __init__(
+            self,
+            charm_cls: typing.Type[charm.CharmBase],
+            *,
+            meta: OptionalYAML = None,
+            actions: OptionalYAML = None,
+            config: OptionalYAML = None):
+        self._charm_cls = charm_cls
+        self._charm = None
+        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
+        self._meta = self._create_meta(meta, actions)
+        self._unit_name = self._meta.name + '/0'
+        self._framework = None
+        self._hooks_enabled = True
+        self._relation_id_counter = 0
+        self._backend = _TestingModelBackend(self._unit_name, self._meta)
+        self._model = model.Model(self._meta, self._backend)
+        self._storage = storage.SQLiteStorage(':memory:')
+        self._oci_resources = {}
+        self._framework = framework.Framework(
+            self._storage, self._charm_dir, self._meta, self._model)
+        self._update_config(key_values=self._load_config_defaults(config))
+
+    @property
+    def charm(self) -> charm.CharmBase:
+        """Return the instance of the charm class that was passed to __init__.
+
+        Note that the Charm is not instantiated until you have called
+        :meth:`.begin()`.
+        """
+        return self._charm
+
+    @property
+    def model(self) -> model.Model:
+        """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+        return self._model
+
+    @property
+    def framework(self) -> framework.Framework:
+        """Return the Framework that is being driven by this Harness."""
+        return self._framework
+
+    def begin(self) -> None:
+        """Instantiate the Charm and start handling events.
+
+        Before calling :meth:`begin`, there is no Charm instance, so changes to the Model won't
+        emit events. You must call :meth:`.begin` before :attr:`.charm` is valid.
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot call the begin method on the harness more than once')
+
+        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+        # the original class against multiple Frameworks. So create a locally defined class
+        # and register it.
+        # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+        #       class attributes, which should clean up this ugliness. The API can stay the same.
+        class TestEvents(self._charm_cls.on.__class__):
+            pass
+
+        TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+        class TestCharm(self._charm_cls):
+            on = TestEvents()
+
+        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+        # rather than TestCharm has no attribute foo.
+        TestCharm.__name__ = self._charm_cls.__name__
+        self._charm = TestCharm(self._framework)
+
+    def begin_with_initial_hooks(self) -> None:
+        """Called when you want the Harness to fire the same hooks that Juju would fire at startup.
+
+        This triggers install, relation-created, config-changed, start, and any relation-joined
+        hooks, based on what relations have been defined before you called begin().
+        Note that all of these are fired before returning control to the test suite, so if you
+        want to introspect what happens at each step, you need to fire them directly
+        (eg Charm.on.install.emit()).
+
+        To use this with all the normal hooks, you should instantiate the harness, set up any
+        relations that you want active when the charm starts, and then call this method.
+
+        Example::
+
+            harness = Harness(MyCharm)
+            # Do initial setup here
+            relation_id = harness.add_relation('db', 'postgresql')
+            harness.add_relation_unit(relation_id, 'postgresql/0')
+            harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+            harness.set_leader(True)
+            harness.update_config({'initial': 'config'})
+            harness.begin_with_initial_hooks()
+            # This will fire:
+            # install, db-relation-created('postgresql'), leader-elected, config-changed, start,
+            # db-relation-joined('postgresql/0'), db-relation-changed('postgresql/0')
+        """
+        self.begin()
+        # TODO: jam 2020-08-03 This should also handle storage-attached hooks once we have support
+        #  for dealing with storage.
+        self._charm.on.install.emit()
+        # Juju itself iterates what relation to fire based on a map[int]relation, so it doesn't
+        # guarantee a stable ordering between relation events. It *does* give a stable ordering
+        # of joined units for a given relation.
+        items = list(self._meta.relations.items())
+        random.shuffle(items)
+        this_app_name = self._meta.name
+        for relname, rel_meta in items:
+            if rel_meta.role == charm.RelationRole.peer:
+                # If the user has directly added a relation, leave it be, but otherwise ensure
+                # that peer relations are always established before leader-elected.
+                rel_ids = self._backend._relation_ids_map.get(relname)
+                if rel_ids is None:
+                    self.add_relation(relname, self._meta.name)
+                else:
+                    random.shuffle(rel_ids)
+                    for rel_id in rel_ids:
+                        self._emit_relation_created(relname, rel_id, this_app_name)
+            else:
+                rel_ids = self._backend._relation_ids_map.get(relname, [])
+                random.shuffle(rel_ids)
+                for rel_id in rel_ids:
+                    app_name = self._backend._relation_app_and_units[rel_id]["app"]
+                    self._emit_relation_created(relname, rel_id, app_name)
+        if self._backend._is_leader:
+            self._charm.on.leader_elected.emit()
+        else:
+            self._charm.on.leader_settings_changed.emit()
+        self._charm.on.config_changed.emit()
+        self._charm.on.start.emit()
+        all_ids = list(self._backend._relation_names.items())
+        random.shuffle(all_ids)
+        for rel_id, rel_name in all_ids:
+            rel_app_and_units = self._backend._relation_app_and_units[rel_id]
+            app_name = rel_app_and_units["app"]
+            # Note: Juju *does* fire relation events for a given relation in the sorted order of
+            # the unit names. It also always fires relation-changed immediately after
+            # relation-joined for the same unit.
+            # Juju only fires relation-changed (app) if there is data for the related application
+            relation = self._model.get_relation(rel_name, rel_id)
+            if self._backend._relation_data[rel_id].get(app_name):
+                app = self._model.get_app(app_name)
+                self._charm.on[rel_name].relation_changed.emit(
+                    relation, app, None)
+            for unit_name in sorted(rel_app_and_units["units"]):
+                remote_unit = self._model.get_unit(unit_name)
+                self._charm.on[rel_name].relation_joined.emit(
+                    relation, remote_unit.app, remote_unit)
+                self._charm.on[rel_name].relation_changed.emit(
+                    relation, remote_unit.app, remote_unit)
+
+    def cleanup(self) -> None:
+        """Called by your test infrastructure to cleanup any temporary directories/files/etc.
+
+        Currently this only needs to be called if you test with resources. But it is reasonable
+        to always include a `testcase.addCleanup(harness.cleanup)` just in case.
+        """
+        self._backend._cleanup()
+
+    def _create_meta(self, charm_metadata, action_metadata):
+        """Create a CharmMeta object.
+
+        Handle the cases where a user doesn't supply explicit metadata snippets.
+        """
+        filename = inspect.getfile(self._charm_cls)
+        charm_dir = pathlib.Path(filename).parents[1]
+
+        if charm_metadata is None:
+            metadata_path = charm_dir / 'metadata.yaml'
+            if metadata_path.is_file():
+                charm_metadata = metadata_path.read_text()
+                self._charm_dir = charm_dir
+            else:
+                # The simplest of metadata that the framework can support
+                charm_metadata = 'name: test-charm'
+        elif isinstance(charm_metadata, str):
+            charm_metadata = dedent(charm_metadata)
+
+        if action_metadata is None:
+            actions_path = charm_dir / 'actions.yaml'
+            if actions_path.is_file():
+                action_metadata = actions_path.read_text()
+                self._charm_dir = charm_dir
+        elif isinstance(action_metadata, str):
+            action_metadata = dedent(action_metadata)
+
+        return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+    def _load_config_defaults(self, charm_config):
+        """Load default values from config.yaml.
+
+        Handle the case where a user doesn't supply explicit config snippets.
+        """
+        filename = inspect.getfile(self._charm_cls)
+        charm_dir = pathlib.Path(filename).parents[1]
+
+        if charm_config is None:
+            config_path = charm_dir / 'config.yaml'
+            if config_path.is_file():
+                charm_config = config_path.read_text()
+                self._charm_dir = charm_dir
+            else:
+                # The simplest of config that the framework can support
+                charm_config = '{}'
+        elif isinstance(charm_config, str):
+            charm_config = dedent(charm_config)
+        charm_config = yaml.safe_load(charm_config)
+        charm_config = charm_config.get('options', {})
+        return {key: value['default'] for key, value in charm_config.items()
+                if 'default' in value}
+
+    def add_oci_resource(self, resource_name: str,
+                         contents: typing.Mapping[str, str] = None) -> None:
+        """Add oci resources to the backend.
+
+        This will register an oci resource and create a temporary file for processing metadata
+        about the resource. A default set of values will be used for all the file contents
+        unless a specific contents dict is provided.
+
+        Args:
+            resource_name: Name of the resource to add custom contents to.
+            contents: Optional custom dict to write for the named resource.
+        """
+        if not contents:
+            contents = {'registrypath': 'registrypath',
+                        'username': 'username',
+                        'password': 'password',
+                        }
+        if resource_name not in self._meta.resources.keys():
+            raise RuntimeError('Resource {} is not a defined resource'.format(resource_name))
+        if self._meta.resources[resource_name].type != "oci-image":
+            raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name))
+
+        as_yaml = yaml.safe_dump(contents)
+        self._backend._resources_map[resource_name] = ('contents.yaml', as_yaml)
+
+    def add_resource(self, resource_name: str, content: typing.AnyStr) -> None:
+        """Add content for a resource to the backend.
+
+        This will register the content, so that a call to `Model.resources.fetch(resource_name)`
+        will return a path to a file containing that content.
+
+        Args:
+            resource_name: The name of the resource being added
+            content: Either string or bytes content, which will be the content of the filename
+                returned by resource-get. If content is a string, it will be encoded as utf-8.
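+
+        Example (an illustrative sketch, assuming metadata.yaml defines a
+        'file'-type resource named 'config')::
+
+            harness.add_resource('config', 'key: value')
+            path = harness.model.resources.fetch('config')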
+        """
+        if resource_name not in self._meta.resources.keys():
+            raise RuntimeError('Resource {} is not a defined resource'.format(resource_name))
+        record = self._meta.resources[resource_name]
+        if record.type != "file":
+            raise RuntimeError(
+                'Resource {} is not a file, but actually {}'.format(resource_name, record.type))
+        filename = record.filename
+        if filename is None:
+            filename = resource_name
+
+        self._backend._resources_map[resource_name] = (filename, content)
+
+    def populate_oci_resources(self) -> None:
+        """Populate all OCI resources."""
+        for name, data in self._meta.resources.items():
+            if data.type == "oci-image":
+                self.add_oci_resource(name)
+
+    def disable_hooks(self) -> None:
+        """Stop emitting hook events when the model changes.
+
+        This can be used by developers to stop changes to the model from emitting events that
+        the charm will react to. Call :meth:`.enable_hooks`
+        to re-enable them.
+        """
+        self._hooks_enabled = False
+
+    def enable_hooks(self) -> None:
+        """Re-enable hook events from charm.on when the model is changed.
+
+        By default hook events are enabled once you call :meth:`.begin`,
+        but if you have used :meth:`.disable_hooks`, this can be used to
+        enable them again.
+        """
+        self._hooks_enabled = True
+
+    @contextmanager
+    def hooks_disabled(self):
+        """A context manager to run code with hooks disabled.
+
+        Example::
+
+            with harness.hooks_disabled():
+                # things in here don't fire events
+                harness.set_leader(True)
+                harness.update_config(unset=['foo', 'bar'])
+            # things here will again fire events
+        """
+        if self._hooks_enabled:
+            self.disable_hooks()
+            try:
+                yield None
+            finally:
+                self.enable_hooks()
+        else:
+            yield None
+
+    def _next_relation_id(self):
+        rel_id = self._relation_id_counter
+        self._relation_id_counter += 1
+        return rel_id
+
+    def add_relation(self, relation_name: str, remote_app: str) -> int:
+        """Declare that there is a new relation between this app and `remote_app`.
+
+        Args:
+            relation_name: The relation on Charm that is being related to
+            remote_app: The name of the application that is being related to
+
+        Return:
+            The relation_id created by this add_relation.
+        """
+        rel_id = self._next_relation_id()
+        self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
+        self._backend._relation_names[rel_id] = relation_name
+        self._backend._relation_list_map[rel_id] = []
+        self._backend._relation_data[rel_id] = {
+            remote_app: {},
+            self._backend.unit_name: {},
+            self._backend.app_name: {},
+        }
+        self._backend._relation_app_and_units[rel_id] = {
+            "app": remote_app,
+            "units": [],
+        }
+        # Reload the relation_ids list
+        if self._model is not None:
+            self._model.relations._invalidate(relation_name)
+        self._emit_relation_created(relation_name, rel_id, remote_app)
+        return rel_id
+
+    def _emit_relation_created(self, relation_name: str, relation_id: int,
+                               remote_app: str) -> None:
+        """Trigger relation-created for a given relation with a given remote application."""
+        if self._charm is None or not self._hooks_enabled:
+            return
+        relation = self._model.get_relation(relation_name, relation_id)
+        app = self._model.get_app(remote_app)
+        self._charm.on[relation_name].relation_created.emit(
+            relation, app)
+
+    def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
+        """Add a new unit to a relation.
+
+        Example::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.add_relation_unit(rel_id, 'postgresql/0')
+
+        This will trigger a `relation_joined` event. This would naturally be
+        followed by a `relation_changed` event, which you can trigger with
+        :meth:`.update_relation_data`. This separation is artificial in the
+        sense that Juju will always fire the two, but is intended to make
+        testing relations and their data bags slightly more natural.
+
+        Args:
+            relation_id: The integer relation identifier (as returned by add_relation).
+            remote_unit_name: A string representing the remote unit that is being added.
+
+        Return:
+            None
+        """
+        self._backend._relation_list_map[relation_id].append(remote_unit_name)
+        self._backend._relation_data[relation_id][remote_unit_name] = {}
+        # TODO: jam 2020-08-03 This is where we could assert that the unit name matches the
+        #  application name (eg you don't have a relation to 'foo' but add units of 'bar/0'
+        self._backend._relation_app_and_units[relation_id]["units"].append(remote_unit_name)
+        relation_name = self._backend._relation_names[relation_id]
+        # Make sure that the Model reloads the relation_list for this relation_id, as well as
+        # reloading the relation data for this unit.
+        if self._model is not None:
+            remote_unit = self._model.get_unit(remote_unit_name)
+            relation = self._model.get_relation(relation_name, relation_id)
+            unit_cache = relation.data.get(remote_unit, None)
+            if unit_cache is not None:
+                unit_cache._invalidate()
+            self._model.relations._invalidate(relation_name)
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on[relation_name].relation_joined.emit(
+            relation, remote_unit.app, remote_unit)
+
+    def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
+        """Get the relation data bucket for a single app or unit in a given relation.
+
+        This ignores all of the safety checks of who can and can't see data in relations (eg,
+        non-leaders can't read their own application's relation data because there are no events
+        that keep that data up-to-date for the unit).
+
+        Args:
+            relation_id: The relation whose content we want to look at.
+            app_or_unit: The name of the application or unit whose data we want to read
+        Return:
+            A dict containing the relation data for `app_or_unit` or None.
+
+        Raises:
+            KeyError: if relation_id doesn't exist
+        """
+        return self._backend._relation_data[relation_id].get(app_or_unit, None)
+
+    def get_pod_spec(self) -> typing.Tuple[typing.Mapping, typing.Mapping]:
+        """Return the content of the pod spec as last set by the charm.
+
+        This returns both the pod spec and any k8s_resources that were supplied.
+        See the signature of Model.pod.set_spec
+        """
+        return self._backend._pod_spec
+
+    def get_container_pebble_plan(
+            self, container_name: str
+    ) -> pebble.Plan:
+        """Return the current Plan that pebble is executing for the given container.
+
+        Args:
+            container_name: The simple name of the associated container
+        Return:
+            The pebble.Plan for this container. You can use :meth:`pebble.Plan.to_yaml` to get
+            a string form for the content. Will raise KeyError if no pebble client exists
+            for that container name. (should only happen if container is not present in
+            metadata.yaml)
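+
+        Example (an illustrative sketch, assuming metadata.yaml defines a
+        container named 'workload')::
+
+            plan = harness.get_container_pebble_plan('workload')
+            print(plan.to_yaml())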
+        """
+        socket_path = '/charm/containers/{}/pebble.socket'.format(container_name)
+        client = self._backend._pebble_clients.get(socket_path)
+        if client is None:
+            raise KeyError('no known pebble client for container "{}"'.format(container_name))
+        return client.get_plan()
+
+    def get_workload_version(self) -> str:
+        """Read the workload version that was set by the unit."""
+        return self._backend._workload_version
+
+    def set_model_name(self, name: str) -> None:
+        """Set the name of the Model that this is representing.
+
+        This cannot be called once begin() has been called. But it lets you set the value that
+        will be returned by Model.name.
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot set the Model name after begin()')
+        self._backend.model_name = name
+
+    def update_relation_data(
+            self,
+            relation_id: int,
+            app_or_unit: str,
+            key_values: typing.Mapping,
+    ) -> None:
+        """Update the relation data for a given unit or application in a given relation.
+
+        This also triggers the `relation_changed` event for this relation_id.
+
+        Args:
+            relation_id: The integer relation_id representing this relation.
+            app_or_unit: The unit or application name that is being updated.
+                This can be the local or remote application.
+            key_values: Each key/value will be updated in the relation data.
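+
+        Example (an illustrative sketch, continuing the relation set up in the
+        class docstring)::
+
+            harness.update_relation_data(relation_id, 'postgresql/0',
+                                         {'db-host': '10.1.2.3'})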
+        """
+        relation_name = self._backend._relation_names[relation_id]
+        relation = self._model.get_relation(relation_name, relation_id)
+        if '/' in app_or_unit:
+            entity = self._model.get_unit(app_or_unit)
+        else:
+            entity = self._model.get_app(app_or_unit)
+        rel_data = relation.data.get(entity, None)
+        if rel_data is not None:
+            # rel_data may have cached now-stale data, so _invalidate() it.
+            # Note, this won't cause the data to be loaded if it wasn't already.
+            rel_data._invalidate()
+
+        new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
+        for k, v in key_values.items():
+            if v == '':
+                new_values.pop(k, None)
+            else:
+                new_values[k] = v
+        self._backend._relation_data[relation_id][app_or_unit] = new_values
+
+        if app_or_unit == self._model.unit.name:
+            # No events for our own unit
+            return
+        if app_or_unit == self._model.app.name:
+            # updating our own app only generates an event if it is a peer relation and we
+            # aren't the leader
+            is_peer = self._meta.relations[relation_name].role.is_peer()
+            if not is_peer:
+                return
+            if self._model.unit.is_leader():
+                return
+        self._emit_relation_changed(relation_id, app_or_unit)
+
+    def _emit_relation_changed(self, relation_id, app_or_unit):
+        if self._charm is None or not self._hooks_enabled:
+            return
+        rel_name = self._backend._relation_names[relation_id]
+        relation = self.model.get_relation(rel_name, relation_id)
+        if '/' in app_or_unit:
+            app_name = app_or_unit.split('/')[0]
+            unit_name = app_or_unit
+            app = self.model.get_app(app_name)
+            unit = self.model.get_unit(unit_name)
+            args = (relation, app, unit)
+        else:
+            app_name = app_or_unit
+            app = self.model.get_app(app_name)
+            args = (relation, app)
+        self._charm.on[rel_name].relation_changed.emit(*args)
+
+    def _update_config(
+            self,
+            key_values: typing.Mapping[str, str] = None,
+            unset: typing.Iterable[str] = (),
+    ) -> None:
+        """Update the config as seen by the charm.
+
+        This will *not* trigger a `config_changed` event, and is intended for internal use.
+
+        Note that the `key_values` mapping will only add or update configuration items.
+        To remove existing ones, see the `unset` parameter.
+
+        Args:
+            key_values: A Mapping of key:value pairs to update in config.
+            unset: An iterable of keys to remove from Config. (Note that this does
+                not currently reset the config values to the default defined in config.yaml.)
+        """
+        # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
+        # is a LazyMapping, but its _load returns a dict and this method mutates
+        # the dict that Config is caching. Arguably we should be doing some sort
+        # of charm.framework.model.config._invalidate()
+        config = self._backend._config
+        if key_values is not None:
+            for key, value in key_values.items():
+                config[key] = value
+        for key in unset:
+            config.pop(key, None)
+
+    def update_config(
+            self,
+            key_values: typing.Mapping[str, str] = None,
+            unset: typing.Iterable[str] = (),
+    ) -> None:
+        """Update the config as seen by the charm.
+
+        This will trigger a `config_changed` event.
+
+        Note that the `key_values` mapping will only add or update configuration items.
+        To remove existing ones, see the `unset` parameter.
+
+        Args:
+            key_values: A Mapping of key:value pairs to update in config.
+            unset: An iterable of keys to remove from Config. (Note that this does
+                not currently reset the config values to the default defined in config.yaml.)
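+
+        Example (an illustrative sketch; the keys are hypothetical)::
+
+            harness.update_config({'log-level': 'debug'}, unset=['retired-key'])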
+        """
+        self._update_config(key_values, unset)
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on.config_changed.emit()
+
+    def set_leader(self, is_leader: bool = True) -> None:
+        """Set whether this unit is the leader or not.
+
+        If this charm becomes a leader then `leader_elected` will be triggered.
+
+        Args:
+            is_leader: True/False as to whether this unit is the leader.
+        """
+        was_leader = self._backend._is_leader
+        self._backend._is_leader = is_leader
+        # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
+        # the Model objects, so this automatically gets noticed.
+        if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+            self._charm.on.leader_elected.emit()
+
+    def _get_backend_calls(self, reset: bool = True) -> list:
+        """Return the calls that we have made to the TestingModelBackend.
+
+        This is useful mostly for testing the framework itself, so that we can assert that we
+        do/don't trigger extra calls.
+
+        Args:
+            reset: If True, reset the calls list back to empty, if false, the call list is
+                preserved.
+
+        Return:
+            ``[(call1, args...), (call2, args...)]``
+        """
+        calls = self._backend._calls.copy()
+        if reset:
+            self._backend._calls.clear()
+        return calls
+
+
+def _record_calls(cls):
+    """Replace methods on cls with methods that record that they have been called.
+
+    Iterate all attributes of cls, and for public methods, replace them with a wrapped method
+    that records the method called along with the arguments and keyword arguments.
+    """
+    for meth_name, orig_method in cls.__dict__.items():
+        if meth_name.startswith('_'):
+            continue
+
+        def decorator(orig_method):
+            def wrapped(self, *args, **kwargs):
+                full_args = (orig_method.__name__,) + args
+                if kwargs:
+                    full_args = full_args + (kwargs,)
+                self._calls.append(full_args)
+                return orig_method(self, *args, **kwargs)
+            return wrapped
+
+        setattr(cls, meth_name, decorator(orig_method))
+    return cls
+
+
+def _copy_docstrings(source_cls):
+    """Copy the docstrings from source_cls to target_cls.
+
+    Use this as:
+      @_copy_docstrings(source_class)
+      class TargetClass:
+
+    And for any public method that exists on both classes, it will copy the
+    __doc__ for that method.
+    """
+    def decorator(target_cls):
+        for meth_name, orig_method in target_cls.__dict__.items():
+            if meth_name.startswith('_'):
+                continue
+            source_method = source_cls.__dict__.get(meth_name)
+            if source_method is not None and source_method.__doc__:
+                target_cls.__dict__[meth_name].__doc__ = source_method.__doc__
+        return target_cls
+    return decorator
+
+
+class _ResourceEntry:
+    """Tracks the contents of a Resource."""
+
+    def __init__(self, resource_name):
+        self.name = resource_name
+
+
+@_copy_docstrings(model._ModelBackend)
+@_record_calls
+class _TestingModelBackend:
+    """This conforms to the interface for ModelBackend but provides canned data.
+
+    DO NOT use this class directly, it is used by `Harness`_ to drive the model.
+    `Harness`_ is responsible for maintaining the internal consistency of the values here,
+    as the only public methods of this type are for implementing ModelBackend.
+    """
+
+    def __init__(self, unit_name, meta):
+        self.unit_name = unit_name
+        self.app_name = self.unit_name.split('/')[0]
+        self.model_name = None
+        self._calls = []
+        self._meta = meta
+        self._is_leader = None
+        self._relation_ids_map = {}  # relation name to [relation_ids,...]
+        self._relation_names = {}  # reverse map from relation_id to relation_name
+        self._relation_list_map = {}  # relation_id: [unit_name,...]
+        self._relation_data = {}  # {relation_id: {name: data}}
+        # {relation_id: {"app": app_name, "units": ["app/0",...]}
+        self._relation_app_and_units = {}
+        self._config = {}
+        self._is_leader = False
+        self._resources_map = {}  # {resource_name: resource_content}
+        self._pod_spec = None
+        self._app_status = {'status': 'unknown', 'message': ''}
+        self._unit_status = {'status': 'maintenance', 'message': ''}
+        self._workload_version = None
+        self._resource_dir = None
+        # {socket_path : _TestingPebbleClient}
+        # socket_path = '/charm/containers/{container_name}/pebble.socket'
+        self._pebble_clients = {}  # type: typing.Dict[str, _TestingPebbleClient]
+
+    def _cleanup(self):
+        if self._resource_dir is not None:
+            self._resource_dir.cleanup()
+            self._resource_dir = None
+
+    def _get_resource_dir(self) -> pathlib.Path:
+        if self._resource_dir is None:
+            # In actual Juju, the resource path for a charm's resource is
+            # $AGENT_DIR/resources/$RESOURCE_NAME/$RESOURCE_FILENAME
+            # However, charms shouldn't depend on this.
+            self._resource_dir = tempfile.TemporaryDirectory(prefix='tmp-ops-test-resource-')
+        return pathlib.Path(self._resource_dir.name)
+
+    def relation_ids(self, relation_name):
+        try:
+            return self._relation_ids_map[relation_name]
+        except KeyError as e:
+            if relation_name not in self._meta.relations:
+                raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
+            return []
+
+    def relation_list(self, relation_id):
+        try:
+            return self._relation_list_map[relation_id]
+        except KeyError as e:
+            raise model.RelationNotFoundError from e
+
+    def relation_remote_app_name(self, relation_id: int) -> typing.Optional[str]:
+        if relation_id not in self._relation_app_and_units:
+            # Non-existent or dead relation
+            return None
+        return self._relation_app_and_units[relation_id]['app']
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if is_app and '/' in member_name:
+            member_name = member_name.split('/')[0]
+        if relation_id not in self._relation_data:
+            raise model.RelationNotFoundError()
+        return self._relation_data[relation_id][member_name].copy()
+
+    def relation_set(self, relation_id, key, value, is_app):
+        relation = self._relation_data[relation_id]
+        if is_app:
+            bucket_key = self.app_name
+        else:
+            bucket_key = self.unit_name
+        if bucket_key not in relation:
+            relation[bucket_key] = {}
+        bucket = relation[bucket_key]
+        if value == '':
+            bucket.pop(key, None)
+        else:
+            bucket[key] = value
+
+    def config_get(self):
+        return self._config
+
+    def is_leader(self):
+        return self._is_leader
+
+    def application_version_set(self, version):
+        self._workload_version = version
+
+    def resource_get(self, resource_name):
+        if resource_name not in self._resources_map:
+            raise model.ModelError(
+                "ERROR could not download resource: HTTP request failed: "
+                "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format(
+                    self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name
+                ))
+        filename, contents = self._resources_map[resource_name]
+        resource_dir = self._get_resource_dir()
+        resource_filename = resource_dir / resource_name / filename
+        if not resource_filename.exists():
+            if isinstance(contents, bytes):
+                mode = 'wb'
+            else:
+                mode = 'wt'
+            resource_filename.parent.mkdir(exist_ok=True)
+            with resource_filename.open(mode=mode) as resource_file:
+                resource_file.write(contents)
+        return resource_filename
+
+    def pod_spec_set(self, spec, k8s_resources):
+        self._pod_spec = (spec, k8s_resources)
+
+    def status_get(self, *, is_app=False):
+        if is_app:
+            return self._app_status
+        else:
+            return self._unit_status
+
+    def status_set(self, status, message='', *, is_app=False):
+        if is_app:
+            self._app_status = {'status': status, 'message': message}
+        else:
+            self._unit_status = {'status': status, 'message': message}
+
+    def storage_list(self, name):
+        raise NotImplementedError(self.storage_list)
+
+    def storage_get(self, storage_name_id, attribute):
+        raise NotImplementedError(self.storage_get)
+
+    def storage_add(self, name, count=1):
+        raise NotImplementedError(self.storage_add)
+
+    def action_get(self):
+        raise NotImplementedError(self.action_get)
+
+    def action_set(self, results):
+        raise NotImplementedError(self.action_set)
+
+    def action_log(self, message):
+        raise NotImplementedError(self.action_log)
+
+    def action_fail(self, message=''):
+        raise NotImplementedError(self.action_fail)
+
+    def network_get(self, endpoint_name, relation_id=None):
+        raise NotImplementedError(self.network_get)
+
+    def add_metrics(self, metrics, labels=None):
+        raise NotImplementedError(self.add_metrics)
+
+    def juju_log(self, level, msg):
+        raise NotImplementedError(self.juju_log)
+
+    def get_pebble(self, socket_path: str):
+        client = self._pebble_clients.get(socket_path, None)
+        if client is None:
+            client = _TestingPebbleClient(self)
+            self._pebble_clients[socket_path] = client
+        return client
+
+
+@_copy_docstrings(pebble.Client)
+class _TestingPebbleClient:
+    """This conforms to the interface for pebble.Client but provides canned data.
+
+    DO NOT use this class directly; it is used by `Harness`_ to run interactions with Pebble.
+    `Harness`_ is responsible for maintaining the internal consistency of the values here,
+    as the only public methods of this type are for implementing Client.
+    """
+
+    def __init__(self, backend: _TestingModelBackend):
+        self._backend = backend
+        self._layers = {}
+        # Has a service been started/stopped?
+        self._service_status = {}
+
+    def get_system_info(self) -> pebble.SystemInfo:
+        raise NotImplementedError(self.get_system_info)
+
+    def get_warnings(
+            self, select: pebble.WarningState = pebble.WarningState.PENDING,
+    ) -> typing.List['pebble.Warning']:
+        raise NotImplementedError(self.get_warnings)
+
+    def ack_warnings(self, timestamp: datetime.datetime) -> int:
+        raise NotImplementedError(self.ack_warnings)
+
+    def get_changes(
+            self, select: pebble.ChangeState = pebble.ChangeState.IN_PROGRESS, service: str = None,
+    ) -> typing.List[pebble.Change]:
+        raise NotImplementedError(self.get_changes)
+
+    def get_change(self, change_id: pebble.ChangeID) -> pebble.Change:
+        raise NotImplementedError(self.get_change)
+
+    def abort_change(self, change_id: pebble.ChangeID) -> pebble.Change:
+        raise NotImplementedError(self.abort_change)
+
+    def autostart_services(self, timeout: float = 30.0, delay: float = 0.1) -> pebble.ChangeID:
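+        # Note: as with start_services below, no real ChangeID is produced;
+        # callers are expected to ignore the return value.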
+        for name, service in self._render_services().items():
+            # TODO: jam 2021-04-20 This feels awkward that Service.startup might be a string or
+            #  might be an enum. Probably should make Service.startup a property rather than an
+            #  attribute.
+            if service.startup == '':
+                startup = pebble.ServiceStartup.DISABLED
+            else:
+                startup = pebble.ServiceStartup(service.startup)
+            if startup == pebble.ServiceStartup.ENABLED:
+                self._service_status[name] = pebble.ServiceStatus.ACTIVE
+
+    def start_services(
+            self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
+    ) -> pebble.ChangeID:
+        # A common mistake is to pass just the name of a service, rather than a list of services,
+        # so trap that so it is caught quickly.
+        if isinstance(services, str):
+            raise TypeError('start_services should take a list of names, not just "{}"'.format(
+                services))
+
+        # Note: jam 2021-04-20 We don't implement ChangeID, but the default caller of this is
+        # Container.start() which currently ignores the return value
+        known_services = self._render_services()
+        # Names appear to be validated before any are activated, so do two passes
+        for name in services:
+            if name not in known_services:
+                # TODO: jam 2021-04-20 This needs a better error type
+                raise RuntimeError('400 Bad Request: service "{}" does not exist'.format(name))
+            current = self._service_status.get(name, pebble.ServiceStatus.INACTIVE)
+            if current == pebble.ServiceStatus.ACTIVE:
+                # TODO: jam 2021-04-20 I believe pebble actually validates all the service names
+                #  can be started before starting any, and gives a list of things that couldn't
+                #  be done, but this is good enough for now
+                raise pebble.ChangeError('''\
+cannot perform the following tasks:
+- Start service "{}" (service "{}" was previously started)
+'''.format(name, name), change=1234)  # the change id is not respected
+        for name in services:
+            # If you try to start a service which is started, you get a ChangeError:
+            # $ PYTHONPATH=. python3 ./test/pebble_cli.py start serv
+            # ChangeError: cannot perform the following tasks:
+            # - Start service "serv" (service "serv" was previously started)
+            self._service_status[name] = pebble.ServiceStatus.ACTIVE
+
+    def stop_services(
+            self, services: typing.List[str], timeout: float = 30.0, delay: float = 0.1,
+    ) -> pebble.ChangeID:
+        # handle a common mistake of passing just a name rather than a list of names
+        if isinstance(services, str):
+            raise TypeError('stop_services should take a list of names, not just "{}"'.format(
+                services))
+        # TODO: handle invalid names
+        # Note: jam 2021-04-20 We don't implement ChangeID, but the default caller of this is
+        # Container.stop() which currently ignores the return value
+        known_services = self._render_services()
+        for name in services:
+            if name not in known_services:
+                # TODO: jam 2021-04-20 This needs a better error type
+                #  400 Bad Request: service "bal" does not exist
+                raise RuntimeError('400 Bad Request: service "{}" does not exist'.format(name))
+            current = self._service_status.get(name, pebble.ServiceStatus.INACTIVE)
+            if current != pebble.ServiceStatus.ACTIVE:
+                # TODO: jam 2021-04-20 I believe pebble actually validates all the service names
+                #  can be stopped before stopping any, and gives a list of things that couldn't
+                #  be done, but this is good enough for now
+                raise pebble.ChangeError('''\
+cannot perform the following tasks:
+- Stop service "{}" (service "{}" is not active)
+'''.format(name, name), change=1234)  # the change id is not respected
+        for name in services:
+            self._service_status[name] = pebble.ServiceStatus.INACTIVE
+
+    def wait_change(
+            self, change_id: pebble.ChangeID, timeout: float = 30.0, delay: float = 0.1,
+    ) -> pebble.Change:
+        raise NotImplementedError(self.wait_change)
+
+    def add_layer(
+            self, label: str, layer: typing.Union[str, dict, pebble.Layer], *,
+            combine: bool = False):
+        # I wish we could combine some of this helpful object corralling with the actual backend,
+        # rather than having to re-implement it. Maybe we could subclass the real client.
+        if not isinstance(label, str):
+            raise TypeError('label must be a str, not {}'.format(type(label).__name__))
+
+        if isinstance(layer, (str, dict)):
+            layer_obj = pebble.Layer(layer)
+        elif isinstance(layer, pebble.Layer):
+            layer_obj = layer
+        else:
+            raise TypeError('layer must be str, dict, or pebble.Layer, not {}'.format(
+                type(layer).__name__))
+        if label in self._layers:
+            # TODO: jam 2021-04-19 These should not be RuntimeErrors but should be proper error
+            #  types. https://github.com/canonical/operator/issues/514
+            if not combine:
+                raise RuntimeError('400 Bad Request: layer "{}" already exists'.format(label))
+            existing_layer = self._layers[label]
+            for name, service in layer_obj.services.items():
+                # 'override' is actually single quoted in the real error, but
+                # it shouldn't be, hopefully that gets cleaned up.
+                if not service.override:
+                    raise RuntimeError('500 Internal Server Error: layer "{}" must define '
+                                       '"override" for service "{}"'.format(label, name))
+                if service.override not in ('merge', 'replace'):
+                    raise RuntimeError('500 Internal Server Error: layer "{}" has invalid '
+                                       '"override" value on service "{}"'.format(label, name))
+                if service.override != 'replace':
+                    raise RuntimeError(
+                        'override: "{}" unsupported for layer "{}" service "{}"'.format(
+                            service.override, label, name))
+                existing_layer.services[name] = service
+        else:
+            self._layers[label] = layer_obj
+
+    def _render_services(self) -> typing.Mapping[str, pebble.Service]:
+        services = {}
+        for key in sorted(self._layers.keys()):
+            layer = self._layers[key]
+            for name, service in layer.services.items():
+                # TODO: (jam) 2021-04-07 have a way to merge existing services
+                services[name] = service
+        return services
+
+    def get_plan(self) -> pebble.Plan:
+        plan = pebble.Plan('{}')
+        services = self._render_services()
+        if not services:
+            return plan
+        for name in sorted(services.keys()):
+            plan.services[name] = services[name]
+        return plan
+
+    def get_services(self, names: typing.List[str] = None) -> typing.List[pebble.ServiceInfo]:
+        if isinstance(names, str):
+            raise TypeError('get_services should take a list of names, not just "{}"'.format(
+                names))
+        services = self._render_services()
+        infos = []
+        if names is None:
+            names = sorted(services.keys())
+        for name in sorted(names):
+            try:
+                service = services[name]
+            except KeyError:
+                # in pebble, it just returns "nothing matched" if there are 0 matches,
+                # but it ignores services it doesn't recognize
+                continue
+            status = self._service_status.get(name, pebble.ServiceStatus.INACTIVE)
+            if service.startup == '':
+                startup = pebble.ServiceStartup.DISABLED
+            else:
+                startup = pebble.ServiceStartup(service.startup)
+            info = pebble.ServiceInfo(name,
+                                      startup=startup,
+                                      current=pebble.ServiceStatus(status))
+            infos.append(info)
+        return infos
+
+    def pull(self, path: str, *, encoding: str = 'utf-8') -> typing.Union[typing.BinaryIO,
+                                                                          typing.TextIO]:
+        raise NotImplementedError(self.pull)
+
+    def push(
+            self, path: str, source: typing.Union[bytes, str, typing.BinaryIO, typing.TextIO], *,
+            encoding: str = 'utf-8', make_dirs: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        raise NotImplementedError(self.push)
+
+    def list_files(self, path: str, *, pattern: str = None,
+                   itself: bool = False) -> typing.List[pebble.FileInfo]:
+        raise NotImplementedError(self.list_files)
+
+    def make_dir(
+            self, path: str, *, make_parents: bool = False, permissions: int = None,
+            user_id: int = None, user: str = None, group_id: int = None, group: str = None):
+        raise NotImplementedError(self.make_dir)
+
+    def remove_path(self, path: str, *, recursive: bool = False):
+        raise NotImplementedError(self.remove_path)
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/version.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..e480b2a0662aa7df67b8dff32b55e1425a062e12
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/ops/version.py
@@ -0,0 +1,3 @@
+# this is a generated file
+
+version = '1.2.0'
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/__init__.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86d07b5525d10bf1d543be0e1f5d01af897a4b49
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/__init__.py
@@ -0,0 +1,427 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '5.4.1'
+try:
+    from .cyaml import *
+    __with_libyaml__ = True
+except ImportError:
+    __with_libyaml__ = False
+
+import io
+
+#------------------------------------------------------------------------------
+# Warnings control
+#------------------------------------------------------------------------------
+
+# 'Global' warnings state:
+_warnings_enabled = {
+    'YAMLLoadWarning': True,
+}
+
+# Get or set global warnings' state
+def warnings(settings=None):
+    if settings is None:
+        return _warnings_enabled
+
+    if type(settings) is dict:
+        for key in settings:
+            if key in _warnings_enabled:
+                _warnings_enabled[key] = settings[key]
+
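+# For example, warnings({'YAMLLoadWarning': False}) silences the deprecation
+# warning that load() emits when called without an explicit Loader.
+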
+# Warn when load() is called without Loader=...
+class YAMLLoadWarning(RuntimeWarning):
+    pass
+
+def load_warning(method):
+    if _warnings_enabled['YAMLLoadWarning'] is False:
+        return
+
+    import warnings
+
+    message = (
+        "calling yaml.%s() without Loader=... is deprecated, as the "
+        "default Loader is unsafe. Please read "
+        "https://msg.pyyaml.org/load for full details."
+    ) % method
+
+    warnings.warn(message, YAMLLoadWarning, stacklevel=3)
+
+#------------------------------------------------------------------------------
+def scan(stream, Loader=Loader):
+    """
+    Scan a YAML stream and produce scanning tokens.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_token():
+            yield loader.get_token()
+    finally:
+        loader.dispose()
+
+def parse(stream, Loader=Loader):
+    """
+    Parse a YAML stream and produce parsing events.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_event():
+            yield loader.get_event()
+    finally:
+        loader.dispose()
+
+def compose(stream, Loader=Loader):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding representation tree.
+    """
+    loader = Loader(stream)
+    try:
+        return loader.get_single_node()
+    finally:
+        loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding representation trees.
+    """
+    loader = Loader(stream)
+    try:
+        while loader.check_node():
+            yield loader.get_node()
+    finally:
+        loader.dispose()
+
+def load(stream, Loader=None):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+    """
+    if Loader is None:
+        load_warning('load')
+        Loader = FullLoader
+
+    loader = Loader(stream)
+    try:
+        return loader.get_single_data()
+    finally:
+        loader.dispose()
+
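+# A minimal sketch: load('a: 1', Loader=SafeLoader) returns {'a': 1}; omitting
+# Loader still works but triggers the YAMLLoadWarning above.
+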
+def load_all(stream, Loader=None):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+    """
+    if Loader is None:
+        load_warning('load_all')
+        Loader = FullLoader
+
+    loader = Loader(stream)
+    try:
+        while loader.check_data():
+            yield loader.get_data()
+    finally:
+        loader.dispose()
+
+def full_load(stream):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+
+    Resolve all tags except those known to be
+    unsafe on untrusted input.
+    """
+    return load(stream, FullLoader)
+
+def full_load_all(stream):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+
+    Resolve all tags except those known to be
+    unsafe on untrusted input.
+    """
+    return load_all(stream, FullLoader)
+
+def safe_load(stream):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+
+    Resolve only basic YAML tags. This is known
+    to be safe for untrusted input.
+    """
+    return load(stream, SafeLoader)
+
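+# e.g. safe_load('a: 1') == {'a': 1} and safe_load('[1, 2]') == [1, 2]; unknown
+# tags raise a ConstructorError instead of instantiating arbitrary objects.
+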
+def safe_load_all(stream):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+
+    Resolve only basic YAML tags. This is known
+    to be safe for untrusted input.
+    """
+    return load_all(stream, SafeLoader)
+
+def unsafe_load(stream):
+    """
+    Parse the first YAML document in a stream
+    and produce the corresponding Python object.
+
+    Resolve all tags, even those known to be
+    unsafe on untrusted input.
+    """
+    return load(stream, UnsafeLoader)
+
+def unsafe_load_all(stream):
+    """
+    Parse all YAML documents in a stream
+    and produce corresponding Python objects.
+
+    Resolve all tags, even those known to be
+    unsafe on untrusted input.
+    """
+    return load_all(stream, UnsafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None):
+    """
+    Emit YAML parsing events into a stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        stream = io.StringIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break)
+    try:
+        for event in events:
+            dumper.emit(event)
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
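+# Round-trip sketch: emit(parse('a: 1')) re-serializes the event stream and,
+# with stream=None, should return the string 'a: 1\n'.
+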
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding=None, explicit_start=None, explicit_end=None,
+        version=None, tags=None):
+    """
+    Serialize a sequence of representation trees into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        if encoding is None:
+            stream = io.StringIO()
+        else:
+            stream = io.BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end)
+    try:
+        dumper.open()
+        for node in nodes:
+            dumper.serialize(node)
+        dumper.close()
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a representation tree into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+        default_style=None, default_flow_style=False,
+        canonical=None, indent=None, width=None,
+        allow_unicode=None, line_break=None,
+        encoding=None, explicit_start=None, explicit_end=None,
+        version=None, tags=None, sort_keys=True):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    getvalue = None
+    if stream is None:
+        if encoding is None:
+            stream = io.StringIO()
+        else:
+            stream = io.BytesIO()
+        getvalue = stream.getvalue
+    dumper = Dumper(stream, default_style=default_style,
+            default_flow_style=default_flow_style,
+            canonical=canonical, indent=indent, width=width,
+            allow_unicode=allow_unicode, line_break=line_break,
+            encoding=encoding, version=version, tags=tags,
+            explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
+    try:
+        dumper.open()
+        for data in documents:
+            dumper.represent(data)
+        dumper.close()
+    finally:
+        dumper.dispose()
+    if getvalue:
+        return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+    """
+    Serialize a sequence of Python objects into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+    """
+    Serialize a Python object into a YAML stream.
+    Produce only basic YAML tags.
+    If stream is None, return the produced string instead.
+    """
+    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
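+# e.g. safe_dump({'a': 1}) returns 'a: 1\n'; dumping arbitrary Python objects
+# under SafeDumper raises a RepresenterError rather than emitting python tags.
+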
+def add_implicit_resolver(tag, regexp, first=None,
+        Loader=None, Dumper=Dumper):
+    """
+    Add an implicit scalar detector.
+    If an implicit scalar value matches the given regexp,
+    the corresponding tag is assigned to the scalar.
+    first is a sequence of possible initial characters or None.
+    """
+    if Loader is None:
+        loader.Loader.add_implicit_resolver(tag, regexp, first)
+        loader.FullLoader.add_implicit_resolver(tag, regexp, first)
+        loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
+    else:
+        Loader.add_implicit_resolver(tag, regexp, first)
+    Dumper.add_implicit_resolver(tag, regexp, first)
+
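+# Sketch with a hypothetical '!env' tag (assumes `import re`): tag any scalar
+# that looks like ${VAR} so a matching constructor can expand it later:
+#     add_implicit_resolver('!env', re.compile(r'^\$\{[^}]*\}$'), first=['$'])
+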
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
+    """
+    Add a path based resolver for the given tag.
+    A path is a list of keys that forms a path
+    to a node in the representation tree.
+    Keys can be string values, integers, or None.
+    """
+    if Loader is None:
+        loader.Loader.add_path_resolver(tag, path, kind)
+        loader.FullLoader.add_path_resolver(tag, path, kind)
+        loader.UnsafeLoader.add_path_resolver(tag, path, kind)
+    else:
+        Loader.add_path_resolver(tag, path, kind)
+    Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=None):
+    """
+    Add a constructor for the given tag.
+    Constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        loader.Loader.add_constructor(tag, constructor)
+        loader.FullLoader.add_constructor(tag, constructor)
+        loader.UnsafeLoader.add_constructor(tag, constructor)
+    else:
+        Loader.add_constructor(tag, constructor)
+
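+# Sketch of a constructor for the hypothetical '!env' tag above (assumes
+# `import os`):
+#     def env_constructor(loader, node):
+#         return os.path.expandvars(loader.construct_scalar(node))
+#     add_constructor('!env', env_constructor, Loader=SafeLoader)
+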
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
+        loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
+        loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+    else:
+        Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
+    """
+    Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+    """
+    Add a representer for the given type.
+    Multi-representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+    def __init__(cls, name, bases, kwds):
+        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+            if isinstance(cls.yaml_loader, list):
+                for loader in cls.yaml_loader:
+                    loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+            else:
+                cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+
+            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+    """
+
+    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+    yaml_loader = [Loader, FullLoader, UnsafeLoader]
+    yaml_dumper = Dumper
+
+    yaml_tag = None
+    yaml_flow_style = None
+
+    @classmethod
+    def from_yaml(cls, loader, node):
+        """
+        Convert a representation node to a Python object.
+        """
+        return loader.construct_yaml_object(node, cls)
+
+    @classmethod
+    def to_yaml(cls, dumper, data):
+        """
+        Convert a Python object to a representation node.
+        """
+        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+                flow_style=cls.yaml_flow_style)
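+
+# A minimal YAMLObject sketch (illustrative class and tag names):
+#     class Monster(YAMLObject):
+#         yaml_tag = '!Monster'
+#         yaml_loader = SafeLoader
+#         def __init__(self, name):
+#             self.name = name
+# after which safe_load('!Monster {name: Dragon}') yields a Monster instance.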
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so
new file mode 100755
index 0000000000000000000000000000000000000000..801c0e2a72a862a33d640a576ca969b684dc8e86
Binary files /dev/null and b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so differ
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/composer.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/composer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d15cb40e3b4198819c91c6f8d8b32807fcf53b2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+    pass
+
+class Composer:
+
+    def __init__(self):
+        self.anchors = {}
+
+    def check_node(self):
+        # Drop the STREAM-START event.
+        if self.check_event(StreamStartEvent):
+            self.get_event()
+
+        # Are there more documents available?
+        return not self.check_event(StreamEndEvent)
+
+    def get_node(self):
+        # Get the root node of the next document.
+        if not self.check_event(StreamEndEvent):
+            return self.compose_document()
+
+    def get_single_node(self):
+        # Drop the STREAM-START event.
+        self.get_event()
+
+        # Compose a document if the stream is not empty.
+        document = None
+        if not self.check_event(StreamEndEvent):
+            document = self.compose_document()
+
+        # Ensure that the stream contains no more documents.
+        if not self.check_event(StreamEndEvent):
+            event = self.get_event()
+            raise ComposerError("expected a single document in the stream",
+                    document.start_mark, "but found another document",
+                    event.start_mark)
+
+        # Drop the STREAM-END event.
+        self.get_event()
+
+        return document
+
+    def compose_document(self):
+        # Drop the DOCUMENT-START event.
+        self.get_event()
+
+        # Compose the root node.
+        node = self.compose_node(None, None)
+
+        # Drop the DOCUMENT-END event.
+        self.get_event()
+
+        self.anchors = {}
+        return node
+
+    def compose_node(self, parent, index):
+        if self.check_event(AliasEvent):
+            event = self.get_event()
+            anchor = event.anchor
+            if anchor not in self.anchors:
+                raise ComposerError(None, None, "found undefined alias %r"
+                        % anchor, event.start_mark)
+            return self.anchors[anchor]
+        event = self.peek_event()
+        anchor = event.anchor
+        if anchor is not None:
+            if anchor in self.anchors:
+                raise ComposerError("found duplicate anchor %r; first occurrence"
+                        % anchor, self.anchors[anchor].start_mark,
+                        "second occurrence", event.start_mark)
+        self.descend_resolver(parent, index)
+        if self.check_event(ScalarEvent):
+            node = self.compose_scalar_node(anchor)
+        elif self.check_event(SequenceStartEvent):
+            node = self.compose_sequence_node(anchor)
+        elif self.check_event(MappingStartEvent):
+            node = self.compose_mapping_node(anchor)
+        self.ascend_resolver()
+        return node
+
+    def compose_scalar_node(self, anchor):
+        event = self.get_event()
+        tag = event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(ScalarNode, event.value, event.implicit)
+        node = ScalarNode(tag, event.value,
+                event.start_mark, event.end_mark, style=event.style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        return node
+
+    def compose_sequence_node(self, anchor):
+        start_event = self.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(SequenceNode, None, start_event.implicit)
+        node = SequenceNode(tag, [],
+                start_event.start_mark, None,
+                flow_style=start_event.flow_style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        index = 0
+        while not self.check_event(SequenceEndEvent):
+            node.value.append(self.compose_node(node, index))
+            index += 1
+        end_event = self.get_event()
+        node.end_mark = end_event.end_mark
+        return node
+
+    def compose_mapping_node(self, anchor):
+        start_event = self.get_event()
+        tag = start_event.tag
+        if tag is None or tag == '!':
+            tag = self.resolve(MappingNode, None, start_event.implicit)
+        node = MappingNode(tag, [],
+                start_event.start_mark, None,
+                flow_style=start_event.flow_style)
+        if anchor is not None:
+            self.anchors[anchor] = node
+        while not self.check_event(MappingEndEvent):
+            #key_event = self.peek_event()
+            item_key = self.compose_node(node, None)
+            #if item_key in node.value:
+            #    raise ComposerError("while composing a mapping", start_event.start_mark,
+            #            "found duplicate key", key_event.start_mark)
+            item_value = self.compose_node(node, item_key)
+            #node.value[item_key] = item_value
+            node.value.append((item_key, item_value))
+        end_event = self.get_event()
+        node.end_mark = end_event.end_mark
+        return node
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/constructor.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..619acd3070a4845c653fcf22a626e05158035bc2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/constructor.py
@@ -0,0 +1,748 @@
+
+__all__ = [
+    'BaseConstructor',
+    'SafeConstructor',
+    'FullConstructor',
+    'UnsafeConstructor',
+    'Constructor',
+    'ConstructorError'
+]
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+    pass
+
+class BaseConstructor:
+
+    yaml_constructors = {}
+    yaml_multi_constructors = {}
+
+    def __init__(self):
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.state_generators = []
+        self.deep_construct = False
+
+    def check_data(self):
+        # Are there more documents available?
+        return self.check_node()
+
+    def check_state_key(self, key):
+        """Block special attributes/methods from being set in a newly created
+        object, to prevent user-controlled methods from being called during
+        deserialization"""
+        if self.get_state_keys_blacklist_regexp().match(key):
+            raise ConstructorError(None, None,
+                "blacklisted key '%s' in instance state found" % (key,), None)
+
+    def get_data(self):
+        # Construct and return the next document.
+        if self.check_node():
+            return self.construct_document(self.get_node())
+
+    def get_single_data(self):
+        # Ensure that the stream contains a single document and construct it.
+        node = self.get_single_node()
+        if node is not None:
+            return self.construct_document(node)
+        return None
+
+    def construct_document(self, node):
+        data = self.construct_object(node)
+        while self.state_generators:
+            state_generators = self.state_generators
+            self.state_generators = []
+            for generator in state_generators:
+                for dummy in generator:
+                    pass
+        self.constructed_objects = {}
+        self.recursive_objects = {}
+        self.deep_construct = False
+        return data
+
+    def construct_object(self, node, deep=False):
+        if node in self.constructed_objects:
+            return self.constructed_objects[node]
+        if deep:
+            old_deep = self.deep_construct
+            self.deep_construct = True
+        if node in self.recursive_objects:
+            raise ConstructorError(None, None,
+                    "found unconstructable recursive node", node.start_mark)
+        self.recursive_objects[node] = None
+        constructor = None
+        tag_suffix = None
+        if node.tag in self.yaml_constructors:
+            constructor = self.yaml_constructors[node.tag]
+        else:
+            for tag_prefix in self.yaml_multi_constructors:
+                if tag_prefix is not None and node.tag.startswith(tag_prefix):
+                    tag_suffix = node.tag[len(tag_prefix):]
+                    constructor = self.yaml_multi_constructors[tag_prefix]
+                    break
+            else:
+                if None in self.yaml_multi_constructors:
+                    tag_suffix = node.tag
+                    constructor = self.yaml_multi_constructors[None]
+                elif None in self.yaml_constructors:
+                    constructor = self.yaml_constructors[None]
+                elif isinstance(node, ScalarNode):
+                    constructor = self.__class__.construct_scalar
+                elif isinstance(node, SequenceNode):
+                    constructor = self.__class__.construct_sequence
+                elif isinstance(node, MappingNode):
+                    constructor = self.__class__.construct_mapping
+        if tag_suffix is None:
+            data = constructor(self, node)
+        else:
+            data = constructor(self, tag_suffix, node)
+        if isinstance(data, types.GeneratorType):
+            generator = data
+            data = next(generator)
+            if self.deep_construct:
+                for dummy in generator:
+                    pass
+            else:
+                self.state_generators.append(generator)
+        self.constructed_objects[node] = data
+        del self.recursive_objects[node]
+        if deep:
+            self.deep_construct = old_deep
+        return data
+
+    def construct_scalar(self, node):
+        if not isinstance(node, ScalarNode):
+            raise ConstructorError(None, None,
+                    "expected a scalar node, but found %s" % node.id,
+                    node.start_mark)
+        return node.value
+
+    def construct_sequence(self, node, deep=False):
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(None, None,
+                    "expected a sequence node, but found %s" % node.id,
+                    node.start_mark)
+        return [self.construct_object(child, deep=deep)
+                for child in node.value]
+
+    def construct_mapping(self, node, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(None, None,
+                    "expected a mapping node, but found %s" % node.id,
+                    node.start_mark)
+        mapping = {}
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            if not isinstance(key, collections.abc.Hashable):
+                raise ConstructorError("while constructing a mapping", node.start_mark,
+                        "found unhashable key", key_node.start_mark)
+            value = self.construct_object(value_node, deep=deep)
+            mapping[key] = value
+        return mapping
+
+    def construct_pairs(self, node, deep=False):
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(None, None,
+                    "expected a mapping node, but found %s" % node.id,
+                    node.start_mark)
+        pairs = []
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            value = self.construct_object(value_node, deep=deep)
+            pairs.append((key, value))
+        return pairs
+
+    @classmethod
+    def add_constructor(cls, tag, constructor):
+        if 'yaml_constructors' not in cls.__dict__:
+            cls.yaml_constructors = cls.yaml_constructors.copy()
+        cls.yaml_constructors[tag] = constructor
+
+    @classmethod
+    def add_multi_constructor(cls, tag_prefix, multi_constructor):
+        if 'yaml_multi_constructors' not in cls.__dict__:
+            cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+        cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+    def construct_scalar(self, node):
+        if isinstance(node, MappingNode):
+            for key_node, value_node in node.value:
+                if key_node.tag == 'tag:yaml.org,2002:value':
+                    return self.construct_scalar(value_node)
+        return super().construct_scalar(node)
+
+    def flatten_mapping(self, node):
+        merge = []
+        index = 0
+        while index < len(node.value):
+            key_node, value_node = node.value[index]
+            if key_node.tag == 'tag:yaml.org,2002:merge':
+                del node.value[index]
+                if isinstance(value_node, MappingNode):
+                    self.flatten_mapping(value_node)
+                    merge.extend(value_node.value)
+                elif isinstance(value_node, SequenceNode):
+                    submerge = []
+                    for subnode in value_node.value:
+                        if not isinstance(subnode, MappingNode):
+                            raise ConstructorError("while constructing a mapping",
+                                    node.start_mark,
+                                    "expected a mapping for merging, but found %s"
+                                    % subnode.id, subnode.start_mark)
+                        self.flatten_mapping(subnode)
+                        submerge.append(subnode.value)
+                    submerge.reverse()
+                    for value in submerge:
+                        merge.extend(value)
+                else:
+                    raise ConstructorError("while constructing a mapping", node.start_mark,
+                            "expected a mapping or list of mappings for merging, but found %s"
+                            % value_node.id, value_node.start_mark)
+            elif key_node.tag == 'tag:yaml.org,2002:value':
+                key_node.tag = 'tag:yaml.org,2002:str'
+                index += 1
+            else:
+                index += 1
+        if merge:
+            node.value = merge + node.value
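+        # e.g. for '<<: *defaults' the anchored mapping's pairs are spliced in
+        # ahead of the explicit keys, so the explicit keys win, since
+        # construct_mapping lets later pairs overwrite earlier ones.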
+
+    def construct_mapping(self, node, deep=False):
+        if isinstance(node, MappingNode):
+            self.flatten_mapping(node)
+        return super().construct_mapping(node, deep=deep)
+
+    def construct_yaml_null(self, node):
+        self.construct_scalar(node)
+        return None
+
+    bool_values = {
+        'yes':      True,
+        'no':       False,
+        'true':     True,
+        'false':    False,
+        'on':       True,
+        'off':      False,
+    }
+
+    def construct_yaml_bool(self, node):
+        value = self.construct_scalar(node)
+        return self.bool_values[value.lower()]
+
+    def construct_yaml_int(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '')
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '0':
+            return 0
+        elif value.startswith('0b'):
+            return sign*int(value[2:], 2)
+        elif value.startswith('0x'):
+            return sign*int(value[2:], 16)
+        elif value[0] == '0':
+            return sign*int(value, 8)
+        elif ':' in value:
+            digits = [int(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*int(value)
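+    # Note: the ':' branch above implements YAML 1.1 sexagesimal integers,
+    # e.g. '1:30' parses as 1*60 + 30 == 90.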
+
+    inf_value = 1e300
+    while inf_value != inf_value*inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value/inf_value   # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '').lower()
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '.inf':
+            return sign*self.inf_value
+        elif value == '.nan':
+            return self.nan_value
+        elif ':' in value:
+            digits = [float(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*float(value)
+
+    def construct_yaml_binary(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    timestamp_regexp = re.compile(
+            r'''^(?P<year>[0-9][0-9][0-9][0-9])
+                -(?P<month>[0-9][0-9]?)
+                -(?P<day>[0-9][0-9]?)
+                (?:(?:[Tt]|[ \t]+)
+                (?P<hour>[0-9][0-9]?)
+                :(?P<minute>[0-9][0-9])
+                :(?P<second>[0-9][0-9])
+                (?:\.(?P<fraction>[0-9]*))?
+                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+    def construct_yaml_timestamp(self, node):
+        value = self.construct_scalar(node)
+        match = self.timestamp_regexp.match(value)
+        values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        tzinfo = None
+        if values['fraction']:
+            fraction = values['fraction'][:6]
+            while len(fraction) < 6:
+                fraction += '0'
+            fraction = int(fraction)
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            tz_minute = int(values['tz_minute'] or 0)
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+            tzinfo = datetime.timezone(delta)
+        elif values['tz']:
+            tzinfo = datetime.timezone.utc
+        return datetime.datetime(year, month, day, hour, minute, second, fraction,
+                                 tzinfo=tzinfo)
+
+    def construct_yaml_omap(self, node):
+        # Note: we do not check for duplicate keys, because it's too
+        # CPU-expensive.
+        omap = []
+        yield omap
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError("while constructing an ordered map", node.start_mark,
+                    "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError("while constructing an ordered map", node.start_mark,
+                        "expected a mapping of length 1, but found %s" % subnode.id,
+                        subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError("while constructing an ordered map", node.start_mark,
+                        "expected a single mapping item, but found %d items" % len(subnode.value),
+                        subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            omap.append((key, value))
+
+    def construct_yaml_pairs(self, node):
+        # Note: the same code as `construct_yaml_omap`.
+        pairs = []
+        yield pairs
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError("while constructing pairs", node.start_mark,
+                    "expected a sequence, but found %s" % node.id, node.start_mark)
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError("while constructing pairs", node.start_mark,
+                        "expected a mapping of length 1, but found %s" % subnode.id,
+                        subnode.start_mark)
+            if len(subnode.value) != 1:
+                raise ConstructorError("while constructing pairs", node.start_mark,
+                        "expected a single mapping item, but found %d items" % len(subnode.value),
+                        subnode.start_mark)
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            value = self.construct_object(value_node)
+            pairs.append((key, value))
+
+    def construct_yaml_set(self, node):
+        data = set()
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    def construct_yaml_str(self, node):
+        return self.construct_scalar(node)
+
+    def construct_yaml_seq(self, node):
+        data = []
+        yield data
+        data.extend(self.construct_sequence(node))
+
+    def construct_yaml_map(self, node):
+        data = {}
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
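+    # The yield-then-populate shape of the collection constructors above lets
+    # construct_object cache the empty container first, so self-referential
+    # anchors (a sequence or mapping that contains itself) resolve correctly.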
+
+    def construct_yaml_object(self, node, cls):
+        data = cls.__new__(cls)
+        yield data
+        if hasattr(data, '__setstate__'):
+            state = self.construct_mapping(node, deep=True)
+            data.__setstate__(state)
+        else:
+            state = self.construct_mapping(node)
+            data.__dict__.update(state)
+
+    def construct_undefined(self, node):
+        raise ConstructorError(None, None,
+                "could not determine a constructor for the tag %r" % node.tag,
+                node.start_mark)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:null',
+        SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:bool',
+        SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:int',
+        SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:float',
+        SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:binary',
+        SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:timestamp',
+        SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:omap',
+        SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:pairs',
+        SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:set',
+        SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:str',
+        SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:seq',
+        SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+        'tag:yaml.org,2002:map',
+        SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+        SafeConstructor.construct_undefined)
+
+class FullConstructor(SafeConstructor):
+    # 'extend' is blacklisted because it is used by
+    # construct_python_object_apply to add `listitems` to a newly generated
+    # Python instance.
+    def get_state_keys_blacklist(self):
+        return ['^extend$', '^__.*__$']
+
+    def get_state_keys_blacklist_regexp(self):
+        if not hasattr(self, 'state_keys_blacklist_regexp'):
+            self.state_keys_blacklist_regexp = re.compile(
+                '(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+        return self.state_keys_blacklist_regexp
+
+    def construct_python_str(self, node):
+        return self.construct_scalar(node)
+
+    def construct_python_unicode(self, node):
+        return self.construct_scalar(node)
+
+    def construct_python_bytes(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    def construct_python_long(self, node):
+        return self.construct_yaml_int(node)
+
+    def construct_python_complex(self, node):
+        return complex(self.construct_scalar(node))
+
+    def construct_python_tuple(self, node):
+        return tuple(self.construct_sequence(node))
+
+    def find_python_module(self, name, mark, unsafe=False):
+        if not name:
+            raise ConstructorError("while constructing a Python module", mark,
+                    "expected non-empty name appended to the tag", mark)
+        if unsafe:
+            try:
+                __import__(name)
+            except ImportError as exc:
+                raise ConstructorError("while constructing a Python module", mark,
+                        "cannot find module %r (%s)" % (name, exc), mark)
+        if name not in sys.modules:
+            raise ConstructorError("while constructing a Python module", mark,
+                    "module %r is not imported" % name, mark)
+        return sys.modules[name]
+
+    def find_python_name(self, name, mark, unsafe=False):
+        if not name:
+            raise ConstructorError("while constructing a Python object", mark,
+                    "expected non-empty name appended to the tag", mark)
+        if '.' in name:
+            module_name, object_name = name.rsplit('.', 1)
+        else:
+            module_name = 'builtins'
+            object_name = name
+        if unsafe:
+            try:
+                __import__(module_name)
+            except ImportError as exc:
+                raise ConstructorError("while constructing a Python object", mark,
+                        "cannot find module %r (%s)" % (module_name, exc), mark)
+        if module_name not in sys.modules:
+            raise ConstructorError("while constructing a Python object", mark,
+                    "module %r is not imported" % module_name, mark)
+        module = sys.modules[module_name]
+        if not hasattr(module, object_name):
+            raise ConstructorError("while constructing a Python object", mark,
+                    "cannot find %r in the module %r"
+                    % (object_name, module.__name__), mark)
+        return getattr(module, object_name)
+
+    def construct_python_name(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError("while constructing a Python name", node.start_mark,
+                    "expected the empty value, but found %r" % value, node.start_mark)
+        return self.find_python_name(suffix, node.start_mark)
+
+    def construct_python_module(self, suffix, node):
+        value = self.construct_scalar(node)
+        if value:
+            raise ConstructorError("while constructing a Python module", node.start_mark,
+                    "expected the empty value, but found %r" % value, node.start_mark)
+        return self.find_python_module(suffix, node.start_mark)
+
+    def make_python_instance(self, suffix, node,
+            args=None, kwds=None, newobj=False, unsafe=False):
+        if not args:
+            args = []
+        if not kwds:
+            kwds = {}
+        cls = self.find_python_name(suffix, node.start_mark)
+        if not (unsafe or isinstance(cls, type)):
+            raise ConstructorError("while constructing a Python instance", node.start_mark,
+                    "expected a class, but found %r" % type(cls),
+                    node.start_mark)
+        if newobj and isinstance(cls, type):
+            return cls.__new__(cls, *args, **kwds)
+        else:
+            return cls(*args, **kwds)
+
+    def set_python_instance_state(self, instance, state, unsafe=False):
+        if hasattr(instance, '__setstate__'):
+            instance.__setstate__(state)
+        else:
+            slotstate = {}
+            if isinstance(state, tuple) and len(state) == 2:
+                state, slotstate = state
+            if hasattr(instance, '__dict__'):
+                if not unsafe and state:
+                    for key in state.keys():
+                        self.check_state_key(key)
+                instance.__dict__.update(state)
+            elif state:
+                slotstate.update(state)
+            for key, value in slotstate.items():
+                if not unsafe:
+                    self.check_state_key(key)
+                setattr(instance, key, value)
+
+    def construct_python_object(self, suffix, node):
+        # Format:
+        #   !!python/object:module.name { ... state ... }
+        instance = self.make_python_instance(suffix, node, newobj=True)
+        yield instance
+        deep = hasattr(instance, '__setstate__')
+        state = self.construct_mapping(node, deep=deep)
+        self.set_python_instance_state(instance, state)
+
+    def construct_python_object_apply(self, suffix, node, newobj=False):
+        # Format:
+        #   !!python/object/apply       # (or !!python/object/new)
+        #   args: [ ... arguments ... ]
+        #   kwds: { ... keywords ... }
+        #   state: ... state ...
+        #   listitems: [ ... listitems ... ]
+        #   dictitems: { ... dictitems ... }
+        # or short format:
+        #   !!python/object/apply [ ... arguments ... ]
+        # The difference between !!python/object/apply and !!python/object/new
+        # is how the object is created; see make_python_instance for details.
+        if isinstance(node, SequenceNode):
+            args = self.construct_sequence(node, deep=True)
+            kwds = {}
+            state = {}
+            listitems = []
+            dictitems = {}
+        else:
+            value = self.construct_mapping(node, deep=True)
+            args = value.get('args', [])
+            kwds = value.get('kwds', {})
+            state = value.get('state', {})
+            listitems = value.get('listitems', [])
+            dictitems = value.get('dictitems', {})
+        instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+        if state:
+            self.set_python_instance_state(instance, state)
+        if listitems:
+            instance.extend(listitems)
+        if dictitems:
+            for key in dictitems:
+                instance[key] = dictitems[key]
+        return instance
+
+    def construct_python_object_new(self, suffix, node):
+        return self.construct_python_object_apply(suffix, node, newobj=True)
+
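+# Illustrative usage (not part of the original module): the object tags are
+# registered on UnsafeConstructor below, so they need an unsafe loader, e.g.
+#   yaml.unsafe_load("!!python/object/apply:complex [1, 2]") == (1+2j)
+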
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/none',
+    FullConstructor.construct_yaml_null)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/bool',
+    FullConstructor.construct_yaml_bool)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/str',
+    FullConstructor.construct_python_str)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/unicode',
+    FullConstructor.construct_python_unicode)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/bytes',
+    FullConstructor.construct_python_bytes)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/int',
+    FullConstructor.construct_yaml_int)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/long',
+    FullConstructor.construct_python_long)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/float',
+    FullConstructor.construct_yaml_float)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/complex',
+    FullConstructor.construct_python_complex)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/list',
+    FullConstructor.construct_yaml_seq)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/tuple',
+    FullConstructor.construct_python_tuple)
+
+FullConstructor.add_constructor(
+    'tag:yaml.org,2002:python/dict',
+    FullConstructor.construct_yaml_map)
+
+FullConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/name:',
+    FullConstructor.construct_python_name)
+
+class UnsafeConstructor(FullConstructor):
+
+    def find_python_module(self, name, mark):
+        return super().find_python_module(name, mark, unsafe=True)
+
+    def find_python_name(self, name, mark):
+        return super().find_python_name(name, mark, unsafe=True)
+
+    def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+        return super().make_python_instance(
+            suffix, node, args, kwds, newobj, unsafe=True)
+
+    def set_python_instance_state(self, instance, state):
+        return super().set_python_instance_state(
+            instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/module:',
+    UnsafeConstructor.construct_python_module)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object:',
+    UnsafeConstructor.construct_python_object)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/new:',
+    UnsafeConstructor.construct_python_object_new)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/apply:',
+    UnsafeConstructor.construct_python_object_apply)
+
+# Constructor is the same as UnsafeConstructor. It is kept in place in case
+# users have extended it directly.
+class Constructor(UnsafeConstructor):
+    pass
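+
+# Example (illustrative, assuming the standard PyYAML entry points): the
+# constructors above are normally reached through a loader class, e.g.
+#   yaml.load(stream, Loader=yaml.FullLoader)   # uses FullConstructor
+#   yaml.unsafe_load(stream)                    # uses Constructor (unsafe)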
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/cyaml.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/cyaml.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c21345879b298bb8668201bebe7d289586b17f9
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/cyaml.py
@@ -0,0 +1,101 @@
+
+__all__ = [
+    'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
+    'CBaseDumper', 'CSafeDumper', 'CDumper'
+]
+
+from yaml._yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class CFullLoader(CParser, FullConstructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        FullConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        UnsafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+    def __init__(self, stream):
+        CParser.__init__(self, stream)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        BaseRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        BaseResolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        SafeRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        CEmitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width, encoding=encoding,
+                allow_unicode=allow_unicode, line_break=line_break,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        Resolver.__init__(self)
+
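+# Typical usage (illustrative): fall back to the pure-Python classes when the
+# C extension is not available:
+#   try:
+#       from yaml import CSafeLoader as SafeLoader
+#   except ImportError:
+#       from yaml import SafeLoader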
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/dumper.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/dumper.py
new file mode 100644
index 0000000000000000000000000000000000000000..6aadba551f3836b02f4752277f4b3027073defad
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        BaseRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        BaseResolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        SafeRepresenter.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+    def __init__(self, stream,
+            default_style=None, default_flow_style=False,
+            canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None,
+            encoding=None, explicit_start=None, explicit_end=None,
+            version=None, tags=None, sort_keys=True):
+        Emitter.__init__(self, stream, canonical=canonical,
+                indent=indent, width=width,
+                allow_unicode=allow_unicode, line_break=line_break)
+        Serializer.__init__(self, encoding=encoding,
+                explicit_start=explicit_start, explicit_end=explicit_end,
+                version=version, tags=tags)
+        Representer.__init__(self, default_style=default_style,
+                default_flow_style=default_flow_style, sort_keys=sort_keys)
+        Resolver.__init__(self)
+
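+# Typical usage (illustrative): these classes are normally passed to yaml.dump
+# rather than instantiated directly, e.g.
+#   yaml.dump(data, Dumper=yaml.SafeDumper, default_flow_style=False)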
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/emitter.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/emitter.py
new file mode 100644
index 0000000000000000000000000000000000000000..a664d011162af69184df2f8e59ab7feec818f7c7
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
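+#
+# For example (illustrative), representing {'a': 1} as a block mapping yields:
+# STREAM-START DOCUMENT-START MAPPING-START SCALAR('a') SCALAR('1')
+# MAPPING-END DOCUMENT-END STREAM-END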
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+    pass
+
+class ScalarAnalysis:
+    def __init__(self, scalar, empty, multiline,
+            allow_flow_plain, allow_block_plain,
+            allow_single_quoted, allow_double_quoted,
+            allow_block):
+        self.scalar = scalar
+        self.empty = empty
+        self.multiline = multiline
+        self.allow_flow_plain = allow_flow_plain
+        self.allow_block_plain = allow_block_plain
+        self.allow_single_quoted = allow_single_quoted
+        self.allow_double_quoted = allow_double_quoted
+        self.allow_block = allow_block
+
+class Emitter:
+
+    DEFAULT_TAG_PREFIXES = {
+        '!' : '!',
+        'tag:yaml.org,2002:' : '!!',
+    }
+
+    def __init__(self, stream, canonical=None, indent=None, width=None,
+            allow_unicode=None, line_break=None):
+
+        # The stream should have the methods `write` and possibly `flush`.
+        self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+        self.encoding = None
+
+        # Emitter is a state machine with a stack of states to handle nested
+        # structures.
+        self.states = []
+        self.state = self.expect_stream_start
+
+        # Current event and the event queue.
+        self.events = []
+        self.event = None
+
+        # The current indentation level and the stack of previous indents.
+        self.indents = []
+        self.indent = None
+
+        # Flow level.
+        self.flow_level = 0
+
+        # Contexts.
+        self.root_context = False
+        self.sequence_context = False
+        self.mapping_context = False
+        self.simple_key_context = False
+
+        # Characteristics of the last emitted character:
+        #  - current position.
+        #  - is it a whitespace?
+        #  - is it an indention character
+        #    (indentation space, '-', '?', or ':')?
+        self.line = 0
+        self.column = 0
+        self.whitespace = True
+        self.indention = True
+
+        # Whether the document requires an explicit document indicator
+        self.open_ended = False
+
+        # Formatting details.
+        self.canonical = canonical
+        self.allow_unicode = allow_unicode
+        self.best_indent = 2
+        if indent and 1 < indent < 10:
+            self.best_indent = indent
+        self.best_width = 80
+        if width and width > self.best_indent*2:
+            self.best_width = width
+        self.best_line_break = '\n'
+        if line_break in ['\r', '\n', '\r\n']:
+            self.best_line_break = line_break
+
+        # Tag prefixes.
+        self.tag_prefixes = None
+
+        # Prepared anchor and tag.
+        self.prepared_anchor = None
+        self.prepared_tag = None
+
+        # Scalar analysis and style.
+        self.analysis = None
+        self.style = None
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def emit(self, event):
+        self.events.append(event)
+        while not self.need_more_events():
+            self.event = self.events.pop(0)
+            self.state()
+            self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
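+    # (Roughly: one event of lookahead after DOCUMENT-START, and two or three
+    # after SEQUENCE-START/MAPPING-START, is enough for the empty-document,
+    # empty-collection and simple-key checks below.)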
+
+    def need_more_events(self):
+        if not self.events:
+            return True
+        event = self.events[0]
+        if isinstance(event, DocumentStartEvent):
+            return self.need_events(1)
+        elif isinstance(event, SequenceStartEvent):
+            return self.need_events(2)
+        elif isinstance(event, MappingStartEvent):
+            return self.need_events(3)
+        else:
+            return False
+
+    def need_events(self, count):
+        level = 0
+        for event in self.events[1:]:
+            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+                level += 1
+            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+                level -= 1
+            elif isinstance(event, StreamEndEvent):
+                level = -1
+            if level < 0:
+                return False
+        return (len(self.events) < count+1)
+
+    def increase_indent(self, flow=False, indentless=False):
+        self.indents.append(self.indent)
+        if self.indent is None:
+            if flow:
+                self.indent = self.best_indent
+            else:
+                self.indent = 0
+        elif not indentless:
+            self.indent += self.best_indent
+
+    # States.
+
+    # Stream handlers.
+
+    def expect_stream_start(self):
+        if isinstance(self.event, StreamStartEvent):
+            if self.event.encoding and not hasattr(self.stream, 'encoding'):
+                self.encoding = self.event.encoding
+            self.write_stream_start()
+            self.state = self.expect_first_document_start
+        else:
+            raise EmitterError("expected StreamStartEvent, but got %s"
+                    % self.event)
+
+    def expect_nothing(self):
+        raise EmitterError("expected nothing, but got %s" % self.event)
+
+    # Document handlers.
+
+    def expect_first_document_start(self):
+        return self.expect_document_start(first=True)
+
+    def expect_document_start(self, first=False):
+        if isinstance(self.event, DocumentStartEvent):
+            if (self.event.version or self.event.tags) and self.open_ended:
+                self.write_indicator('...', True)
+                self.write_indent()
+            if self.event.version:
+                version_text = self.prepare_version(self.event.version)
+                self.write_version_directive(version_text)
+            self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+            if self.event.tags:
+                handles = sorted(self.event.tags.keys())
+                for handle in handles:
+                    prefix = self.event.tags[handle]
+                    self.tag_prefixes[prefix] = handle
+                    handle_text = self.prepare_tag_handle(handle)
+                    prefix_text = self.prepare_tag_prefix(prefix)
+                    self.write_tag_directive(handle_text, prefix_text)
+            implicit = (first and not self.event.explicit and not self.canonical
+                    and not self.event.version and not self.event.tags
+                    and not self.check_empty_document())
+            if not implicit:
+                self.write_indent()
+                self.write_indicator('---', True)
+                if self.canonical:
+                    self.write_indent()
+            self.state = self.expect_document_root
+        elif isinstance(self.event, StreamEndEvent):
+            if self.open_ended:
+                self.write_indicator('...', True)
+                self.write_indent()
+            self.write_stream_end()
+            self.state = self.expect_nothing
+        else:
+            raise EmitterError("expected DocumentStartEvent, but got %s"
+                    % self.event)
+
+    def expect_document_end(self):
+        if isinstance(self.event, DocumentEndEvent):
+            self.write_indent()
+            if self.event.explicit:
+                self.write_indicator('...', True)
+                self.write_indent()
+            self.flush_stream()
+            self.state = self.expect_document_start
+        else:
+            raise EmitterError("expected DocumentEndEvent, but got %s"
+                    % self.event)
+
+    def expect_document_root(self):
+        self.states.append(self.expect_document_end)
+        self.expect_node(root=True)
+
+    # Node handlers.
+
+    def expect_node(self, root=False, sequence=False, mapping=False,
+            simple_key=False):
+        self.root_context = root
+        self.sequence_context = sequence
+        self.mapping_context = mapping
+        self.simple_key_context = simple_key
+        if isinstance(self.event, AliasEvent):
+            self.expect_alias()
+        elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+            self.process_anchor('&')
+            self.process_tag()
+            if isinstance(self.event, ScalarEvent):
+                self.expect_scalar()
+            elif isinstance(self.event, SequenceStartEvent):
+                if self.flow_level or self.canonical or self.event.flow_style   \
+                        or self.check_empty_sequence():
+                    self.expect_flow_sequence()
+                else:
+                    self.expect_block_sequence()
+            elif isinstance(self.event, MappingStartEvent):
+                if self.flow_level or self.canonical or self.event.flow_style   \
+                        or self.check_empty_mapping():
+                    self.expect_flow_mapping()
+                else:
+                    self.expect_block_mapping()
+        else:
+            raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+    def expect_alias(self):
+        if self.event.anchor is None:
+            raise EmitterError("anchor is not specified for alias")
+        self.process_anchor('*')
+        self.state = self.states.pop()
+
+    def expect_scalar(self):
+        self.increase_indent(flow=True)
+        self.process_scalar()
+        self.indent = self.indents.pop()
+        self.state = self.states.pop()
+
+    # Flow sequence handlers.
+
+    def expect_flow_sequence(self):
+        self.write_indicator('[', True, whitespace=True)
+        self.flow_level += 1
+        self.increase_indent(flow=True)
+        self.state = self.expect_first_flow_sequence_item
+
+    def expect_first_flow_sequence_item(self):
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            self.write_indicator(']', False)
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    def expect_flow_sequence_item(self):
+        if isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            if self.canonical:
+                self.write_indicator(',', False)
+                self.write_indent()
+            self.write_indicator(']', False)
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(',', False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            self.states.append(self.expect_flow_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Flow mapping handlers.
+
+    def expect_flow_mapping(self):
+        self.write_indicator('{', True, whitespace=True)
+        self.flow_level += 1
+        self.increase_indent(flow=True)
+        self.state = self.expect_first_flow_mapping_key
+
+    def expect_first_flow_mapping_key(self):
+        if isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            self.write_indicator('}', False)
+            self.state = self.states.pop()
+        else:
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_key(self):
+        if isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.flow_level -= 1
+            if self.canonical:
+                self.write_indicator(',', False)
+                self.write_indent()
+            self.write_indicator('}', False)
+            self.state = self.states.pop()
+        else:
+            self.write_indicator(',', False)
+            if self.canonical or self.column > self.best_width:
+                self.write_indent()
+            if not self.canonical and self.check_simple_key():
+                self.states.append(self.expect_flow_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True)
+                self.states.append(self.expect_flow_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_flow_mapping_simple_value(self):
+        self.write_indicator(':', False)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_flow_mapping_value(self):
+        if self.canonical or self.column > self.best_width:
+            self.write_indent()
+        self.write_indicator(':', True)
+        self.states.append(self.expect_flow_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Block sequence handlers.
+
+    def expect_block_sequence(self):
+        indentless = (self.mapping_context and not self.indention)
+        self.increase_indent(flow=False, indentless=indentless)
+        self.state = self.expect_first_block_sequence_item
+
+    def expect_first_block_sequence_item(self):
+        return self.expect_block_sequence_item(first=True)
+
+    def expect_block_sequence_item(self, first=False):
+        if not first and isinstance(self.event, SequenceEndEvent):
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+        else:
+            self.write_indent()
+            self.write_indicator('-', True, indention=True)
+            self.states.append(self.expect_block_sequence_item)
+            self.expect_node(sequence=True)
+
+    # Block mapping handlers.
+
+    def expect_block_mapping(self):
+        self.increase_indent(flow=False)
+        self.state = self.expect_first_block_mapping_key
+
+    def expect_first_block_mapping_key(self):
+        return self.expect_block_mapping_key(first=True)
+
+    def expect_block_mapping_key(self, first=False):
+        if not first and isinstance(self.event, MappingEndEvent):
+            self.indent = self.indents.pop()
+            self.state = self.states.pop()
+        else:
+            self.write_indent()
+            if self.check_simple_key():
+                self.states.append(self.expect_block_mapping_simple_value)
+                self.expect_node(mapping=True, simple_key=True)
+            else:
+                self.write_indicator('?', True, indention=True)
+                self.states.append(self.expect_block_mapping_value)
+                self.expect_node(mapping=True)
+
+    def expect_block_mapping_simple_value(self):
+        self.write_indicator(':', False)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    def expect_block_mapping_value(self):
+        self.write_indent()
+        self.write_indicator(':', True, indention=True)
+        self.states.append(self.expect_block_mapping_key)
+        self.expect_node(mapping=True)
+
+    # Checkers.
+
+    def check_empty_sequence(self):
+        return (isinstance(self.event, SequenceStartEvent) and self.events
+                and isinstance(self.events[0], SequenceEndEvent))
+
+    def check_empty_mapping(self):
+        return (isinstance(self.event, MappingStartEvent) and self.events
+                and isinstance(self.events[0], MappingEndEvent))
+
+    def check_empty_document(self):
+        if not isinstance(self.event, DocumentStartEvent) or not self.events:
+            return False
+        event = self.events[0]
+        return (isinstance(event, ScalarEvent) and event.anchor is None
+                and event.tag is None and event.implicit and event.value == '')
+
+    def check_simple_key(self):
+        length = 0
+        if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+            if self.prepared_anchor is None:
+                self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+            length += len(self.prepared_anchor)
+        if isinstance(self.event, (ScalarEvent, CollectionStartEvent))  \
+                and self.event.tag is not None:
+            if self.prepared_tag is None:
+                self.prepared_tag = self.prepare_tag(self.event.tag)
+            length += len(self.prepared_tag)
+        if isinstance(self.event, ScalarEvent):
+            if self.analysis is None:
+                self.analysis = self.analyze_scalar(self.event.value)
+            length += len(self.analysis.scalar)
+        return (length < 128 and (isinstance(self.event, AliasEvent)
+            or (isinstance(self.event, ScalarEvent)
+                    and not self.analysis.empty and not self.analysis.multiline)
+            or self.check_empty_sequence() or self.check_empty_mapping()))
+
+    # Anchor, Tag, and Scalar processors.
+
+    def process_anchor(self, indicator):
+        if self.event.anchor is None:
+            self.prepared_anchor = None
+            return
+        if self.prepared_anchor is None:
+            self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+        if self.prepared_anchor:
+            self.write_indicator(indicator+self.prepared_anchor, True)
+        self.prepared_anchor = None
+
+    def process_tag(self):
+        tag = self.event.tag
+        if isinstance(self.event, ScalarEvent):
+            if self.style is None:
+                self.style = self.choose_scalar_style()
+            if ((not self.canonical or tag is None) and
+                ((self.style == '' and self.event.implicit[0])
+                        or (self.style != '' and self.event.implicit[1]))):
+                self.prepared_tag = None
+                return
+            if self.event.implicit[0] and tag is None:
+                tag = '!'
+                self.prepared_tag = None
+        else:
+            if (not self.canonical or tag is None) and self.event.implicit:
+                self.prepared_tag = None
+                return
+        if tag is None:
+            raise EmitterError("tag is not specified")
+        if self.prepared_tag is None:
+            self.prepared_tag = self.prepare_tag(tag)
+        if self.prepared_tag:
+            self.write_indicator(self.prepared_tag, True)
+        self.prepared_tag = None
+
+    def choose_scalar_style(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.event.style == '"' or self.canonical:
+            return '"'
+        if not self.event.style and self.event.implicit[0]:
+            if (not (self.simple_key_context and
+                    (self.analysis.empty or self.analysis.multiline))
+                and ((self.flow_level and self.analysis.allow_flow_plain)
+                    or (not self.flow_level and self.analysis.allow_block_plain))):
+                return ''
+        if self.event.style and self.event.style in '|>':
+            if (not self.flow_level and not self.simple_key_context
+                    and self.analysis.allow_block):
+                return self.event.style
+        if not self.event.style or self.event.style == '\'':
+            if (self.analysis.allow_single_quoted and
+                    not (self.simple_key_context and self.analysis.multiline)):
+                return '\''
+        return '"'
+
+    def process_scalar(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.style is None:
+            self.style = self.choose_scalar_style()
+        split = (not self.simple_key_context)
+        #if self.analysis.multiline and split    \
+        #        and (not self.style or self.style in '\'\"'):
+        #    self.write_indent()
+        if self.style == '"':
+            self.write_double_quoted(self.analysis.scalar, split)
+        elif self.style == '\'':
+            self.write_single_quoted(self.analysis.scalar, split)
+        elif self.style == '>':
+            self.write_folded(self.analysis.scalar)
+        elif self.style == '|':
+            self.write_literal(self.analysis.scalar)
+        else:
+            self.write_plain(self.analysis.scalar, split)
+        self.analysis = None
+        self.style = None
+
+    # Analyzers.
+
+    def prepare_version(self, version):
+        major, minor = version
+        if major != 1:
+            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+        return '%d.%d' % (major, minor)
+
+    def prepare_tag_handle(self, handle):
+        if not handle:
+            raise EmitterError("tag handle must not be empty")
+        if handle[0] != '!' or handle[-1] != '!':
+            raise EmitterError("tag handle must start and end with '!': %r" % handle)
+        for ch in handle[1:-1]:
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
+                raise EmitterError("invalid character %r in the tag handle: %r"
+                        % (ch, handle))
+        return handle
+
+    def prepare_tag_prefix(self, prefix):
+        if not prefix:
+            raise EmitterError("tag prefix must not be empty")
+        chunks = []
+        start = end = 0
+        if prefix[0] == '!':
+            end = 1
+        while end < len(prefix):
+            ch = prefix[end]
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(prefix[start:end])
+                start = end = end+1
+                data = ch.encode('utf-8')
+                for ch in data:
+                    # bytes iterate as ints on Python 3, so no ord() here
+                    chunks.append('%%%02X' % ch)
+        if start < end:
+            chunks.append(prefix[start:end])
+        return ''.join(chunks)
+
+    def prepare_tag(self, tag):
+        if not tag:
+            raise EmitterError("tag must not be empty")
+        if tag == '!':
+            return tag
+        handle = None
+        suffix = tag
+        prefixes = sorted(self.tag_prefixes.keys())
+        for prefix in prefixes:
+            if tag.startswith(prefix)   \
+                    and (prefix == '!' or len(prefix) < len(tag)):
+                handle = self.tag_prefixes[prefix]
+                suffix = tag[len(prefix):]
+        chunks = []
+        start = end = 0
+        while end < len(suffix):
+            ch = suffix[end]
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-;/?:@&=+$,_.~*\'()[]'   \
+                    or (ch == '!' and handle != '!'):
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(suffix[start:end])
+                start = end = end+1
+                data = ch.encode('utf-8')
+                for ch in data:
+                    chunks.append('%%%02X' % ch)
+        if start < end:
+            chunks.append(suffix[start:end])
+        suffix_text = ''.join(chunks)
+        if handle:
+            return '%s%s' % (handle, suffix_text)
+        else:
+            return '!<%s>' % suffix_text
+
+    def prepare_anchor(self, anchor):
+        if not anchor:
+            raise EmitterError("anchor must not be empty")
+        for ch in anchor:
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'    \
+                    or ch in '-_'):
+                raise EmitterError("invalid character %r in the anchor: %r"
+                        % (ch, anchor))
+        return anchor
+
+    def analyze_scalar(self, scalar):
+
+        # Empty scalar is a special case.
+        if not scalar:
+            return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+                    allow_flow_plain=False, allow_block_plain=True,
+                    allow_single_quoted=True, allow_double_quoted=True,
+                    allow_block=False)
+
+        # Indicators and special characters.
+        block_indicators = False
+        flow_indicators = False
+        line_breaks = False
+        special_characters = False
+
+        # Important whitespace combinations.
+        leading_space = False
+        leading_break = False
+        trailing_space = False
+        trailing_break = False
+        break_space = False
+        space_break = False
+
+        # Check document indicators.
+        if scalar.startswith('---') or scalar.startswith('...'):
+            block_indicators = True
+            flow_indicators = True
+
+        # First character or preceded by a whitespace.
+        preceded_by_whitespace = True
+
+        # Last character or followed by a whitespace.
+        followed_by_whitespace = (len(scalar) == 1 or
+                scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+        # The previous character is a space.
+        previous_space = False
+
+        # The previous character is a break.
+        previous_break = False
+
+        index = 0
+        while index < len(scalar):
+            ch = scalar[index]
+
+            # Check for indicators.
+            if index == 0:
+                # Leading indicators are special characters.
+                if ch in '#,[]{}&*!|>\'\"%@`':
+                    flow_indicators = True
+                    block_indicators = True
+                if ch in '?:':
+                    flow_indicators = True
+                    if followed_by_whitespace:
+                        block_indicators = True
+                if ch == '-' and followed_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+            else:
+                # Some indicators cannot appear inside a scalar either.
+                if ch in ',?[]{}':
+                    flow_indicators = True
+                if ch == ':':
+                    flow_indicators = True
+                    if followed_by_whitespace:
+                        block_indicators = True
+                if ch == '#' and preceded_by_whitespace:
+                    flow_indicators = True
+                    block_indicators = True
+
+            # Check for line breaks, special, and unicode characters.
+            if ch in '\n\x85\u2028\u2029':
+                line_breaks = True
+            if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+                if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+                        or '\uE000' <= ch <= '\uFFFD'
+                        or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
+                    # printable non-ASCII: special only when unicode output is disabled
+                    if not self.allow_unicode:
+                        special_characters = True
+                else:
+                    special_characters = True
+
+            # Detect important whitespace combinations.
+            if ch == ' ':
+                if index == 0:
+                    leading_space = True
+                if index == len(scalar)-1:
+                    trailing_space = True
+                if previous_break:
+                    break_space = True
+                previous_space = True
+                previous_break = False
+            elif ch in '\n\x85\u2028\u2029':
+                if index == 0:
+                    leading_break = True
+                if index == len(scalar)-1:
+                    trailing_break = True
+                if previous_space:
+                    space_break = True
+                previous_space = False
+                previous_break = True
+            else:
+                previous_space = False
+                previous_break = False
+
+            # Prepare for the next character.
+            index += 1
+            preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+            followed_by_whitespace = (index+1 >= len(scalar) or
+                    scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+        # Let's decide what styles are allowed.
+        allow_flow_plain = True
+        allow_block_plain = True
+        allow_single_quoted = True
+        allow_double_quoted = True
+        allow_block = True
+
+        # Leading and trailing whitespaces are bad for plain scalars.
+        if (leading_space or leading_break
+                or trailing_space or trailing_break):
+            allow_flow_plain = allow_block_plain = False
+
+        # We do not permit trailing spaces for block scalars.
+        if trailing_space:
+            allow_block = False
+
+        # Spaces at the beginning of a new line are only acceptable for block
+        # scalars.
+        if break_space:
+            allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double quoted scalars.
+        if space_break or special_characters:
+            allow_flow_plain = allow_block_plain =  \
+            allow_single_quoted = allow_block = False
+
+        # Although the plain scalar writer supports breaks, we never emit
+        # multiline plain scalars.
+        if line_breaks:
+            allow_flow_plain = allow_block_plain = False
+
+        # Flow indicators are forbidden for flow plain scalars.
+        if flow_indicators:
+            allow_flow_plain = False
+
+        # Block indicators are forbidden for block plain scalars.
+        if block_indicators:
+            allow_block_plain = False
+
+        return ScalarAnalysis(scalar=scalar,
+                empty=False, multiline=line_breaks,
+                allow_flow_plain=allow_flow_plain,
+                allow_block_plain=allow_block_plain,
+                allow_single_quoted=allow_single_quoted,
+                allow_double_quoted=allow_double_quoted,
+                allow_block=allow_block)
+
+    # Writers.
+
+    def flush_stream(self):
+        if hasattr(self.stream, 'flush'):
+            self.stream.flush()
+
+    def write_stream_start(self):
+        # Write BOM if needed.
+        if self.encoding and self.encoding.startswith('utf-16'):
+            self.stream.write('\uFEFF'.encode(self.encoding))
+
+    def write_stream_end(self):
+        self.flush_stream()
+
+    def write_indicator(self, indicator, need_whitespace,
+            whitespace=False, indention=False):
+        if self.whitespace or not need_whitespace:
+            data = indicator
+        else:
+            data = ' '+indicator
+        self.whitespace = whitespace
+        self.indention = self.indention and indention
+        self.column += len(data)
+        self.open_ended = False
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_indent(self):
+        indent = self.indent or 0
+        if not self.indention or self.column > indent   \
+                or (self.column == indent and not self.whitespace):
+            self.write_line_break()
+        if self.column < indent:
+            self.whitespace = True
+            data = ' '*(indent-self.column)
+            self.column = indent
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+
+    def write_line_break(self, data=None):
+        if data is None:
+            data = self.best_line_break
+        self.whitespace = True
+        self.indention = True
+        self.line += 1
+        self.column = 0
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+
+    def write_version_directive(self, version_text):
+        data = '%%YAML %s' % version_text
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    def write_tag_directive(self, handle_text, prefix_text):
+        data = '%%TAG %s %s' % (handle_text, prefix_text)
+        if self.encoding:
+            data = data.encode(self.encoding)
+        self.stream.write(data)
+        self.write_line_break()
+
+    # Scalar streams.
+
+    def write_single_quoted(self, text, split=True):
+        self.write_indicator('\'', True)
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch is None or ch != ' ':
+                    if start+1 == end and self.column > self.best_width and split   \
+                            and start != 0 and end != len(text):
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+                    if start < end:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                        start = end
+            if ch == '\'':
+                data = '\'\''
+                self.column += 2
+                if self.encoding:
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                start = end + 1
+            if ch is not None:
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
+        self.write_indicator('\'', False)
+
+    ESCAPE_REPLACEMENTS = {
+        '\0':       '0',
+        '\x07':     'a',
+        '\x08':     'b',
+        '\x09':     't',
+        '\x0A':     'n',
+        '\x0B':     'v',
+        '\x0C':     'f',
+        '\x0D':     'r',
+        '\x1B':     'e',
+        '\"':       '\"',
+        '\\':       '\\',
+        '\x85':     'N',
+        '\xA0':     '_',
+        '\u2028':   'L',
+        '\u2029':   'P',
+    }
+
+    def write_double_quoted(self, text, split=True):
+        self.write_indicator('"', True)
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+                    or not ('\x20' <= ch <= '\x7E'
+                        or (self.allow_unicode
+                            and ('\xA0' <= ch <= '\uD7FF'
+                                or '\uE000' <= ch <= '\uFFFD'))):
+                if start < end:
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end
+                if ch is not None:
+                    if ch in self.ESCAPE_REPLACEMENTS:
+                        data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+                    elif ch <= '\xFF':
+                        data = '\\x%02X' % ord(ch)
+                    elif ch <= '\uFFFF':
+                        data = '\\u%04X' % ord(ch)
+                    else:
+                        data = '\\U%08X' % ord(ch)
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end+1
+            if 0 < end < len(text)-1 and (ch == ' ' or start >= end)    \
+                    and self.column+(end-start) > self.best_width and split:
+                data = text[start:end]+'\\'
+                if start < end:
+                    start = end
+                self.column += len(data)
+                if self.encoding:
+                    data = data.encode(self.encoding)
+                self.stream.write(data)
+                self.write_indent()
+                self.whitespace = False
+                self.indention = False
+                if text[start] == ' ':
+                    data = '\\'
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+            end += 1
+        self.write_indicator('"', False)
+
+    def determine_block_hints(self, text):
+        hints = ''
+        if text:
+            if text[0] in ' \n\x85\u2028\u2029':
+                hints += str(self.best_indent)
+            if text[-1] not in '\n\x85\u2028\u2029':
+                hints += '-'
+            elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+                hints += '+'
+        return hints
+
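+    # Illustrative hint values, assuming the default best_indent of 2:
+    #   "foo\n"   -> ''      "foo"     -> '-'
+    #   "foo\n\n" -> '+'     " foo\n"  -> '2'
+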
+    def write_folded(self, text):
+        hints = self.determine_block_hints(text)
+        self.write_indicator('>'+hints, True)
+        if hints[-1:] == '+':
+            self.open_ended = True
+        self.write_line_break()
+        leading_space = True
+        spaces = False
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    if not leading_space and ch is not None and ch != ' '   \
+                            and text[start] == '\n':
+                        self.write_line_break()
+                    leading_space = (ch == ' ')
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            elif spaces:
+                if ch != ' ':
+                    if start+1 == end and self.column > self.best_width:
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029':
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = (ch in '\n\x85\u2028\u2029')
+                spaces = (ch == ' ')
+            end += 1
+
+    def write_literal(self, text):
+        hints = self.determine_block_hints(text)
+        self.write_indicator('|'+hints, True)
+        if hints[-1:] == '+':
+            self.open_ended = True
+        self.write_line_break()
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in '\n\x85\u2028\u2029':
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in '\n\x85\u2028\u2029':
+                    data = text[start:end]
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
+
+    def write_plain(self, text, split=True):
+        if self.root_context:
+            self.open_ended = True
+        if not text:
+            return
+        if not self.whitespace:
+            data = ' '
+            self.column += len(data)
+            if self.encoding:
+                data = data.encode(self.encoding)
+            self.stream.write(data)
+        self.whitespace = False
+        self.indention = False
+        spaces = False
+        breaks = False
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if spaces:
+                if ch != ' ':
+                    if start+1 == end and self.column > self.best_width and split:
+                        self.write_indent()
+                        self.whitespace = False
+                        self.indention = False
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if self.encoding:
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            elif breaks:
+                if ch not in '\n\x85\u2028\u2029':
+                    if text[start] == '\n':
+                        self.write_line_break()
+                    for br in text[start:end]:
+                        if br == '\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    self.write_indent()
+                    self.whitespace = False
+                    self.indention = False
+                    start = end
+            else:
+                if ch is None or ch in ' \n\x85\u2028\u2029':
+                    data = text[start:end]
+                    self.column += len(data)
+                    if self.encoding:
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    start = end
+            if ch is not None:
+                spaces = (ch == ' ')
+                breaks = (ch in '\n\x85\u2028\u2029')
+            end += 1
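+
+# Illustrative sketch (editorial): how the writers above surface in emitted
+# output, assuming this vendored package is importable as `yaml` (output shown
+# approximately):
+#
+#   >>> import yaml
+#   >>> print(yaml.dump({'text': 'a b\nc d'}, default_style='|'))
+#   "text": |-
+#     a b
+#     c d
+#
+# write_literal() emits the block form ('|'); write_plain() emits unquoted
+# scalars and, when split=True, folds at a space once the column exceeds
+# best_width.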
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/error.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/error.py
new file mode 100644
index 0000000000000000000000000000000000000000..b796b4dc519512c4825ff539a2e6aa20f4d370d0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+    def __init__(self, name, index, line, column, buffer, pointer):
+        self.name = name
+        self.index = index
+        self.line = line
+        self.column = column
+        self.buffer = buffer
+        self.pointer = pointer
+
+    def get_snippet(self, indent=4, max_length=75):
+        if self.buffer is None:
+            return None
+        head = ''
+        start = self.pointer
+        while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+            start -= 1
+            if self.pointer-start > max_length/2-1:
+                head = ' ... '
+                start += 5
+                break
+        tail = ''
+        end = self.pointer
+        while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+            end += 1
+            if end-self.pointer > max_length/2-1:
+                tail = ' ... '
+                end -= 5
+                break
+        snippet = self.buffer[start:end]
+        return ' '*indent + head + snippet + tail + '\n'  \
+                + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+    def __str__(self):
+        snippet = self.get_snippet()
+        where = "  in \"%s\", line %d, column %d"   \
+                % (self.name, self.line+1, self.column+1)
+        if snippet is not None:
+            where += ":\n"+snippet
+        return where
+
+class YAMLError(Exception):
+    pass
+
+class MarkedYAMLError(YAMLError):
+
+    def __init__(self, context=None, context_mark=None,
+            problem=None, problem_mark=None, note=None):
+        self.context = context
+        self.context_mark = context_mark
+        self.problem = problem
+        self.problem_mark = problem_mark
+        self.note = note
+
+    def __str__(self):
+        lines = []
+        if self.context is not None:
+            lines.append(self.context)
+        if self.context_mark is not None  \
+            and (self.problem is None or self.problem_mark is None
+                    or self.context_mark.name != self.problem_mark.name
+                    or self.context_mark.line != self.problem_mark.line
+                    or self.context_mark.column != self.problem_mark.column):
+            lines.append(str(self.context_mark))
+        if self.problem is not None:
+            lines.append(self.problem)
+        if self.problem_mark is not None:
+            lines.append(str(self.problem_mark))
+        if self.note is not None:
+            lines.append(self.note)
+        return '\n'.join(lines)
+
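+# Illustrative sketch (editorial): how the classes above compose an error
+# message; all values below are invented for the example:
+#
+#   >>> mark = Mark('<unicode string>', 9, 0, 9, 'key: [1, 2', 9)
+#   >>> err = MarkedYAMLError(context='while parsing a flow sequence',
+#   ...                       context_mark=mark, problem="expected ',' or ']'",
+#   ...                       problem_mark=mark)
+#   >>> print(err)
+#
+# __str__ joins the context, a single mark with a caret snippet (the duplicate
+# context mark is suppressed by the comparison above), and the problem.
+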
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/events.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/events.py
new file mode 100644
index 0000000000000000000000000000000000000000..f79ad389cb6c9517e391dcd25534866bc9ccd36a
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+    def __init__(self, start_mark=None, end_mark=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+                if hasattr(self, key)]
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+    def __init__(self, anchor, start_mark=None, end_mark=None):
+        self.anchor = anchor
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+            flow_style=None):
+        self.anchor = anchor
+        self.tag = tag
+        self.implicit = implicit
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+    pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndEvent(Event):
+    pass
+
+class DocumentStartEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None, version=None, tags=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+        self.version = version
+        self.tags = tags
+
+class DocumentEndEvent(Event):
+    def __init__(self, start_mark=None, end_mark=None,
+            explicit=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+    pass
+
+class ScalarEvent(NodeEvent):
+    def __init__(self, anchor, tag, implicit, value,
+            start_mark=None, end_mark=None, style=None):
+        self.anchor = anchor
+        self.tag = tag
+        self.implicit = implicit
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+    pass
+
+class SequenceEndEvent(CollectionEndEvent):
+    pass
+
+class MappingStartEvent(CollectionStartEvent):
+    pass
+
+class MappingEndEvent(CollectionEndEvent):
+    pass
+
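+# Illustrative sketch (editorial): the event stream produced for a small
+# document, assuming this vendored package is importable as `yaml`:
+#
+#   >>> import yaml
+#   >>> for event in yaml.parse('a: [1, 2]'):
+#   ...     print(type(event).__name__)
+#   StreamStartEvent
+#   DocumentStartEvent
+#   MappingStartEvent
+#   ScalarEvent
+#   SequenceStartEvent
+#   ScalarEvent
+#   ScalarEvent
+#   SequenceEndEvent
+#   MappingEndEvent
+#   DocumentEndEvent
+#   StreamEndEvent
+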
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/loader.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..e90c11224c38e559cdf0cb205f0692ebd4fb8681
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        BaseConstructor.__init__(self)
+        BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        FullConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        SafeConstructor.__init__(self)
+        Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoader should be able to load almost all YAML safely. Loader is left
+# intact to ensure backwards compatibility.
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+    def __init__(self, stream):
+        Reader.__init__(self, stream)
+        Scanner.__init__(self)
+        Parser.__init__(self)
+        Composer.__init__(self)
+        Constructor.__init__(self)
+        Resolver.__init__(self)
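+
+# Illustrative sketch (editorial): choosing a loader explicitly, assuming this
+# vendored package is importable as `yaml`. SafeLoader constructs only plain
+# Python data types, while FullLoader resolves most tags but refuses to
+# execute arbitrary code:
+#
+#   >>> import yaml
+#   >>> yaml.load('squid:\n  scale: 1\n', Loader=yaml.SafeLoader)
+#   {'squid': {'scale': 1}}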
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/nodes.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4f070c41e1fb1bc01af27d69329e92dded38908
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+    def __init__(self, tag, value, start_mark, end_mark):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        value = self.value
+        #if isinstance(value, list):
+        #    if len(value) == 0:
+        #        value = '<empty>'
+        #    elif len(value) == 1:
+        #        value = '<1 item>'
+        #    else:
+        #        value = '<%d items>' % len(value)
+        #else:
+        #    if len(value) > 75:
+        #        value = repr(value[:70]+u' ... ')
+        #    else:
+        #        value = repr(value)
+        value = repr(value)
+        return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+    id = 'scalar'
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
+class CollectionNode(Node):
+    def __init__(self, tag, value,
+            start_mark=None, end_mark=None, flow_style=None):
+        self.tag = tag
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+    id = 'sequence'
+
+class MappingNode(CollectionNode):
+    id = 'mapping'
+
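+# Illustrative sketch (editorial): the node tree compose() builds for a small
+# document, assuming this vendored package is importable as `yaml`:
+#
+#   >>> import yaml
+#   >>> node = yaml.compose('a: [1, 2]')
+#   >>> node.id, node.tag
+#   ('mapping', 'tag:yaml.org,2002:map')
+#   >>> key, value = node.value[0]
+#   >>> key.id, value.id
+#   ('scalar', 'sequence')
+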
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/parser.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..13a5995d292045d0f865a99abf692bd35dc87814
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream            ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+#                       ALIAS
+#                       | properties (block_content | indentless_block_sequence)?
+#                       | block_content
+#                       | indentless_block_sequence
+# block_node        ::= ALIAS
+#                       | properties block_content?
+#                       | block_content
+# flow_node         ::= ALIAS
+#                       | properties flow_content?
+#                       | flow_content
+# properties        ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content     ::= block_collection | flow_collection | SCALAR
+# flow_content      ::= flow_collection | SCALAR
+# block_collection  ::= block_sequence | block_mapping
+# flow_collection   ::= flow_sequence | flow_mapping
+# block_sequence    ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence   ::= (BLOCK-ENTRY block_node?)+
+# block_mapping     ::= BLOCK-MAPPING_START
+#                       ((KEY block_node_or_indentless_sequence?)?
+#                       (VALUE block_node_or_indentless_sequence?)?)*
+#                       BLOCK-END
+# flow_sequence     ::= FLOW-SEQUENCE-START
+#                       (flow_sequence_entry FLOW-ENTRY)*
+#                       flow_sequence_entry?
+#                       FLOW-SEQUENCE-END
+# flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping      ::= FLOW-MAPPING-START
+#                       (flow_mapping_entry FLOW-ENTRY)*
+#                       flow_mapping_entry?
+#                       FLOW-MAPPING-END
+# flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
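+# Illustrative sketch (editorial): for the document 'a: 1', the grammar above
+# derives  stream -> implicit_document -> block_node -> block_mapping,  and
+# the parser emits, in order: StreamStartEvent, DocumentStartEvent,
+# MappingStartEvent, ScalarEvent('a'), ScalarEvent('1'), MappingEndEvent,
+# DocumentEndEvent, StreamEndEvent.
+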
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+    pass
+
+class Parser:
+    # Since writing a recursive-descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {
+        '!':   '!',
+        '!!':  'tag:yaml.org,2002:',
+    }
+
+    def __init__(self):
+        self.current_event = None
+        self.yaml_version = None
+        self.tag_handles = {}
+        self.states = []
+        self.marks = []
+        self.state = self.parse_stream_start
+
+    def dispose(self):
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def check_event(self, *choices):
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # Get the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
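+
+    # Illustrative sketch (editorial): callers typically drive the parser with
+    # the three methods above; the loop below is an assumed usage pattern, not
+    # upstream code:
+    #
+    #   while not parser.check_event(StreamEndEvent):
+    #       event = parser.get_event()
+    #       ...              # dispatch on type(event)
+    #   parser.get_event()   # consume the closing StreamEndEvent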
+
+    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+
+        # Parse the stream start.
+        token = self.get_token()
+        event = StreamStartEvent(token.start_mark, token.end_mark,
+                encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+
+        # Parse an implicit document.
+        if not self.check_token(DirectiveToken, DocumentStartToken,
+                StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+
+        # Parse any extra document end indicators.
+        while self.check_token(DocumentEndToken):
+            self.get_token()
+
+        # Parse an explicit document.
+        if not self.check_token(StreamEndToken):
+            token = self.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.check_token(DocumentStartToken):
+                raise ParserError(None, None,
+                        "expected '<document start>', but found %r"
+                        % self.peek_token().id,
+                        self.peek_token().start_mark)
+            token = self.get_token()
+            end_mark = token.end_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=True, version=version, tags=tags)
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+
+        # Parse the document end.
+        token = self.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.check_token(DocumentEndToken):
+            token = self.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark,
+                explicit=explicit)
+
+        # Prepare the next state.
+        self.state = self.parse_document_start
+
+        return event
+
+    def parse_document_content(self):
+        if self.check_token(DirectiveToken,
+                DocumentStartToken, DocumentEndToken, StreamEndToken):
+            event = self.process_empty_scalar(self.peek_token().start_mark)
+            self.state = self.states.pop()
+            return event
+        else:
+            return self.parse_block_node()
+
+    def process_directives(self):
+        self.yaml_version = None
+        self.tag_handles = {}
+        while self.check_token(DirectiveToken):
+            token = self.get_token()
+            if token.name == 'YAML':
+                if self.yaml_version is not None:
+                    raise ParserError(None, None,
+                            "found duplicate YAML directive", token.start_mark)
+                major, minor = token.value
+                if major != 1:
+                    raise ParserError(None, None,
+                            "found incompatible YAML document (version 1.* is required)",
+                            token.start_mark)
+                self.yaml_version = token.value
+            elif token.name == 'TAG':
+                handle, prefix = token.value
+                if handle in self.tag_handles:
+                    raise ParserError(None, None,
+                            "duplicate tag handle %r" % handle,
+                            token.start_mark)
+                self.tag_handles[handle] = prefix
+        if self.tag_handles:
+            value = self.yaml_version, self.tag_handles.copy()
+        else:
+            value = self.yaml_version, None
+        for key in self.DEFAULT_TAGS:
+            if key not in self.tag_handles:
+                self.tag_handles[key] = self.DEFAULT_TAGS[key]
+        return value
+
+    # block_node_or_indentless_sequence ::= ALIAS
+    #               | properties (block_content | indentless_block_sequence)?
+    #               | block_content
+    #               | indentless_block_sequence
+    # block_node    ::= ALIAS
+    #                   | properties block_content?
+    #                   | block_content
+    # flow_node     ::= ALIAS
+    #                   | properties flow_content?
+    #                   | flow_content
+    # properties    ::= TAG ANCHOR? | ANCHOR TAG?
+    # block_content     ::= block_collection | flow_collection | SCALAR
+    # flow_content      ::= flow_collection | SCALAR
+    # block_collection  ::= block_sequence | block_mapping
+    # flow_collection   ::= flow_sequence | flow_mapping
+
+    def parse_block_node(self):
+        return self.parse_node(block=True)
+
+    def parse_flow_node(self):
+        return self.parse_node()
+
+    def parse_block_node_or_indentless_sequence(self):
+        return self.parse_node(block=True, indentless_sequence=True)
+
+    def parse_node(self, block=False, indentless_sequence=False):
+        if self.check_token(AliasToken):
+            token = self.get_token()
+            event = AliasEvent(token.value, token.start_mark, token.end_mark)
+            self.state = self.states.pop()
+        else:
+            anchor = None
+            tag = None
+            start_mark = end_mark = tag_mark = None
+            if self.check_token(AnchorToken):
+                token = self.get_token()
+                start_mark = token.start_mark
+                end_mark = token.end_mark
+                anchor = token.value
+                if self.check_token(TagToken):
+                    token = self.get_token()
+                    tag_mark = token.start_mark
+                    end_mark = token.end_mark
+                    tag = token.value
+            elif self.check_token(TagToken):
+                token = self.get_token()
+                start_mark = tag_mark = token.start_mark
+                end_mark = token.end_mark
+                tag = token.value
+                if self.check_token(AnchorToken):
+                    token = self.get_token()
+                    end_mark = token.end_mark
+                    anchor = token.value
+            if tag is not None:
+                handle, suffix = tag
+                if handle is not None:
+                    if handle not in self.tag_handles:
+                        raise ParserError("while parsing a node", start_mark,
+                                "found undefined tag handle %r" % handle,
+                                tag_mark)
+                    tag = self.tag_handles[handle]+suffix
+                else:
+                    tag = suffix
+            #if tag == '!':
+            #    raise ParserError("while parsing a node", start_mark,
+            #            "found non-specific tag '!'", tag_mark,
+            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+            if start_mark is None:
+                start_mark = end_mark = self.peek_token().start_mark
+            event = None
+            implicit = (tag is None or tag == '!')
+            if indentless_sequence and self.check_token(BlockEntryToken):
+                end_mark = self.peek_token().end_mark
+                event = SequenceStartEvent(anchor, tag, implicit,
+                        start_mark, end_mark)
+                self.state = self.parse_indentless_sequence_entry
+            else:
+                if self.check_token(ScalarToken):
+                    token = self.get_token()
+                    end_mark = token.end_mark
+                    if (token.plain and tag is None) or tag == '!':
+                        implicit = (True, False)
+                    elif tag is None:
+                        implicit = (False, True)
+                    else:
+                        implicit = (False, False)
+                    event = ScalarEvent(anchor, tag, implicit, token.value,
+                            start_mark, end_mark, style=token.style)
+                    self.state = self.states.pop()
+                elif self.check_token(FlowSequenceStartToken):
+                    end_mark = self.peek_token().end_mark
+                    event = SequenceStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=True)
+                    self.state = self.parse_flow_sequence_first_entry
+                elif self.check_token(FlowMappingStartToken):
+                    end_mark = self.peek_token().end_mark
+                    event = MappingStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=True)
+                    self.state = self.parse_flow_mapping_first_key
+                elif block and self.check_token(BlockSequenceStartToken):
+                    end_mark = self.peek_token().start_mark
+                    event = SequenceStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=False)
+                    self.state = self.parse_block_sequence_first_entry
+                elif block and self.check_token(BlockMappingStartToken):
+                    end_mark = self.peek_token().start_mark
+                    event = MappingStartEvent(anchor, tag, implicit,
+                            start_mark, end_mark, flow_style=False)
+                    self.state = self.parse_block_mapping_first_key
+                elif anchor is not None or tag is not None:
+                    # Empty scalars are allowed even if a tag or an anchor is
+                    # specified.
+                    event = ScalarEvent(anchor, tag, (implicit, False), '',
+                            start_mark, end_mark)
+                    self.state = self.states.pop()
+                else:
+                    if block:
+                        node = 'block'
+                    else:
+                        node = 'flow'
+                    token = self.peek_token()
+                    raise ParserError("while parsing a %s node" % node, start_mark,
+                            "expected the node content, but found %r" % token.id,
+                            token.start_mark)
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block collection", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    def parse_indentless_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken,
+                    KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.peek_token()
+        event = SequenceEndEvent(token.start_mark, token.start_mark)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping     ::= BLOCK-MAPPING_START
+    #                       ((KEY block_node_or_indentless_sequence?)?
+    #                       (VALUE block_node_or_indentless_sequence?)?)*
+    #                       BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        if self.check_token(KeyToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
+
+    def parse_flow_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_sequence_entry(first=True)
+
+    def parse_flow_sequence_entry(self, first=False):
+        if not self.check_token(FlowSequenceEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow sequence", self.marks[-1],
+                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+            if self.check_token(KeyToken):
+                token = self.peek_token()
+                event = MappingStartEvent(None, None, True,
+                        token.start_mark, token.end_mark,
+                        flow_style=True)
+                self.state = self.parse_flow_sequence_entry_mapping_key
+                return event
+            elif not self.check_token(FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_sequence_entry_mapping_key(self):
+        token = self.get_token()
+        if not self.check_token(ValueToken,
+                FlowEntryToken, FlowSequenceEndToken):
+            self.states.append(self.parse_flow_sequence_entry_mapping_value)
+            return self.parse_flow_node()
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_value
+            return self.process_empty_scalar(token.end_mark)
+
+    def parse_flow_sequence_entry_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+                self.states.append(self.parse_flow_sequence_entry_mapping_end)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_sequence_entry_mapping_end
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_sequence_entry_mapping_end
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_sequence_entry_mapping_end(self):
+        self.state = self.parse_flow_sequence_entry
+        token = self.peek_token()
+        return MappingEndEvent(token.start_mark, token.start_mark)
+
+    # flow_mapping  ::= FLOW-MAPPING-START
+    #                   (flow_mapping_entry FLOW-ENTRY)*
+    #                   flow_mapping_entry?
+    #                   FLOW-MAPPING-END
+    # flow_mapping_entry    ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+    def parse_flow_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        if not self.check_token(FlowMappingEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow mapping", self.marks[-1],
+                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+            if self.check_token(KeyToken):
+                token = self.get_token()
+                if not self.check_token(ValueToken,
+                        FlowEntryToken, FlowMappingEndToken):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif not self.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark):
+        return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/reader.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..774b0219b5932a0ee1c27e637371de5ba8d9cb16
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/reader.py
@@ -0,0 +1,185 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# The parser does not use it for any other purpose.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position forward by `length` characters
+#   reader.index - the number of the current character
+#   reader.line, reader.column - the line and the column of the current character
+
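+# Illustrative sketch (editorial): exercising the interface described above
+# directly; the input is invented for the example:
+#
+#   >>> r = Reader('abc')
+#   >>> r.peek(), r.prefix(2)
+#   ('a', 'ab')
+#   >>> r.forward(2)
+#   >>> r.peek(), r.column
+#   ('c', 2)
+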
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+    def __init__(self, name, position, character, encoding, reason):
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        if isinstance(self.character, bytes):
+            return "'%s' codec can't decode byte #x%02x: %s\n"  \
+                    "  in \"%s\", position %d"    \
+                    % (self.encoding, ord(self.character), self.reason,
+                            self.name, self.position)
+        else:
+            return "unacceptable character #x%04x: %s\n"    \
+                    "  in \"%s\", position %d"    \
+                    % (self.character, self.reason,
+                            self.name, self.position)
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to a unicode string,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `bytes` object,
+    #  - a `str` object,
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `bytes`.
+
+    # Yeah, it's ugly and slow.
+
+    def __init__(self, stream):
+        self.name = None
+        self.stream = None
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = ''
+        self.pointer = 0
+        self.raw_buffer = None
+        self.raw_decode = None
+        self.encoding = None
+        self.index = 0
+        self.line = 0
+        self.column = 0
+        if isinstance(stream, str):
+            self.name = "<unicode string>"
+            self.check_printable(stream)
+            self.buffer = stream+'\0'
+        elif isinstance(stream, bytes):
+            self.name = "<byte string>"
+            self.raw_buffer = stream
+            self.determine_encoding()
+        else:
+            self.stream = stream
+            self.name = getattr(stream, 'name', "<file>")
+            self.eof = False
+            self.raw_buffer = None
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        try:
+            return self.buffer[self.pointer+index]
+        except IndexError:
+            self.update(index+1)
+            return self.buffer[self.pointer+index]
+
+    def prefix(self, length=1):
+        if self.pointer+length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer:self.pointer+length]
+
+    def forward(self, length=1):
+        if self.pointer+length+1 >= len(self.buffer):
+            self.update(length+1)
+        while length:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in '\n\x85\u2028\u2029'  \
+                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+                self.line += 1
+                self.column = 0
+            elif ch != '\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        if self.stream is None:
+            return Mark(self.name, self.index, self.line, self.column,
+                    self.buffer, self.pointer)
+        else:
+            return Mark(self.name, self.index, self.line, self.column,
+                    None, None)
+
+    def determine_encoding(self):
+        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+            self.update_raw()
+        if isinstance(self.raw_buffer, bytes):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode
+                self.encoding = 'utf-16-le'
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode
+                self.encoding = 'utf-16-be'
+            else:
+                self.raw_decode = codecs.utf_8_decode
+                self.encoding = 'utf-8'
+        self.update(1)
+
+    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+    def check_printable(self, data):
+        match = self.NON_PRINTABLE.search(data)
+        if match:
+            character = match.group()
+            position = self.index+(len(self.buffer)-self.pointer)+match.start()
+            raise ReaderError(self.name, position, ord(character),
+                    'unicode', "special characters are not allowed")
+
+    def update(self, length):
+        if self.raw_buffer is None:
+            return
+        self.buffer = self.buffer[self.pointer:]
+        self.pointer = 0
+        while len(self.buffer) < length:
+            if not self.eof:
+                self.update_raw()
+            if self.raw_decode is not None:
+                try:
+                    data, converted = self.raw_decode(self.raw_buffer,
+                            'strict', self.eof)
+                except UnicodeDecodeError as exc:
+                    character = self.raw_buffer[exc.start]
+                    if self.stream is not None:
+                        position = self.stream_pointer-len(self.raw_buffer)+exc.start
+                    else:
+                        position = exc.start
+                    raise ReaderError(self.name, position, character,
+                            exc.encoding, exc.reason)
+            else:
+                data = self.raw_buffer
+                converted = len(data)
+            self.check_printable(data)
+            self.buffer += data
+            self.raw_buffer = self.raw_buffer[converted:]
+            if self.eof:
+                self.buffer += '\0'
+                self.raw_buffer = None
+                break
+
+    def update_raw(self, size=4096):
+        data = self.stream.read(size)
+        if self.raw_buffer is None:
+            self.raw_buffer = data
+        else:
+            self.raw_buffer += data
+        self.stream_pointer += len(data)
+        if not data:
+            self.eof = True
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/representer.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/representer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b0b192ef32ed7f5b7015456fe883c3327bb841e
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/representer.py
@@ -0,0 +1,389 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+    'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+    pass
+
+class BaseRepresenter:
+
+    yaml_representers = {}
+    yaml_multi_representers = {}
+
+    def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
+        self.default_style = default_style
+        self.sort_keys = sort_keys
+        self.default_flow_style = default_flow_style
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent(self, data):
+        node = self.represent_data(data)
+        self.serialize(node)
+        self.represented_objects = {}
+        self.object_keeper = []
+        self.alias_key = None
+
+    def represent_data(self, data):
+        if self.ignore_aliases(data):
+            self.alias_key = None
+        else:
+            self.alias_key = id(data)
+        if self.alias_key is not None:
+            if self.alias_key in self.represented_objects:
+                node = self.represented_objects[self.alias_key]
+                #if node is None:
+                #    raise RepresenterError("recursive objects are not allowed: %r" % data)
+                return node
+            #self.represented_objects[alias_key] = None
+            self.object_keeper.append(data)
+        data_types = type(data).__mro__
+        if data_types[0] in self.yaml_representers:
+            node = self.yaml_representers[data_types[0]](self, data)
+        else:
+            for data_type in data_types:
+                if data_type in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[data_type](self, data)
+                    break
+            else:
+                if None in self.yaml_multi_representers:
+                    node = self.yaml_multi_representers[None](self, data)
+                elif None in self.yaml_representers:
+                    node = self.yaml_representers[None](self, data)
+                else:
+                    node = ScalarNode(None, str(data))
+        #if alias_key is not None:
+        #    self.represented_objects[alias_key] = node
+        return node
+
+    @classmethod
+    def add_representer(cls, data_type, representer):
+        if not 'yaml_representers' in cls.__dict__:
+            cls.yaml_representers = cls.yaml_representers.copy()
+        cls.yaml_representers[data_type] = representer
+
+    @classmethod
+    def add_multi_representer(cls, data_type, representer):
+        if not 'yaml_multi_representers' in cls.__dict__:
+            cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+        cls.yaml_multi_representers[data_type] = representer
+
+    def represent_scalar(self, tag, value, style=None):
+        if style is None:
+            style = self.default_style
+        node = ScalarNode(tag, value, style=style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        return node
+
+    def represent_sequence(self, tag, sequence, flow_style=None):
+        value = []
+        node = SequenceNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        for item in sequence:
+            node_item = self.represent_data(item)
+            if not (isinstance(node_item, ScalarNode) and not node_item.style):
+                best_style = False
+            value.append(node_item)
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        value = []
+        node = MappingNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        if hasattr(mapping, 'items'):
+            mapping = list(mapping.items())
+            if self.sort_keys:
+                try:
+                    mapping = sorted(mapping)
+                except TypeError:
+                    pass
+        for item_key, item_value in mapping:
+            node_key = self.represent_data(item_key)
+            node_value = self.represent_data(item_value)
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+    def ignore_aliases(self, data):
+        return False
+
+class SafeRepresenter(BaseRepresenter):
+
+    def ignore_aliases(self, data):
+        if data is None:
+            return True
+        if isinstance(data, tuple) and data == ():
+            return True
+        if isinstance(data, (str, bytes, bool, int, float)):
+            return True
+
+    def represent_none(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+    def represent_str(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+    def represent_binary(self, data):
+        if hasattr(base64, 'encodebytes'):
+            data = base64.encodebytes(data).decode('ascii')
+        else:
+            data = base64.encodestring(data).decode('ascii')
+        return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+    def represent_bool(self, data):
+        if data:
+            value = 'true'
+        else:
+            value = 'false'
+        return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+    def represent_int(self, data):
+        return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+    inf_value = 1e300
+    while repr(inf_value) != repr(inf_value*inf_value):
+        inf_value *= inf_value
+
+    def represent_float(self, data):
+        if data != data or (data == 0.0 and data == 1.0):
+            value = '.nan'
+        elif data == self.inf_value:
+            value = '.inf'
+        elif data == -self.inf_value:
+            value = '-.inf'
+        else:
+            value = repr(data).lower()
+            # Note that in some cases `repr(data)` represents a float number
+            # without the decimal parts.  For instance:
+            #   >>> repr(1e17)
+            #   '1e17'
+            # Unfortunately, this is not a valid float representation according
+            # to the definition of the `!!float` tag.  We fix this by adding
+            # '.0' before the 'e' symbol.
+            if '.' not in value and 'e' in value:
+                value = value.replace('e', '.0e', 1)
+        return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+    def represent_list(self, data):
+        #pairs = (len(data) > 0 and isinstance(data, list))
+        #if pairs:
+        #    for item in data:
+        #        if not isinstance(item, tuple) or len(item) != 2:
+        #            pairs = False
+        #            break
+        #if not pairs:
+            return self.represent_sequence('tag:yaml.org,2002:seq', data)
+        #value = []
+        #for item_key, item_value in data:
+        #    value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+        #        [(item_key, item_value)]))
+        #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+    def represent_dict(self, data):
+        return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+    def represent_set(self, data):
+        value = {}
+        for key in data:
+            value[key] = None
+        return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+    def represent_date(self, data):
+        value = data.isoformat()
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+    def represent_datetime(self, data):
+        value = data.isoformat(' ')
+        return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+    def represent_yaml_object(self, tag, data, cls, flow_style=None):
+        if hasattr(data, '__getstate__'):
+            state = data.__getstate__()
+        else:
+            state = data.__dict__.copy()
+        return self.represent_mapping(tag, state, flow_style=flow_style)
+
+    def represent_undefined(self, data):
+        raise RepresenterError("cannot represent an object", data)
+
+SafeRepresenter.add_representer(type(None),
+        SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+        SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+        SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+        SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+        SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+        SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+        SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+        SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+        SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+        SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+        SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+        SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+        SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+    def represent_complex(self, data):
+        if data.imag == 0.0:
+            data = '%r' % data.real
+        elif data.real == 0.0:
+            data = '%rj' % data.imag
+        elif data.imag > 0:
+            data = '%r+%rj' % (data.real, data.imag)
+        else:
+            data = '%r%rj' % (data.real, data.imag)
+        return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+    def represent_tuple(self, data):
+        return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+    def represent_name(self, data):
+        name = '%s.%s' % (data.__module__, data.__name__)
+        return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+    def represent_module(self, data):
+        return self.represent_scalar(
+                'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+    def represent_object(self, data):
+        # We use __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+        # A special case is when function.__name__ == '__newobj__'. In this
+        # case we create the object with args[0].__new__(*args).
+
+        # Another special case is when __reduce__ returns a string - we don't
+        # support it.
+
+        # We produce a !!python/object, !!python/object/new or
+        # !!python/object/apply node.
+
+        cls = type(data)
+        if cls in copyreg.dispatch_table:
+            reduce = copyreg.dispatch_table[cls](data)
+        elif hasattr(data, '__reduce_ex__'):
+            reduce = data.__reduce_ex__(2)
+        elif hasattr(data, '__reduce__'):
+            reduce = data.__reduce__()
+        else:
+            raise RepresenterError("cannot represent an object", data)
+        reduce = (list(reduce)+[None]*5)[:5]
+        function, args, state, listitems, dictitems = reduce
+        args = list(args)
+        if state is None:
+            state = {}
+        if listitems is not None:
+            listitems = list(listitems)
+        if dictitems is not None:
+            dictitems = dict(dictitems)
+        if function.__name__ == '__newobj__':
+            function = args[0]
+            args = args[1:]
+            tag = 'tag:yaml.org,2002:python/object/new:'
+            newobj = True
+        else:
+            tag = 'tag:yaml.org,2002:python/object/apply:'
+            newobj = False
+        function_name = '%s.%s' % (function.__module__, function.__name__)
+        if not args and not listitems and not dictitems \
+                and isinstance(state, dict) and newobj:
+            return self.represent_mapping(
+                    'tag:yaml.org,2002:python/object:'+function_name, state)
+        if not listitems and not dictitems  \
+                and isinstance(state, dict) and not state:
+            return self.represent_sequence(tag+function_name, args)
+        value = {}
+        if args:
+            value['args'] = args
+        if state or not isinstance(state, dict):
+            value['state'] = state
+        if listitems:
+            value['listitems'] = listitems
+        if dictitems:
+            value['dictitems'] = dictitems
+        return self.represent_mapping(tag+function_name, value)
+
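+    # Illustrative sketch (not part of upstream PyYAML): for a plain class
+    # reduced with pickle protocol 2, the `__newobj__` branch above typically
+    # yields a !!python/object mapping of the instance state, roughly:
+    #
+    #   class Point:
+    #       def __init__(self, x, y):
+    #           self.x, self.y = x, y
+    #
+    #   yaml.dump(Point(1, 2))
+    #   # -> '!!python/object:__main__.Point\nx: 1\ny: 2\n'
+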
+    def represent_ordered_dict(self, data):
+        # Provide uniform representation across different Python versions.
+        data_type = type(data)
+        tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+                % (data_type.__module__, data_type.__name__)
+        items = [[key, value] for key, value in data.items()]
+        return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+        Representer.represent_complex)
+
+Representer.add_representer(tuple,
+        Representer.represent_tuple)
+
+Representer.add_representer(type,
+        Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+        Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+        Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+        Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+        Representer.represent_module)
+
+Representer.add_multi_representer(object,
+        Representer.represent_object)
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/resolver.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/resolver.py
new file mode 100644
index 0000000000000000000000000000000000000000..013896d2f10619e0e75d2579cd63220338a7fef1
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+    pass
+
+class BaseResolver:
+
+    DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+    DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+    DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+    yaml_implicit_resolvers = {}
+    yaml_path_resolvers = {}
+
+    def __init__(self):
+        self.resolver_exact_paths = []
+        self.resolver_prefix_paths = []
+
+    @classmethod
+    def add_implicit_resolver(cls, tag, regexp, first):
+        if 'yaml_implicit_resolvers' not in cls.__dict__:
+            implicit_resolvers = {}
+            for key in cls.yaml_implicit_resolvers:
+                implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+            cls.yaml_implicit_resolvers = implicit_resolvers
+        if first is None:
+            first = [None]
+        for ch in first:
+            cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
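+    # Illustrative sketch (not part of upstream PyYAML): resolvers are
+    # bucketed by a scalar's first character, so `resolve` only tries the
+    # regexps whose `first` list matches.  Registering a resolver for a
+    # hypothetical tag would look like:
+    #
+    #   Resolver.add_implicit_resolver(
+    #       'tag:example.com,2020:hex',
+    #       re.compile(r'^0x[0-9a-fA-F]+$'),
+    #       list('0'))   # only tried for scalars starting with '0'
+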
+    @classmethod
+    def add_path_resolver(cls, tag, path, kind=None):
+        # Note: `add_path_resolver` is experimental.  The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered.  `path` elements are
+        # tuples `(node_check, index_check)`.  `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`.  `None`
+        # matches any kind of a node.  `index_check` could be `None`, a boolean
+        # value, a string value, or a number.  `None` and `False` match against
+        # any _value_ of sequence and mapping nodes.  `True` matches against
+        # any _key_ of a mapping node.  A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+        # equal to the `index_check` value.  An integer `index_check` matches
+        # against a sequence value with the index equal to `index_check`.
+        if 'yaml_path_resolvers' not in cls.__dict__:
+            cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+        new_path = []
+        for element in path:
+            if isinstance(element, (list, tuple)):
+                if len(element) == 2:
+                    node_check, index_check = element
+                elif len(element) == 1:
+                    node_check = element[0]
+                    index_check = True
+                else:
+                    raise ResolverError("Invalid path element: %s" % element)
+            else:
+                node_check = None
+                index_check = element
+            if node_check is str:
+                node_check = ScalarNode
+            elif node_check is list:
+                node_check = SequenceNode
+            elif node_check is dict:
+                node_check = MappingNode
+            elif node_check not in [ScalarNode, SequenceNode, MappingNode]  \
+                    and not isinstance(node_check, str) \
+                    and node_check is not None:
+                raise ResolverError("Invalid node checker: %s" % node_check)
+            if not isinstance(index_check, (str, int))  \
+                    and index_check is not None:
+                raise ResolverError("Invalid index checker: %s" % index_check)
+            new_path.append((node_check, index_check))
+        if kind is str:
+            kind = ScalarNode
+        elif kind is list:
+            kind = SequenceNode
+        elif kind is dict:
+            kind = MappingNode
+        elif kind not in [ScalarNode, SequenceNode, MappingNode]    \
+                and kind is not None:
+            raise ResolverError("Invalid node kind: %s" % kind)
+        cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
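+    # Illustrative sketch (not part of upstream PyYAML) of the path syntax
+    # described above: tag the scalar stored under a top-level 'version' key
+    # (the tag name is hypothetical):
+    #
+    #   Resolver.add_path_resolver(
+    #       'tag:example.com,2020:version',
+    #       ['version'],    # string index_check: match the 'version' key
+    #       kind=str)       # restrict the match to scalar nodes
+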
+    def descend_resolver(self, current_node, current_index):
+        if not self.yaml_path_resolvers:
+            return
+        exact_paths = {}
+        prefix_paths = []
+        if current_node:
+            depth = len(self.resolver_prefix_paths)
+            for path, kind in self.resolver_prefix_paths[-1]:
+                if self.check_resolver_prefix(depth, path, kind,
+                        current_node, current_index):
+                    if len(path) > depth:
+                        prefix_paths.append((path, kind))
+                    else:
+                        exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+        else:
+            for path, kind in self.yaml_path_resolvers:
+                if not path:
+                    exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+                else:
+                    prefix_paths.append((path, kind))
+        self.resolver_exact_paths.append(exact_paths)
+        self.resolver_prefix_paths.append(prefix_paths)
+
+    def ascend_resolver(self):
+        if not self.yaml_path_resolvers:
+            return
+        self.resolver_exact_paths.pop()
+        self.resolver_prefix_paths.pop()
+
+    def check_resolver_prefix(self, depth, path, kind,
+            current_node, current_index):
+        node_check, index_check = path[depth-1]
+        if isinstance(node_check, str):
+            if current_node.tag != node_check:
+                return
+        elif node_check is not None:
+            if not isinstance(current_node, node_check):
+                return
+        if index_check is True and current_index is not None:
+            return
+        if (index_check is False or index_check is None)    \
+                and current_index is None:
+            return
+        if isinstance(index_check, str):
+            if not (isinstance(current_index, ScalarNode)
+                    and index_check == current_index.value):
+                return
+        elif isinstance(index_check, int) and not isinstance(index_check, bool):
+            if index_check != current_index:
+                return
+        return True
+
+    def resolve(self, kind, value, implicit):
+        if kind is ScalarNode and implicit[0]:
+            if value == '':
+                resolvers = self.yaml_implicit_resolvers.get('', [])
+            else:
+                resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+            wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
+            for tag, regexp in resolvers + wildcard_resolvers:
+                if regexp.match(value):
+                    return tag
+            implicit = implicit[1]
+        if self.yaml_path_resolvers:
+            exact_paths = self.resolver_exact_paths[-1]
+            if kind in exact_paths:
+                return exact_paths[kind]
+            if None in exact_paths:
+                return exact_paths[None]
+        if kind is ScalarNode:
+            return self.DEFAULT_SCALAR_TAG
+        elif kind is SequenceNode:
+            return self.DEFAULT_SEQUENCE_TAG
+        elif kind is MappingNode:
+            return self.DEFAULT_MAPPING_TAG
+
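+# Illustrative note (not part of upstream PyYAML): `resolve` above tries, in
+# order, the implicit resolvers (plain scalars only), then any matching path
+# resolver, and finally falls back to the kind's default tag (!!str, !!seq
+# or !!map).
+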
+class Resolver(BaseResolver):
+    pass
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:bool',
+        re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+                    |true|True|TRUE|false|False|FALSE
+                    |on|On|ON|off|Off|OFF)$''', re.X),
+        list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:float',
+        re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
+                    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+                    |[-+]?\.(?:inf|Inf|INF)
+                    |\.(?:nan|NaN|NAN))$''', re.X),
+        list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:int',
+        re.compile(r'''^(?:[-+]?0b[0-1_]+
+                    |[-+]?0[0-7_]+
+                    |[-+]?(?:0|[1-9][0-9_]*)
+                    |[-+]?0x[0-9a-fA-F_]+
+                    |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+        list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:merge',
+        re.compile(r'^(?:<<)$'),
+        ['<'])
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:null',
+        re.compile(r'''^(?: ~
+                    |null|Null|NULL
+                    | )$''', re.X),
+        ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:timestamp',
+        re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+                    |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+                     (?:[Tt]|[ \t]+)[0-9][0-9]?
+                     :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+                     (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+        list('0123456789'))
+
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:value',
+        re.compile(r'^(?:=)$'),
+        ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+        'tag:yaml.org,2002:yaml',
+        re.compile(r'^(?:!|&|\*)$'),
+        list('!&*'))
+
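+# Illustrative sketch (not part of upstream PyYAML): the resolvers registered
+# above are what give untagged plain scalars their types on load, e.g.:
+#
+#   >>> import yaml
+#   >>> yaml.safe_load('[yes, 0x1A, 1.5, ~, 2001-12-14]')
+#   [True, 26, 1.5, None, datetime.date(2001, 12, 14)]
+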
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/scanner.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/scanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..7437ede1c608266aaca481955f438844479cab4f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/scanner.py
@@ -0,0 +1,1435 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+    pass
+
+class SimpleKey:
+    # See below simple keys treatment.
+
+    def __init__(self, token_number, required, index, line, column, mark):
+        self.token_number = token_number
+        self.required = required
+        self.index = index
+        self.line = line
+        self.column = column
+        self.mark = mark
+
+class Scanner:
+
+    def __init__(self):
+        """Initialize the scanner."""
+        # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for BOM and converting the
+        # input data to Unicode. It also adds a NUL to the end.
+        #
+        # Reader supports the following methods:
+        #   self.peek(i=0)       # peek the next i-th character
+        #   self.prefix(l=1)     # peek the next l characters
+        #   self.forward(l=1)    # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+        self.done = False
+
+        # The number of unclosed '{' and '['. `flow_level == 0` means block
+        # context.
+        self.flow_level = 0
+
+        # List of processed tokens that are not yet emitted.
+        self.tokens = []
+
+        # Add the STREAM-START token.
+        self.fetch_stream_start()
+
+        # Number of tokens that were emitted through the `get_token` method.
+        self.tokens_taken = 0
+
+        # The current indentation level.
+        self.indent = -1
+
+        # Past indentation levels.
+        self.indents = []
+
+        # Variables related to simple keys treatment.
+
+        # A simple key is a key that is not denoted by the '?' indicator.
+        # Example of simple keys:
+        #   ---
+        #   block simple key: value
+        #   ? not a simple key:
+        #   : { flow simple key: value }
+        # We emit the KEY token before all keys, so when we find a potential
+        # simple key, we try to locate the corresponding ':' indicator.
+        # Simple keys should be limited to a single line and 1024 characters.
+
+        # Can a simple key start at the current position? A simple key may
+        # start:
+        # - at the beginning of the line, not counting indentation spaces
+        #       (in block context),
+        # - after '{', '[', ',' (in the flow context),
+        # - after '?', ':', '-' (in the block context).
+        # In the block context, this flag also signifies if a block collection
+        # may start at the current position.
+        self.allow_simple_key = True
+
+        # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+        # for each level. The value is a SimpleKey record:
+        #   (token_number, required, index, line, column, mark)
+        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+        # '[', or '{' tokens.
+        self.possible_simple_keys = {}
+
+    # Public methods.
+
+    def check_token(self, *choices):
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+        # Return None if no more tokens.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            return self.tokens[0]
+        else:
+            return None
+
+    def get_token(self):
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
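+    # Illustrative sketch (not part of upstream PyYAML): the three methods
+    # above are the scanner's whole public surface; a parser drives it
+    # roughly like:
+    #
+    #   while not scanner.check_token(StreamEndToken):
+    #       token = scanner.get_token()
+    #       ...  # feed the token into the parser state machine
+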
+    # Private methods.
+
+    def need_more_tokens(self):
+        if self.done:
+            return False
+        if not self.tokens:
+            return True
+        # The current token may be a potential simple key, so we
+        # need to look further.
+        self.stale_possible_simple_keys()
+        if self.next_possible_simple_key() == self.tokens_taken:
+            return True
+
+    def fetch_more_tokens(self):
+
+        # Eat whitespaces and comments until we reach the next token.
+        self.scan_to_next_token()
+
+        # Remove obsolete possible simple keys.
+        self.stale_possible_simple_keys()
+
+        # Compare the current indentation and column. It may add some tokens
+        # and decrease the current indentation level.
+        self.unwind_indent(self.column)
+
+        # Peek the next character.
+        ch = self.peek()
+
+        # Is it the end of stream?
+        if ch == '\0':
+            return self.fetch_stream_end()
+
+        # Is it a directive?
+        if ch == '%' and self.check_directive():
+            return self.fetch_directive()
+
+        # Is it the document start?
+        if ch == '-' and self.check_document_start():
+            return self.fetch_document_start()
+
+        # Is it the document end?
+        if ch == '.' and self.check_document_end():
+            return self.fetch_document_end()
+
+        # TODO: support for BOM within a stream.
+        #if ch == '\uFEFF':
+        #    return self.fetch_bom()    <-- issue BOMToken
+
+        # Note: the order of the following checks is NOT significant.
+
+        # Is it the flow sequence start indicator?
+        if ch == '[':
+            return self.fetch_flow_sequence_start()
+
+        # Is it the flow mapping start indicator?
+        if ch == '{':
+            return self.fetch_flow_mapping_start()
+
+        # Is it the flow sequence end indicator?
+        if ch == ']':
+            return self.fetch_flow_sequence_end()
+
+        # Is it the flow mapping end indicator?
+        if ch == '}':
+            return self.fetch_flow_mapping_end()
+
+        # Is it the flow entry indicator?
+        if ch == ',':
+            return self.fetch_flow_entry()
+
+        # Is it the block entry indicator?
+        if ch == '-' and self.check_block_entry():
+            return self.fetch_block_entry()
+
+        # Is it the key indicator?
+        if ch == '?' and self.check_key():
+            return self.fetch_key()
+
+        # Is it the value indicator?
+        if ch == ':' and self.check_value():
+            return self.fetch_value()
+
+        # Is it an alias?
+        if ch == '*':
+            return self.fetch_alias()
+
+        # Is it an anchor?
+        if ch == '&':
+            return self.fetch_anchor()
+
+        # Is it a tag?
+        if ch == '!':
+            return self.fetch_tag()
+
+        # Is it a literal scalar?
+        if ch == '|' and not self.flow_level:
+            return self.fetch_literal()
+
+        # Is it a folded scalar?
+        if ch == '>' and not self.flow_level:
+            return self.fetch_folded()
+
+        # Is it a single quoted scalar?
+        if ch == '\'':
+            return self.fetch_single()
+
+        # Is it a double quoted scalar?
+        if ch == '\"':
+            return self.fetch_double()
+
+        # It must be a plain scalar then.
+        if self.check_plain():
+            return self.fetch_plain()
+
+        # No? It's an error. Let's produce a nice error message.
+        raise ScannerError("while scanning for the next token", None,
+                "found character %r that cannot start any token" % ch,
+                self.get_mark())
+
+    # Simple keys treatment.
+
+    def next_possible_simple_key(self):
+        # Return the number of the nearest possible simple key. Actually we
+        # don't need to loop through the whole dictionary. We may replace it
+        # with the following code:
+        #   if not self.possible_simple_keys:
+        #       return None
+        #   return self.possible_simple_keys[
+        #           min(self.possible_simple_keys.keys())].token_number
+        min_token_number = None
+        for level in self.possible_simple_keys:
+            key = self.possible_simple_keys[level]
+            if min_token_number is None or key.token_number < min_token_number:
+                min_token_number = key.token_number
+        return min_token_number
+
+    def stale_possible_simple_keys(self):
+        # Remove entries that are no longer possible simple keys. According to
+        # the YAML specification, simple keys
+        # - should be limited to a single line,
+        # - should be no longer than 1024 characters.
+        # Disabling this procedure will allow simple keys of any length and
+        # height (may cause problems if indentation is broken though).
+        for level in list(self.possible_simple_keys):
+            key = self.possible_simple_keys[level]
+            if key.line != self.line  \
+                    or self.index-key.index > 1024:
+                if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.column
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken+len(self.tokens)
+            key = SimpleKey(token_number, required,
+                    self.index, self.line, self.column, self.get_mark())
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according to
+        ## the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ## key : {
+        ## }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark,
+            encoding=self.encoding))
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+        self.tokens.append(self.scan_directive())
+
+    def fetch_document_start(self):
+        self.fetch_document_indicator(DocumentStartToken)
+
+    def fetch_document_end(self):
+        self.fetch_document_indicator(DocumentEndToken)
+
+    def fetch_document_indicator(self, TokenClass):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there could not be a block collection
+        # after '---'.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Add DOCUMENT-START or DOCUMENT-END.
+        start_mark = self.get_mark()
+        self.forward(3)
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_start(self):
+        self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+    def fetch_flow_mapping_start(self):
+        self.fetch_flow_collection_start(FlowMappingStartToken)
+
+    def fetch_flow_collection_start(self, TokenClass):
+
+        # '[' and '{' may start a simple key.
+        self.save_possible_simple_key()
+
+        # Increase the flow level.
+        self.flow_level += 1
+
+        # Simple keys are allowed after '[' and '{'.
+        self.allow_simple_key = True
+
+        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_sequence_end(self):
+        self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+    def fetch_flow_mapping_end(self):
+        self.fetch_flow_collection_end(FlowMappingEndToken)
+
+    def fetch_flow_collection_end(self, TokenClass):
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Decrease the flow level.
+        self.flow_level -= 1
+
+        # No simple keys after ']' or '}'.
+        self.allow_simple_key = False
+
+        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(TokenClass(start_mark, end_mark))
+
+    def fetch_flow_entry(self):
+
+        # Simple keys are allowed after ','.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add FLOW-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+    def fetch_block_entry(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a new entry?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "sequence entries are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-SEQUENCE-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+        # It's an error for the block entry to occur in the flow context,
+        # but we let the parser detect this.
+        else:
+            pass
+
+        # Simple keys are allowed after '-'.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add BLOCK-ENTRY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+    def fetch_key(self):
+
+        # Block context needs additional checks.
+        if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+            if not self.allow_simple_key:
+                raise ScannerError(None, None,
+                        "mapping keys are not allowed here",
+                        self.get_mark())
+
+            # We may need to add BLOCK-MAPPING-START.
+            if self.add_indent(self.column):
+                mark = self.get_mark()
+                self.tokens.append(BlockMappingStartToken(mark, mark))
+
+        # Simple keys are allowed after '?' in the block context.
+        self.allow_simple_key = not self.flow_level
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Add KEY.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(KeyToken(start_mark, end_mark))
+
+    def fetch_value(self):
+
+        # Do we determine a simple key?
+        if self.flow_level in self.possible_simple_keys:
+
+            # Add KEY.
+            key = self.possible_simple_keys[self.flow_level]
+            del self.possible_simple_keys[self.flow_level]
+            self.tokens.insert(key.token_number-self.tokens_taken,
+                    KeyToken(key.mark, key.mark))
+
+            # If this key starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.
+            if not self.flow_level:
+                if self.add_indent(key.column):
+                    self.tokens.insert(key.token_number-self.tokens_taken,
+                            BlockMappingStartToken(key.mark, key.mark))
+
+            # There cannot be two simple keys one after another.
+            self.allow_simple_key = False
+
+        # It must be a part of a complex key.
+        else:
+
+            # Block context needs additional checks.
+            # (Do we really need them? They will be caught by the parser
+            # anyway.)
+            if not self.flow_level:
+
+                # We are allowed to start a complex value if and only if
+                # we can start a simple key.
+                if not self.allow_simple_key:
+                    raise ScannerError(None, None,
+                            "mapping values are not allowed here",
+                            self.get_mark())
+
+            # If this value starts a new block mapping, we need to add
+            # BLOCK-MAPPING-START.  It will be detected as an error later by
+            # the parser.
+            if not self.flow_level:
+                if self.add_indent(self.column):
+                    mark = self.get_mark()
+                    self.tokens.append(BlockMappingStartToken(mark, mark))
+
+            # Simple keys are allowed after ':' in the block context.
+            self.allow_simple_key = not self.flow_level
+
+            # Reset possible simple key on the current level.
+            self.remove_possible_simple_key()
+
+        # Add VALUE.
+        start_mark = self.get_mark()
+        self.forward()
+        end_mark = self.get_mark()
+        self.tokens.append(ValueToken(start_mark, end_mark))
+
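+    # Illustrative note (not part of upstream PyYAML): the retroactive insert
+    # above is what lets `a: 1` scan without lookahead; the scanner records
+    # where the potential key started and, on reaching ':', splices a KEY
+    # token in front of the already-queued SCALAR token for 'a'.
+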
+    def fetch_alias(self):
+
+        # ALIAS could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after ALIAS.
+        self.allow_simple_key = False
+
+        # Scan and add ALIAS.
+        self.tokens.append(self.scan_anchor(AliasToken))
+
+    def fetch_anchor(self):
+
+        # ANCHOR could start a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after ANCHOR.
+        self.allow_simple_key = False
+
+        # Scan and add ANCHOR.
+        self.tokens.append(self.scan_anchor(AnchorToken))
+
+    def fetch_tag(self):
+
+        # TAG could start a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after TAG.
+        self.allow_simple_key = False
+
+        # Scan and add TAG.
+        self.tokens.append(self.scan_tag())
+
+    def fetch_literal(self):
+        self.fetch_block_scalar(style='|')
+
+    def fetch_folded(self):
+        self.fetch_block_scalar(style='>')
+
+    def fetch_block_scalar(self, style):
+
+        # A simple key may follow a block scalar.
+        self.allow_simple_key = True
+
+        # Reset possible simple key on the current level.
+        self.remove_possible_simple_key()
+
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_block_scalar(style))
+
+    def fetch_single(self):
+        self.fetch_flow_scalar(style='\'')
+
+    def fetch_double(self):
+        self.fetch_flow_scalar(style='"')
+
+    def fetch_flow_scalar(self, style):
+
+        # A flow scalar could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after flow scalars.
+        self.allow_simple_key = False
+
+        # Scan and add SCALAR.
+        self.tokens.append(self.scan_flow_scalar(style))
+
+    def fetch_plain(self):
+
+        # A plain scalar could be a simple key.
+        self.save_possible_simple_key()
+
+        # No simple keys after plain scalars. But note that `scan_plain` will
+        # change this flag if the scan is finished at the beginning of the
+        # line.
+        self.allow_simple_key = False
+
+        # Scan and add SCALAR. May change `allow_simple_key`.
+        self.tokens.append(self.scan_plain())
+
+    # Checkers.
+
+    def check_directive(self):
+
+        # DIRECTIVE:        ^ '%' ...
+        # The '%' indicator is already checked.
+        if self.column == 0:
+            return True
+
+    def check_document_start(self):
+
+        # DOCUMENT-START:   ^ '---' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '---'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_document_end(self):
+
+        # DOCUMENT-END:     ^ '...' (' '|'\n')
+        if self.column == 0:
+            if self.prefix(3) == '...'  \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return True
+
+    def check_block_entry(self):
+
+        # BLOCK-ENTRY:      '-' (' '|'\n')
+        return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_key(self):
+
+        # KEY(flow context):    '?'
+        if self.flow_level:
+            return True
+
+        # KEY(block context):   '?' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_value(self):
+
+        # VALUE(flow context):  ':'
+        if self.flow_level:
+            return True
+
+        # VALUE(block context): ':' (' '|'\n')
+        else:
+            return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+    def check_plain(self):
+
+        # A plain scalar may start with any non-space character except:
+        #   '-', '?', ':', ',', '[', ']', '{', '}',
+        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
+        #   '%', '@', '`'.
+        #
+        # It may also start with
+        #   '-', '?', ':'
+        # if it is followed by a non-space character.
+        #
+        # Note that we limit the last rule to the block context (except the
+        # '-' character) because we want the flow context to be space
+        # independent.
+        ch = self.peek()
+        return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
+                or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+                        and (ch == '-' or (not self.flow_level and ch in '?:')))
+
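+    # Illustrative examples (not part of upstream PyYAML) of the plain-scalar
+    # rule above:
+    #   -foo     plain scalar ('-' followed by a non-space character)
+    #   - foo    BLOCK-ENTRY, then the plain scalar 'foo'
+    #   ?bar     plain scalar in block context, KEY indicator in flow context
+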
+    # Scanners.
+
+    def scan_to_next_token(self):
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
+
+        if self.index == 0 and self.peek() == '\uFEFF':
+            self.forward()
+        found = False
+        while not found:
+            while self.peek() == ' ':
+                self.forward()
+            if self.peek() == '#':
+                while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                    self.forward()
+            if self.scan_line_break():
+                if not self.flow_level:
+                    self.allow_simple_key = True
+            else:
+                found = True
+
+    def scan_directive(self):
+        # See the specification for details.
+        start_mark = self.get_mark()
+        self.forward()
+        name = self.scan_directive_name(start_mark)
+        value = None
+        if name == 'YAML':
+            value = self.scan_yaml_directive_value(start_mark)
+            end_mark = self.get_mark()
+        elif name == 'TAG':
+            value = self.scan_tag_directive_value(start_mark)
+            end_mark = self.get_mark()
+        else:
+            end_mark = self.get_mark()
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        self.scan_directive_ignored_line(start_mark)
+        return DirectiveToken(name, value, start_mark, end_mark)
+
+    def scan_directive_name(self, start_mark):
+        # See the specification for details.
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        return value
+
+    def scan_yaml_directive_value(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        major = self.scan_yaml_directive_number(start_mark)
+        if self.peek() != '.':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit or '.', but found %r" % self.peek(),
+                    self.get_mark())
+        self.forward()
+        minor = self.scan_yaml_directive_number(start_mark)
+        if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit or ' ', but found %r" % self.peek(),
+                    self.get_mark())
+        return (major, minor)
+
+    def scan_yaml_directive_number(self, start_mark):
+        # See the specification for details.
+        ch = self.peek()
+        if not ('0' <= ch <= '9'):
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a digit, but found %r" % ch, self.get_mark())
+        length = 0
+        while '0' <= self.peek(length) <= '9':
+            length += 1
+        value = int(self.prefix(length))
+        self.forward(length)
+        return value
+
+    def scan_tag_directive_value(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        handle = self.scan_tag_directive_handle(start_mark)
+        while self.peek() == ' ':
+            self.forward()
+        prefix = self.scan_tag_directive_prefix(start_mark)
+        return (handle, prefix)
+
+    def scan_tag_directive_handle(self, start_mark):
+        # See the specification for details.
+        value = self.scan_tag_handle('directive', start_mark)
+        ch = self.peek()
+        if ch != ' ':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        return value
+
+    def scan_tag_directive_prefix(self, start_mark):
+        # See the specification for details.
+        value = self.scan_tag_uri('directive', start_mark)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        return value
+
+    def scan_directive_ignored_line(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0\r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a directive", start_mark,
+                    "expected a comment or a line break, but found %r"
+                        % ch, self.get_mark())
+        self.scan_line_break()
+
+    def scan_anchor(self, TokenClass):
+        # The specification does not restrict characters for anchors and
+        # aliases. This may lead to problems, for instance, the document:
+        #   [ *alias, value ]
+        # can be interpreted in two ways, as
+        #   [ "value" ]
+        # and
+        #   [ *alias , "value" ]
+        # Therefore we restrict aliases to numbers and ASCII letters.
+        start_mark = self.get_mark()
+        indicator = self.peek()
+        if indicator == '*':
+            name = 'alias'
+        else:
+            name = 'anchor'
+        self.forward()
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-_':
+            length += 1
+            ch = self.peek(length)
+        if not length:
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        value = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+            raise ScannerError("while scanning an %s" % name, start_mark,
+                    "expected alphabetic or numeric character, but found %r"
+                    % ch, self.get_mark())
+        end_mark = self.get_mark()
+        return TokenClass(value, start_mark, end_mark)
+
+    def scan_tag(self):
+        # See the specification for details.
+        start_mark = self.get_mark()
+        ch = self.peek(1)
+        if ch == '<':
+            handle = None
+            self.forward(2)
+            suffix = self.scan_tag_uri('tag', start_mark)
+            if self.peek() != '>':
+                raise ScannerError("while parsing a tag", start_mark,
+                        "expected '>', but found %r" % self.peek(),
+                        self.get_mark())
+            self.forward()
+        elif ch in '\0 \t\r\n\x85\u2028\u2029':
+            handle = None
+            suffix = '!'
+            self.forward()
+        else:
+            length = 1
+            use_handle = False
+            while ch not in '\0 \r\n\x85\u2028\u2029':
+                if ch == '!':
+                    use_handle = True
+                    break
+                length += 1
+                ch = self.peek(length)
+            if use_handle:
+                handle = self.scan_tag_handle('tag', start_mark)
+            else:
+                handle = '!'
+                self.forward()
+            suffix = self.scan_tag_uri('tag', start_mark)
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a tag", start_mark,
+                    "expected ' ', but found %r" % ch, self.get_mark())
+        value = (handle, suffix)
+        end_mark = self.get_mark()
+        return TagToken(value, start_mark, end_mark)
+
+    def scan_block_scalar(self, style):
+        # See the specification for details.
+
+        if style == '>':
+            folded = True
+        else:
+            folded = False
+
+        chunks = []
+        start_mark = self.get_mark()
+
+        # Scan the header.
+        self.forward()
+        chomping, increment = self.scan_block_scalar_indicators(start_mark)
+        self.scan_block_scalar_ignored_line(start_mark)
+
+        # Determine the indentation level and go to the first non-empty line.
+        min_indent = self.indent+1
+        if min_indent < 1:
+            min_indent = 1
+        if increment is None:
+            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+            indent = max(min_indent, max_indent)
+        else:
+            indent = min_indent+increment-1
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+        line_break = ''
+
+        # Scan the inner part of the block scalar.
+        while self.column == indent and self.peek() != '\0':
+            chunks.extend(breaks)
+            leading_non_space = self.peek() not in ' \t'
+            length = 0
+            while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+                length += 1
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            line_break = self.scan_line_break()
+            breaks, end_mark = self.scan_block_scalar_breaks(indent)
+            if self.column == indent and self.peek() != '\0':
+
+                # Unfortunately, folding rules are ambiguous.
+                #
+                # This is the folding according to the specification:
+
+                if folded and line_break == '\n'    \
+                        and leading_non_space and self.peek() not in ' \t':
+                    if not breaks:
+                        chunks.append(' ')
+                else:
+                    chunks.append(line_break)
+
+                # This is Clark Evans's interpretation (also in the spec
+                # examples):
+                #
+                #if folded and line_break == '\n':
+                #    if not breaks:
+                #        if self.peek() not in ' \t':
+                #            chunks.append(' ')
+                #        else:
+                #            chunks.append(line_break)
+                #else:
+                #    chunks.append(line_break)
+            else:
+                break
+
+        # Chomp the tail.
+        if chomping is not False:
+            chunks.append(line_break)
+        if chomping is True:
+            chunks.extend(breaks)
+
+        # We are done.
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    def scan_block_scalar_indicators(self, start_mark):
+        # See the specification for details.
+        chomping = None
+        increment = None
+        ch = self.peek()
+        if ch in '+-':
+            if ch == '+':
+                chomping = True
+            else:
+                chomping = False
+            self.forward()
+            ch = self.peek()
+            if ch in '0123456789':
+                increment = int(ch)
+                if increment == 0:
+                    raise ScannerError("while scanning a block scalar", start_mark,
+                            "expected indentation indicator in the range 1-9, but found 0",
+                            self.get_mark())
+                self.forward()
+        elif ch in '0123456789':
+            increment = int(ch)
+            if increment == 0:
+                raise ScannerError("while scanning a block scalar", start_mark,
+                        "expected indentation indicator in the range 1-9, but found 0",
+                        self.get_mark())
+            self.forward()
+            ch = self.peek()
+            if ch in '+-':
+                if ch == '+':
+                    chomping = True
+                else:
+                    chomping = False
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0 \r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected chomping or indentation indicators, but found %r"
+                    % ch, self.get_mark())
+        return chomping, increment
+
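+    # Illustrative examples (not part of upstream PyYAML) of the block scalar
+    # headers parsed above:
+    #   |+2   literal, keep trailing line breaks, content indented 2 deeper
+    #   >-    folded, strip the final line break
+    #   |     literal with default "clip" chomping (chomping is None)
+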
+    def scan_block_scalar_ignored_line(self, start_mark):
+        # See the specification for details.
+        while self.peek() == ' ':
+            self.forward()
+        if self.peek() == '#':
+            while self.peek() not in '\0\r\n\x85\u2028\u2029':
+                self.forward()
+        ch = self.peek()
+        if ch not in '\0\r\n\x85\u2028\u2029':
+            raise ScannerError("while scanning a block scalar", start_mark,
+                    "expected a comment or a line break, but found %r" % ch,
+                    self.get_mark())
+        self.scan_line_break()
+
+    def scan_block_scalar_indentation(self):
+        # See the specification for details.
+        chunks = []
+        max_indent = 0
+        end_mark = self.get_mark()
+        while self.peek() in ' \r\n\x85\u2028\u2029':
+            if self.peek() != ' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.get_mark()
+            else:
+                self.forward()
+                if self.column > max_indent:
+                    max_indent = self.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # See the specification for details.
+        chunks = []
+        end_mark = self.get_mark()
+        while self.column < indent and self.peek() == ' ':
+            self.forward()
+        while self.peek() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.get_mark()
+            while self.column < indent and self.peek() == ' ':
+                self.forward()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we loosen indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because '"' and "'"
+        # clearly mark their beginning and end. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
+        if style == '"':
+            double = True
+        else:
+            double = False
+        chunks = []
+        start_mark = self.get_mark()
+        quote = self.peek()
+        self.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while self.peek() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.forward()
+        end_mark = self.get_mark()
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    ESCAPE_REPLACEMENTS = {
+        '0':    '\0',
+        'a':    '\x07',
+        'b':    '\x08',
+        't':    '\x09',
+        '\t':   '\x09',
+        'n':    '\x0A',
+        'v':    '\x0B',
+        'f':    '\x0C',
+        'r':    '\x0D',
+        'e':    '\x1B',
+        ' ':    '\x20',
+        '\"':   '\"',
+        '\\':   '\\',
+        '/':    '/',
+        'N':    '\x85',
+        '_':    '\xA0',
+        'L':    '\u2028',
+        'P':    '\u2029',
+    }
+
+    ESCAPE_CODES = {
+        'x':    2,
+        'u':    4,
+        'U':    8,
+    }
+
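+    # Illustrative examples (not part of upstream PyYAML) of the escape
+    # tables above, as they apply inside double-quoted scalars:
+    #   "\x41"    -> 'A'       (ESCAPE_CODES['x']: 2 hex digits)
+    #   "\u263A"  -> '\u263a'  (4 hex digits)
+    #   "\n"      -> a line feed, via ESCAPE_REPLACEMENTS['n']
+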
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            length = 0
+            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+                length += 1
+            if length:
+                chunks.append(self.prefix(length))
+                self.forward(length)
+            ch = self.peek()
+            if not double and ch == '\'' and self.peek(1) == '\'':
+                chunks.append('\'')
+                self.forward(2)
+            elif (double and ch == '\'') or (not double and ch in '\"\\'):
+                chunks.append(ch)
+                self.forward()
+            elif double and ch == '\\':
+                self.forward()
+                ch = self.peek()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    self.forward()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    self.forward()
+                    for k in range(length):
+                        if self.peek(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal digits, but found %r" %
+                                        (length, self.peek(k)), self.get_mark())
+                    code = int(self.prefix(length), 16)
+                    chunks.append(chr(code))
+                    self.forward(length)
+                elif ch in '\r\n\x85\u2028\u2029':
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                            "found unknown escape character %r" % ch, self.get_mark())
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        length = 0
+        while self.peek(length) in ' \t':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch == '\0':
+            raise ScannerError("while scanning a quoted scalar", start_mark,
+                    "found unexpected end of stream", self.get_mark())
+        elif ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
+            prefix = self.prefix(3)
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                raise ScannerError("while scanning a quoted scalar", start_mark,
+                        "found unexpected document separator", self.get_mark())
+            while self.peek() in ' \t':
+                self.forward()
+            if self.peek() in '\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',' or '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+        chunks = []
+        start_mark = self.get_mark()
+        end_mark = start_mark
+        indent = self.indent+1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        #if indent == 0:
+        #    indent = 1
+        spaces = []
+        while True:
+            length = 0
+            if self.peek() == '#':
+                break
+            while True:
+                ch = self.peek(length)
+                if ch in '\0 \t\r\n\x85\u2028\u2029'    \
+                        or (ch == ':' and
+                                self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+                                      + (u',[]{}' if self.flow_level else u''))\
+                        or (self.flow_level and ch in ',?[]{}'):
+                    break
+                length += 1
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            end_mark = self.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if not spaces or self.peek() == '#' \
+                    or (not self.flow_level and self.column < indent):
+                break
+        return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
+        chunks = []
+        length = 0
+        while self.peek(length) in ' ':
+            length += 1
+        whitespaces = self.prefix(length)
+        self.forward(length)
+        ch = self.peek()
+        if ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            self.allow_simple_key = True
+            prefix = self.prefix(3)
+            if (prefix == '---' or prefix == '...')   \
+                    and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                return
+            breaks = []
+            while self.peek() in ' \r\n\x85\u2028\u2029':
+                if self.peek() == ' ':
+                    self.forward()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.prefix(3)
+                    if (prefix == '---' or prefix == '...')   \
+                            and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+                        return
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
+        ch = self.peek()
+        if ch != '!':
+            raise ScannerError("while scanning a %s" % name, start_mark,
+                    "expected '!', but found %r" % ch, self.get_mark())
+        length = 1
+        ch = self.peek(length)
+        if ch != ' ':
+            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                    or ch in '-_':
+                length += 1
+                ch = self.peek(length)
+            if ch != '!':
+                self.forward(length)
+                raise ScannerError("while scanning a %s" % name, start_mark,
+                        "expected '!', but found %r" % ch, self.get_mark())
+            length += 1
+        value = self.prefix(length)
+        self.forward(length)
+        return value
+
+    def scan_tag_uri(self, name, start_mark):
+        # See the specification for details.
+        # Note: we do not check if URI is well-formed.
+        chunks = []
+        length = 0
+        ch = self.peek(length)
+        while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z'  \
+                or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+            if ch == '%':
+                chunks.append(self.prefix(length))
+                self.forward(length)
+                length = 0
+                chunks.append(self.scan_uri_escapes(name, start_mark))
+            else:
+                length += 1
+            ch = self.peek(length)
+        if length:
+            chunks.append(self.prefix(length))
+            self.forward(length)
+            length = 0
+        if not chunks:
+            raise ScannerError("while parsing a %s" % name, start_mark,
+                    "expected URI, but found %r" % ch, self.get_mark())
+        return ''.join(chunks)
+
+    def scan_uri_escapes(self, name, start_mark):
+        # See the specification for details.
+        codes = []
+        mark = self.get_mark()
+        while self.peek() == '%':
+            self.forward()
+            for k in range(2):
+                if self.peek(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal digits, but found %r"
+                            % self.peek(k), self.get_mark())
+            codes.append(int(self.prefix(2), 16))
+            self.forward(2)
+        try:
+            value = bytes(codes).decode('utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+        return value
+
+    def scan_line_break(self):
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'   :   '\u2029'
+        #   default     :   ''
+        ch = self.peek()
+        if ch in '\r\n\x85':
+            if self.prefix(2) == '\r\n':
+                self.forward(2)
+            else:
+                self.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.forward()
+            return ch
+        return ''
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/serializer.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe911e67ae7a739abb491fbbc6834b9c37bbda4b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+    pass
+
+class Serializer:
+
+    ANCHOR_TEMPLATE = 'id%03d'
+
+    def __init__(self, encoding=None,
+            explicit_start=None, explicit_end=None, version=None, tags=None):
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        self.use_version = version
+        self.use_tags = tags
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+        self.closed = None
+
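+    # Typical driver sequence (illustrative; in PyYAML this class is a mixin
+    # of the dumper, whose other bases provide emit(), resolve(),
+    # descend_resolver() and ascend_resolver()):
+    #
+    #   serializer.open()
+    #   serializer.serialize(node)   # once per document; may be repeated
+    #   serializer.close()
+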
+    def open(self):
+        if self.closed is None:
+            self.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emit(StreamEndEvent())
+            self.closed = True
+
+    #def __del__(self):
+    #    self.close()
+
+    def serialize(self, node):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+            version=self.use_version, tags=self.use_tags))
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            self.anchors[node] = None
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        self.last_anchor_id += 1
+        return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+    def serialize_node(self, node, parent, index):
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolve(ScalarNode, node.value, (False, True))
+                implicit = (node.tag == detected_tag), (node.tag == default_tag)
+                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+                    style=node.style))
+            elif isinstance(node, SequenceNode):
+                implicit = (node.tag
+                            == self.resolve(SequenceNode, node.value, True))
+                self.emit(SequenceStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emit(SequenceEndEvent())
+            elif isinstance(node, MappingNode):
+                implicit = (node.tag
+                            == self.resolve(MappingNode, node.value, True))
+                self.emit(MappingStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emit(MappingEndEvent())
+            self.ascend_resolver()
+
diff --git a/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/tokens.py b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/tokens.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d0b48a394ac8c019b401516a12f688df361cf90
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/grafana-operator/venv/yaml/tokens.py
@@ -0,0 +1,104 @@
+
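+# Tokens are produced by the scanner and consumed by the parser; start_mark
+# and end_mark record source positions for error reporting.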
+class Token(object):
+    def __init__(self, start_mark, end_mark):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in self.__dict__
+                if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+#    id = '<byte order mark>'
+
+class DirectiveToken(Token):
+    id = '<directive>'
+    def __init__(self, name, value, start_mark, end_mark):
+        self.name = name
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+    id = '<document start>'
+
+class DocumentEndToken(Token):
+    id = '<document end>'
+
+class StreamStartToken(Token):
+    id = '<stream start>'
+    def __init__(self, start_mark=None, end_mark=None,
+            encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndToken(Token):
+    id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+    id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+    id = '<block mapping start>'
+
+class BlockEndToken(Token):
+    id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+    id = '['
+
+class FlowMappingStartToken(Token):
+    id = '{'
+
+class FlowSequenceEndToken(Token):
+    id = ']'
+
+class FlowMappingEndToken(Token):
+    id = '}'
+
+class KeyToken(Token):
+    id = '?'
+
+class ValueToken(Token):
+    id = ':'
+
+class BlockEntryToken(Token):
+    id = '-'
+
+class FlowEntryToken(Token):
+    id = ','
+
+class AliasToken(Token):
+    id = '<alias>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class AnchorToken(Token):
+    id = '<anchor>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class TagToken(Token):
+    id = '<tag>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class ScalarToken(Token):
+    id = '<scalar>'
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        self.value = value
+        self.plain = plain
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/.flake8 b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+  venv
+  .git
+  build
+  dist
+  *.egg_info
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/.gitignore b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7d315ecbda5024f3f81756c91caa6d7256970db0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.gitignore
@@ -0,0 +1,4 @@
+build
+*.charm
+.idea
+__pycache__
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/.jujuignore b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.jujuignore
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd559eabeae93e4d23215fa450130fa9b37ace
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/.jujuignore
@@ -0,0 +1,3 @@
+/venv
+*.py[cod]
+*.charm
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/LICENSE b/squid_cnf/juju-bundles/charms/ops/grafana-operator/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/README.md b/squid_cnf/juju-bundles/charms/ops/grafana-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f3abb7fe9ce429ce54cc9009e93e1efede56fec
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/README.md
@@ -0,0 +1,64 @@
+# Grafana Charm
+
+## Description
+
+This is the Grafana charm for Kubernetes using the Operator Framework.
+
+## Usage
+
+Initial setup (ensure microk8s is a clean slate with `microk8s.reset` or a fresh install with `snap install microk8s --classic`):
+```bash
+microk8s.enable dns storage registry dashboard
+juju bootstrap microk8s mk8s
+juju add-model lma
+juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+```
+
+Deploy Grafana on its own:
+```bash
+git clone git@github.com:canonical/grafana-operator.git
+cd grafana-operator
+charmcraft build
+juju deploy ./grafana.charm --resource grafana-image=grafana/grafana:7.2.1
+```
+
+View the dashboard in a browser:
+1. `juju status` to check the IP address of the running Grafana application (see the example below)
+2. Navigate to `http://IP_ADDRESS:3000`
+3. Log in with the default credentials: username `admin`, password `admin`.
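+
+For example, the address can be read from `juju status` output (the `grafana` application name matches the deployment above):
+```bash
+juju status grafana   # the unit address appears in the output
+```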
+
+Add Prometheus as a datasource:
+```bash
+git clone git@github.com:canonical/prometheus-operator.git
+cd prometheus-operator
+charmcraft build
+juju deploy ./prometheus.charm
+juju add-relation grafana prometheus
+watch -c juju status --color  # wait for things to settle down
+```
+> Once the deployed charm and relation settle, you should see Prometheus data propagating to the Grafana dashboard.
+
+### High Availability Grafana
+
+This charm is written to support a high-availability Grafana cluster, but a database relation (MySQL or PostgreSQL) is required; a sketch of what this could look like follows the note below.
+
+If HA is not required, there is no need to add a database relation.
+
+> NOTE: HA support in this charm should not yet be considered production-ready.
+
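+A minimal sketch of enabling HA, assuming a hypothetical `mysql` application that provides the required database relation (names and endpoints are illustrative, not taken from this charm's metadata):
+```bash
+juju deploy mysql             # hypothetical database charm
+juju add-relation grafana mysql
+watch -c juju status --color  # wait for things to settle down
+```
+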
+...
+
+## Developing
+
+Create and activate a virtualenv,
+and install the development requirements,
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/config.yaml b/squid_cnf/juju-bundles/charms/ops/grafana-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c91c65a3567d2fba3572c126f52f9f626c2ef05f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/config.yaml
@@ -0,0 +1,11 @@
+options:
+    port:
+        description: The port Grafana will listen on
+        type: int
+        default: 3000
+    grafana_log_level:
+        type: string
+        description: |
+            Logging level for Grafana. Options are "debug", "info",
+            "warn", "error", and "critical".
+        default: info
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/icon.svg b/squid_cnf/juju-bundles/charms/ops/grafana-operator/icon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..2ad84eebbd3188fa28bb7f2379b78ce1a0a1933f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/icon.svg
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Generator: Adobe Illustrator 23.0.4, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+<svg id="Layer_1" style="enable-background:new 0 0 85.12 92.46" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" height="250px" viewBox="0 0 85.12 92.46" width="250px" version="1.1" y="0px" x="0px" xmlns:xlink="http://www.w3.org/1999/xlink">
+<style type="text/css">
+	.st0{fill:url(#SVGID_1_);}
+</style>
+<linearGradient id="SVGID_1_" y2="28.783" gradientUnits="userSpaceOnUse" x2="42.562" y1="113.26" x1="42.562">
+	<stop stop-color="#FFF200" offset="0"/>
+	<stop stop-color="#F15A29" offset="1"/>
+</linearGradient>
+<path class="st0" d="m85.01 40.8c-0.14-1.55-0.41-3.35-0.93-5.32-0.51-1.97-1.28-4.13-2.39-6.37-1.12-2.24-2.57-4.57-4.47-6.82-0.74-0.88-1.54-1.76-2.42-2.6 1.3-5.17-1.59-9.65-1.59-9.65-4.98-0.31-8.14 1.54-9.31 2.39-0.2-0.08-0.39-0.17-0.59-0.25-0.85-0.34-1.72-0.66-2.61-0.95-0.89-0.28-1.81-0.54-2.74-0.76-0.94-0.22-1.89-0.4-2.86-0.55-0.17-0.03-0.34-0.05-0.51-0.07-2.18-6.95-8.41-9.85-8.41-9.85-6.95 4.41-8.27 10.57-8.27 10.57s-0.03 0.14-0.07 0.36c-0.38 0.11-0.77 0.22-1.15 0.34-0.53 0.16-1.06 0.36-1.59 0.55-0.53 0.21-1.06 0.41-1.58 0.64-1.05 0.45-2.09 0.96-3.1 1.53-0.99 0.55-1.95 1.16-2.9 1.82-0.14-0.06-0.24-0.11-0.24-0.11-9.62-3.68-18.17 0.75-18.17 0.75-0.78 10.24 3.84 16.68 4.76 17.86-0.23 0.63-0.44 1.27-0.64 1.92-0.71 2.32-1.24 4.7-1.57 7.16-0.05 0.35-0.09 0.71-0.13 1.07-8.9 4.38-11.53 13.38-11.53 13.38 7.42 8.53 16.07 9.06 16.07 9.06 0.01-0.01 0.02-0.01 0.02-0.02 1.1 1.96 2.37 3.83 3.8 5.57 0.6 0.73 1.23 1.43 1.88 2.11-2.71 7.74 0.38 14.18 0.38 14.18 8.26 0.31 13.69-3.61 14.83-4.52 0.82 0.28 1.66 0.53 2.5 0.74 2.54 0.65 5.14 1.04 7.74 1.15 0.65 0.03 1.3 0.04 1.95 0.04h0.31l0.21-0.01 0.41-0.01 0.4-0.02 0.01 0.01c3.89 5.55 10.74 6.34 10.74 6.34 4.87-5.13 5.15-10.22 5.15-11.33v-0.07-0.15s0 0 0 0c0-0.08-0.01-0.15-0.01-0.23 1.02-0.72 2-1.49 2.92-2.31 1.95-1.76 3.65-3.77 5.06-5.93 0.13-0.2 0.26-0.41 0.39-0.62 5.51 0.32 9.39-3.41 9.39-3.41-0.91-5.74-4.18-8.54-4.87-9.07 0 0-0.03-0.02-0.07-0.05s-0.06-0.05-0.06-0.05c-0.04-0.02-0.08-0.05-0.12-0.08 0.03-0.35 0.06-0.69 0.08-1.04 0.04-0.62 0.06-1.24 0.06-1.85v-0.46-0.23-0.12-0.16l-0.02-0.38-0.03-0.52c-0.01-0.18-0.02-0.34-0.04-0.5-0.01-0.16-0.03-0.32-0.05-0.48l-0.06-0.48-0.07-0.47c-0.09-0.63-0.21-1.26-0.36-1.88-0.58-2.47-1.54-4.82-2.82-6.93s-2.86-3.98-4.65-5.56-3.79-2.85-5.9-3.79c-2.1-0.95-4.31-1.55-6.51-1.83-1.1-0.14-2.2-0.2-3.28-0.19l-0.41 0.01h-0.1-0.14l-0.17 0.01-0.4 0.03c-0.15 0.01-0.31 0.02-0.45 0.04-0.56 0.05-1.11 0.13-1.66 0.23-2.18 0.41-4.24 1.2-6.06 2.28-1.82 1.09-3.39 2.45-4.68 3.98-1.28 1.54-2.28 3.24-2.96 5-0.69 1.76-1.07 3.58-1.18 5.35-0.03 0.44-0.04 0.88-0.03 1.32 0 0.11 0 0.22 0.01 0.33l0.01 0.35c0.02 0.21 0.03 0.42 0.05 0.63 0.09 0.9 0.25 1.75 0.49 2.58 0.48 1.66 1.25 3.15 2.2 4.43s2.08 2.33 3.28 3.15 2.49 1.41 3.76 1.79 2.54 0.54 3.74 0.53c0.15 0 0.3 0 0.44-0.01 0.08 0 0.16-0.01 0.24-0.01s0.16-0.01 0.24-0.01c0.13-0.01 0.25-0.03 0.38-0.04 0.03 0 0.07-0.01 0.11-0.01l0.12-0.02c0.08-0.01 0.15-0.02 0.23-0.03 0.16-0.02 0.29-0.05 0.43-0.08s0.28-0.05 0.42-0.09c0.27-0.06 0.54-0.14 0.8-0.22 0.52-0.17 1.01-0.38 1.46-0.61s0.87-0.5 1.26-0.77c0.11-0.08 0.22-0.16 0.33-0.25 0.42-0.33 0.48-0.94 0.15-1.35-0.29-0.36-0.79-0.45-1.19-0.23-0.1 0.05-0.2 0.11-0.3 0.16-0.35 0.17-0.71 0.32-1.09 0.45-0.39 0.12-0.79 0.22-1.2 0.29-0.21 0.03-0.42 0.06-0.63 0.08-0.11 0.01-0.21 0.02-0.32 0.02s-0.22 0.01-0.32 0.01-0.21 0-0.31-0.01c-0.13-0.01-0.26-0.01-0.39-0.02h-0.01-0.04l-0.09 0.02c-0.06-0.01-0.12-0.01-0.17-0.02-0.12-0.01-0.23-0.03-0.35-0.04-0.93-0.13-1.88-0.4-2.79-0.82-0.91-0.41-1.79-0.98-2.57-1.69-0.79-0.71-1.48-1.56-2.01-2.52-0.54-0.96-0.92-2.03-1.09-3.16-0.09-0.56-0.13-1.14-0.11-1.71 0.01-0.16 0.01-0.31 0.02-0.47v-0.03-0.06l0.01-0.12c0.01-0.08 0.01-0.15 0.02-0.23 0.03-0.31 0.08-0.62 0.13-0.92 0.43-2.45 1.65-4.83 3.55-6.65 0.47-0.45 0.98-0.87 1.53-1.25 0.55-0.37 1.12-0.7 1.73-0.98 0.6-0.28 1.23-0.5 1.88-0.68 0.65-0.17 1.31-0.29 1.98-0.35 0.34-0.03 0.67-0.04 1.01-0.04h0.23l0.27 0.01 0.17 0.01h0.03 0.07l0.27 0.02c0.73 0.06 1.46 0.16 2.17 0.32 1.43 0.32 2.83 0.85 4.13 1.57 2.6 1.44 4.81 3.69 6.17 6.4 0.69 1.35 1.16 2.81 1.4 4.31 0.06 0.38 0.1 0.76 0.13 1.14l0.02 0.29 0.01 
0.29c0.01 0.1 0.01 0.19 0.01 0.29 0 0.09 0.01 0.2 0 0.27v0.25l-0.01 0.28c-0.01 0.19-0.02 0.49-0.03 0.67-0.03 0.42-0.07 0.83-0.12 1.24s-0.12 0.82-0.19 1.22c-0.08 0.4-0.17 0.81-0.27 1.21-0.2 0.8-0.46 1.59-0.76 2.36-0.61 1.54-1.42 3-2.4 4.36-1.96 2.7-4.64 4.9-7.69 6.29-1.52 0.69-3.13 1.19-4.78 1.47-0.82 0.14-1.66 0.22-2.5 0.25l-0.15 0.01h-0.13-0.27-0.41-0.21-0.01-0.08c-0.45-0.01-0.9-0.03-1.34-0.07-1.79-0.13-3.55-0.45-5.27-0.95-1.71-0.49-3.38-1.16-4.95-2-3.14-1.68-5.95-3.98-8.15-6.76-1.11-1.38-2.07-2.87-2.87-4.43s-1.42-3.2-1.89-4.88c-0.46-1.68-0.75-3.39-0.86-5.12l-0.02-0.32-0.01-0.08v-0.07-0.14l-0.01-0.28v-0.07-0.1-0.2l-0.01-0.4v-0.08-0.03-0.16c0-0.21 0.01-0.42 0.01-0.63 0.03-0.85 0.1-1.73 0.21-2.61s0.26-1.76 0.44-2.63 0.39-1.74 0.64-2.59c0.49-1.71 1.1-3.36 1.82-4.92 1.44-3.12 3.34-5.88 5.61-8.09 0.57-0.55 1.16-1.08 1.77-1.57s1.25-0.95 1.9-1.37c0.65-0.43 1.32-0.82 2.02-1.18 0.34-0.19 0.7-0.35 1.05-0.52 0.18-0.08 0.36-0.16 0.53-0.24 0.18-0.08 0.36-0.16 0.54-0.23 0.72-0.3 1.46-0.56 2.21-0.8 0.19-0.06 0.38-0.11 0.56-0.17 0.19-0.06 0.38-0.1 0.57-0.16 0.38-0.11 0.76-0.2 1.14-0.29 0.19-0.05 0.39-0.08 0.58-0.13 0.19-0.04 0.38-0.08 0.58-0.12 0.19-0.04 0.39-0.07 0.58-0.11l0.29-0.05 0.29-0.04c0.2-0.03 0.39-0.06 0.59-0.09 0.22-0.04 0.44-0.05 0.66-0.09 0.18-0.02 0.48-0.06 0.65-0.08 0.14-0.01 0.28-0.03 0.41-0.04l0.28-0.03 0.14-0.01 0.16-0.01c0.22-0.01 0.44-0.03 0.66-0.04l0.33-0.02h0.02 0.07l0.14-0.01c0.19-0.01 0.38-0.02 0.56-0.03 0.75-0.02 1.5-0.02 2.24 0 1.48 0.06 2.93 0.22 4.34 0.48 2.82 0.53 5.49 1.43 7.89 2.62 2.41 1.18 4.57 2.63 6.44 4.2 0.12 0.1 0.23 0.2 0.35 0.3 0.11 0.1 0.23 0.2 0.34 0.3 0.23 0.2 0.44 0.41 0.66 0.61s0.43 0.41 0.64 0.62c0.2 0.21 0.41 0.41 0.61 0.63 0.8 0.84 1.53 1.69 2.19 2.55 1.33 1.71 2.39 3.44 3.24 5.07 0.05 0.1 0.11 0.2 0.16 0.3l0.15 0.3c0.1 0.2 0.2 0.4 0.29 0.6s0.19 0.39 0.27 0.59c0.09 0.2 0.17 0.39 0.25 0.58 0.32 0.76 0.61 1.49 0.84 2.18 0.39 1.11 0.67 2.11 0.89 2.98 0.09 0.35 0.42 0.58 0.78 0.55 0.37-0.03 0.66-0.34 0.66-0.71 0.04-0.95 0.01-2.05-0.09-3.3z"/>
+</svg>
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/ops/grafana-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e52b205e242068576f86454324c25bd118ec65bf
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/metadata.yaml
@@ -0,0 +1,34 @@
+name: grafana
+summary: Data visualization and observability with Grafana
+maintainers:
+    - Justin Clark <justin.clark@canonical.com>
+description: |
+    Grafana provides dashboards for monitoring data and this
+    charm is written to allow for HA on Kubernetes and can take
+    multiple data sources (for example, Prometheus).
+tags:
+    - lma
+    - grafana
+    - prometheus
+    - monitoring
+    - observability
+series:
+    - kubernetes
+provides:
+    grafana-source:
+        interface: grafana-datasource
+    grafana-dashboard:
+        interface: grafana-dash
+requires:
+    database:
+        interface: db
+        limit: 1
+peers:
+    grafana:
+        interface: grafana-peers
+storage:
+    sqlitedb:
+        type: filesystem
+        location: /var/lib/grafana
+deployment:
+    service: loadbalancer
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements-dev.txt b/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eded44146a5877d5d81b343988b516c4acaa4573
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements-dev.txt
@@ -0,0 +1,2 @@
+-r requirements.txt
+flake8
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements.txt b/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca625b4c913fa655ee7beb6ab2769131f7b5a21c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/requirements.txt
@@ -0,0 +1,2 @@
+ops
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/run_tests b/squid_cnf/juju-bundles/charms/ops/grafana-operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..14bb4f4e1b3a9a8ffef0da6da128bbddb8861ce5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 Justin
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+flake8
+python3 -m unittest -v "$@"
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/src/charm.py b/squid_cnf/juju-bundles/charms/ops/grafana-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..1053f8f871535a9eaec0f1f0712ebddd2218f16d
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/src/charm.py
@@ -0,0 +1,494 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+import hashlib
+import textwrap
+
+from oci_image import OCIImageResource, OCIImageResourceError
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+log = logging.getLogger()
+
+
+# These are the required and optional relation data fields
+# In other words, when relating to this charm, these are the fields
+# that will be processed by this charm.
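+# For illustration, a grafana-source relation payload might look like:
+#   {'private-address': '192.0.2.1', 'port': 1234,
+#    'source-type': 'prometheus', 'source-name': 'prometheus-app'}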
+REQUIRED_DATASOURCE_FIELDS = {
+    'private-address',  # the hostname/IP of the data source server
+    'port',  # the port of the data source server
+    'source-type',  # the data source type (e.g. prometheus)
+}
+
+OPTIONAL_DATASOURCE_FIELDS = {
+    'source-name',  # a human-readable name of the source
+}
+
+# https://grafana.com/docs/grafana/latest/administration/configuration/#database
+REQUIRED_DATABASE_FIELDS = {
+    'type',  # mysql, postgres or sqlite3 (sqlite3 doesn't work for HA)
+    'host',  # in the form '<url_or_ip>:<port>', e.g. 127.0.0.1:3306
+    'name',
+    'user',
+    'password',
+}
+
+# verify with Grafana documentation to ensure fields have valid values
+# as this charm will not directly handle these cases
+# TODO: fill with optional fields
+OPTIONAL_DATABASE_FIELDS = set()
+
+VALID_DATABASE_TYPES = {'mysql', 'postgres', 'sqlite3'}
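+
+# For illustration, a database relation payload might look like:
+#   {'type': 'mysql', 'host': '10.10.10.10:3306', 'name': 'test_mysql_db',
+#    'user': 'test-admin', 'password': 'super!secret!password'}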
+
+
+def get_container(pod_spec, container_name):
+    """Find and return the first container in pod_spec whose name is
+    container_name, otherwise return None."""
+    for container in pod_spec['containers']:
+        if container['name'] == container_name:
+            return container
+    raise ValueError("Unable to find container named '{}' in pod spec".format(
+        container_name))
+
+
+class GrafanaK8s(CharmBase):
+    """Charm to run Grafana on Kubernetes.
+
+    This charm allows for high-availability
+    (as long as a non-sqlite database relation is present).
+
+    Developers of this charm should be aware of the Grafana provisioning docs:
+    https://grafana.com/docs/grafana/latest/administration/provisioning/
+    """
+
+    datastore = StoredState()
+
+    def __init__(self, *args):
+        log.debug('Initializing charm.')
+        super().__init__(*args)
+
+        # -- get image information
+        self.image = OCIImageResource(self, 'grafana-image')
+
+        # -- standard hooks
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.update_status, self.on_update_status)
+        self.framework.observe(self.on.stop, self._on_stop)
+
+        # -- grafana-source relation observations
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self.on_grafana_source_changed)
+        self.framework.observe(self.on['grafana-source'].relation_broken,
+                               self.on_grafana_source_broken)
+
+        # -- grafana (peer) relation observations
+        self.framework.observe(self.on['grafana'].relation_changed,
+                               self.on_peer_changed)
+        # self.framework.observe(self.on['grafana'].relation_departed,
+        #                        self.on_peer_departed)
+
+        # -- database relation observations
+        self.framework.observe(self.on['database'].relation_changed,
+                               self.on_database_changed)
+        self.framework.observe(self.on['database'].relation_broken,
+                               self.on_database_broken)
+
+        # -- initialize states --
+        self.datastore.set_default(sources=dict())  # available data sources
+        self.datastore.set_default(source_names=set())  # unique source names
+        self.datastore.set_default(sources_to_delete=set())
+        self.datastore.set_default(database=dict())  # db configuration
+
+    @property
+    def has_peer(self) -> bool:
+        rel = self.model.get_relation('grafana')
+        return len(rel.units) > 0 if rel is not None else False
+
+    @property
+    def has_db(self) -> bool:
+        """Only consider a DB connection if we have config info."""
+        return len(self.datastore.database) > 0
+
+    def _on_stop(self, _):
+        """Go into maintenance state if the unit is stopped."""
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def on_config_changed(self, _):
+        self.configure_pod()
+
+    def on_update_status(self, _):
+        """Various health checks of the charm."""
+        self._check_high_availability()
+
+    def on_grafana_source_changed(self, event):
+        """ Get relation data for Grafana source and set k8s pod spec.
+
+        This event handler (if the unit is the leader) will get data for
+        an incoming grafana-source relation and make the relation data
+        is available in the app's datastore object (StoredState).
+        """
+
+        # if this unit is the leader, set the required data
+        # of the grafana-source in this charm's datastore
+        if not self.unit.is_leader():
+            return
+
+        # if there is no available unit, there is nothing to process
+        if event.unit is None:
+            log.warning("event unit can't be None when setting data sources.")
+            return
+
+        # dictionary of all the required/optional datasource field values
+        # using this as a more generic way of getting data source fields
+        datasource_fields = \
+            {field: event.relation.data[event.unit].get(field) for field in
+             REQUIRED_DATASOURCE_FIELDS | OPTIONAL_DATASOURCE_FIELDS}
+
+        missing_fields = [field for field
+                          in REQUIRED_DATASOURCE_FIELDS
+                          if datasource_fields.get(field) is None]
+        # check the relation data for missing required fields
+        if len(missing_fields) > 0:
+            log.error("Missing required data fields for grafana-source "
+                      "relation: {}".format(missing_fields))
+            self._remove_source_from_datastore(event.relation.id)
+            return
+
+        # specifically handle optional fields if necessary
+        # check if source-name was not passed or if we have already saved the provided name
+        if datasource_fields['source-name'] is None\
+                or datasource_fields['source-name'] in self.datastore.source_names:
+            default_source_name = '{}_{}'.format(
+                event.app.name,
+                event.relation.id
+            )
+            log.warning("No name 'grafana-source' or provided name is already in use. "
+                        "Using safe default: {}.".format(default_source_name))
+            datasource_fields['source-name'] = default_source_name
+
+        self.datastore.source_names.add(datasource_fields['source-name'])
+
+        # set the first grafana-source as the default (needed for pod config)
+        # if `self.datastore.sources` is currently empty, this is the first
+        datasource_fields['isDefault'] = 'false'
+        if not dict(self.datastore.sources):
+            datasource_fields['isDefault'] = 'true'
+
+        # add the unit name so the source can be removed later; it may
+        # duplicate 'source-name', but it guarantees a lookup key
+        datasource_fields['unit_name'] = event.unit.name
+
+        # add the new datasource relation data to the current state
+        new_source_data = {
+            field: value for field, value in datasource_fields.items()
+            if value is not None
+        }
+        self.datastore.sources.update({event.relation.id: new_source_data})
+        self.configure_pod()
+
+    def on_grafana_source_broken(self, event):
+        """When a grafana-source is removed, delete from the datastore."""
+        if self.unit.is_leader():
+            self._remove_source_from_datastore(event.relation.id)
+        self.configure_pod()
+
+    def on_peer_changed(self, _):
+        # TODO: https://grafana.com/docs/grafana/latest/tutorials/ha_setup/
+        #       According to these docs ^, as long as we have a DB, HA should
+        #       work out of the box if we are OK with "Sticky Sessions"
+        #       but having "Stateless Sessions" could require more config
+
+        # if the config changed, set a new pod spec
+        self.configure_pod()
+
+    def on_peer_departed(self, _):
+        """Sets pod spec with new info."""
+        # TODO: setting pod spec shouldn't do anything now,
+        #       but if we ever need to change config based peer units,
+        #       we will want to make sure configure_pod() is called
+        self.configure_pod()
+
+    def on_database_changed(self, event):
+        """Sets configuration information for database connection."""
+        if not self.unit.is_leader():
+            return
+
+        if event.unit is None:
+            log.warning("event unit can't be None when setting db config.")
+            return
+
+        # save the necessary configuration of this database connection
+        database_fields = \
+            {field: event.relation.data[event.unit].get(field) for field in
+             REQUIRED_DATABASE_FIELDS | OPTIONAL_DATABASE_FIELDS}
+
+        # if any required fields are missing, warn the user and return
+        missing_fields = [field for field
+                          in REQUIRED_DATABASE_FIELDS
+                          if database_fields.get(field) is None]
+        if len(missing_fields) > 0:
+            log.error("Missing required data fields for related database "
+                      "relation: {}".format(missing_fields))
+            return
+
+        # check if the passed database type is not in VALID_DATABASE_TYPES
+        if database_fields['type'] not in VALID_DATABASE_TYPES:
+            log.error('Grafana can only accept databases of the following '
+                      'types: {}'.format(VALID_DATABASE_TYPES))
+            return
+
+        # add the new database relation data to the datastore
+        self.datastore.database.update({
+            field: value for field, value in database_fields.items()
+            if value is not None
+        })
+        self.configure_pod()
+
+    def on_database_broken(self, _):
+        """Removes database connection info from datastore.
+
+        We are guaranteed to only have one DB connection, so clearing
+        datastore.database is all we need for the change to be propagated
+        to the pod spec."""
+        if not self.unit.is_leader():
+            return
+
+        # remove the existing database info from datastore
+        self.datastore.database = dict()
+
+        # set pod spec because datastore config has changed
+        self.configure_pod()
+
+    def _remove_source_from_datastore(self, rel_id):
+        """Remove the grafana-source from the datastore.
+
+        Once removed from the datastore, this datasource will no longer
+        be part of the next pod spec."""
+        log.info('Removing all data for relation: {}'.format(rel_id))
+        removed_source = self.datastore.sources.pop(rel_id, None)
+        if removed_source is None:
+            log.warning('Could not remove source for relation: {}'.format(
+                rel_id))
+        else:
+            # free name from charm's set of source names
+            # and save to set which will be used in set_pod_spec
+            self.datastore.source_names.remove(removed_source['source-name'])
+            self.datastore.sources_to_delete.add(removed_source['source-name'])
+
+    def _check_high_availability(self):
+        """Checks whether the configuration allows for HA."""
+        if self.has_peer:
+            if self.has_db:
+                log.info('high availability possible.')
+                status = MaintenanceStatus('Grafana ready for HA.')
+            else:
+                log.warning('high availability not possible '
+                            'with current configuration.')
+                status = BlockedStatus('Need database relation for HA.')
+        else:
+            log.info('running Grafana on single node.')
+            status = MaintenanceStatus('Grafana ready on single node.')
+
+        # make sure we don't have a maintenance status overwrite
+        # a currently active status
+        if isinstance(status, MaintenanceStatus) \
+                and isinstance(self.unit.status, ActiveStatus):
+            return status
+
+        self.unit.status = status
+        return status
+
+    def _make_delete_datasources_config_text(self) -> str:
+        """Generate text of data sources to delete."""
+        if not self.datastore.sources_to_delete:
+            return "\n"
+
+        delete_datasources_text = textwrap.dedent("""
+        deleteDatasources:""")
+        for name in self.datastore.sources_to_delete:
+            delete_datasources_text += textwrap.dedent("""
+            - name: {}
+              orgId: 1""".format(name))
+
+        # clear datastore.sources_to_delete and return text result
+        self.datastore.sources_to_delete.clear()
+        return delete_datasources_text + '\n\n'
+
+    def _make_data_source_config_text(self) -> str:
+        """Build config based on Data Sources section of provisioning docs."""
+        # get starting text for the config file and sources to delete
+        delete_text = self._make_delete_datasources_config_text()
+        config_text = textwrap.dedent("""
+        apiVersion: 1
+        """)
+        config_text += delete_text
+        if self.datastore.sources:
+            config_text += "datasources:"
+        for rel_id, source_info in self.datastore.sources.items():
+            # TODO: handle more optional fields and verify that current
+            #       defaults are what we want (e.g. "access")
+            config_text += textwrap.dedent("""
+                - name: {0}
+                  type: {1}
+                  access: proxy
+                  url: http://{2}:{3}
+                  isDefault: {4}
+                  editable: true
+                  orgId: 1""").format(
+                source_info['source-name'],
+                source_info['source-type'],
+                source_info['private-address'],
+                source_info['port'],
+                source_info['isDefault'],
+            )
+
+        # return the config text, ensuring it ends with a newline
+        return config_text + '\n'
+
+    def _update_pod_data_source_config_file(self, pod_spec):
+        """Adds datasources to pod configuration."""
+        file_text = self._make_data_source_config_text()
+        data_source_file_meta = {
+            'name': 'grafana-datasources',
+            'mountPath': '/etc/grafana/provisioning/datasources',
+            'files': [{
+                'path': 'datasources.yaml',
+                'content': file_text,
+            }]
+        }
+        container = get_container(pod_spec, self.app.name)
+        container['volumeConfig'].append(data_source_file_meta)
+
+        # get hash string of the new file text and put into container config
+        # if this changes, it will trigger a pod restart
+        file_text_hash = hashlib.md5(file_text.encode()).hexdigest()
+        if 'DATASOURCES_YAML' in container['envConfig'] \
+                and container['envConfig']['DATASOURCES_YAML'] != file_text_hash:
+            log.info('datasources.yaml hash has changed. '
+                     'Triggering pod restart.')
+        container['envConfig']['DATASOURCES_YAML'] = file_text_hash
+
+    def _make_config_ini_text(self):
+        """Create the text of the config.ini file.
+
+        More information about this can be found in the Grafana docs:
+        https://grafana.com/docs/grafana/latest/administration/configuration/
+        """
+
+        config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """.format(
+            self.model.config['grafana_log_level'],
+        ))
+
+        # if there is a database available, add that information
+        if self.datastore.database:
+            db_config = self.datastore.database
+            config_text += textwrap.dedent("""
+            [database]
+            type = {0}
+            host = {1}
+            name = {2}
+            user = {3}
+            password = {4}
+            url = {0}://{3}:{4}@{1}/{2}""".format(
+                db_config['type'],
+                db_config['host'],
+                db_config['name'],
+                db_config['user'],
+                db_config['password'],
+            ))
+        return config_text
+
+    def _update_pod_config_ini_file(self, pod_spec):
+        file_text = self._make_config_ini_text()
+        config_ini_file_meta = {
+            'name': 'grafana-config-ini',
+            'mountPath': '/etc/grafana',
+            'files': [{
+                'path': 'grafana.ini',
+                'content': file_text
+            }]
+        }
+        container = get_container(pod_spec, self.app.name)
+        container['volumeConfig'].append(config_ini_file_meta)
+
+        # get hash string of the new file text and put into container config
+        # if this changes, it will trigger a pod restart
+        file_text_hash = hashlib.md5(file_text.encode()).hexdigest()
+        if 'GRAFANA_INI' in container['envConfig'] \
+                and container['envConfig']['GRAFANA_INI'] != file_text_hash:
+            log.info('grafana.ini hash has changed. Triggering pod restart.')
+        container['envConfig']['GRAFANA_INI'] = file_text_hash
+
+    def _build_pod_spec(self):
+        """Builds the pod spec based on available info in datastore`."""
+
+        config = self.model.config
+
+        spec = {
+            'version': 3,
+            'containers': [{
+                'name': self.app.name,
+                'image': "ubuntu/grafana:latest",
+                'ports': [{
+                    'containerPort': config['port'],
+                    'protocol': 'TCP'
+                }],
+                'volumeConfig': [],
+                'envConfig': {},  # used to store hashes of config file text
+                'kubernetes': {
+                    'readinessProbe': {
+                        'httpGet': {
+                            'path': '/api/health',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 10,
+                        'timeoutSeconds': 30
+                    },
+                },
+            }]
+        }
+
+        return spec
+
+    def configure_pod(self):
+        """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`."""
+
+        # check for valid high availability (or single node) configuration
+        self._check_high_availability()
+
+        # in the case where we have peers but no DB connection,
+        # don't set the pod spec until it is resolved
+        if self.unit.status == BlockedStatus('Need database relation for HA.'):
+            log.error('Application is in a blocked state. '
+                      'Please resolve before pod spec can be set.')
+            return
+
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+
+        # general pod spec component updates
+        self.unit.status = MaintenanceStatus('Building pod spec.')
+        pod_spec = self._build_pod_spec()
+        if not pod_spec:
+            return
+        self._update_pod_data_source_config_file(pod_spec)
+        self._update_pod_config_ini_file(pod_spec)
+
+        # set the pod spec with Juju
+        self.model.pod.set_spec(pod_spec)
+        self.unit.status = ActiveStatus()
+
+
+if __name__ == '__main__':
+    main(GrafanaK8s)
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/tests/__init__.py b/squid_cnf/juju-bundles/charms/ops/grafana-operator/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/ops/grafana-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/ops/grafana-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6b87e4151bf4ef5e87674bbd914adc12b49fd6a
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/grafana-operator/tests/test_charm.py
@@ -0,0 +1,490 @@
+import hashlib
+import textwrap
+import unittest
+
+from ops.testing import Harness
+from ops.model import (
+    TooManyRelatedAppsError,
+    ActiveStatus,
+)
+from charm import (
+    GrafanaK8s,
+    MaintenanceStatus,
+    BlockedStatus,
+    get_container,
+)
+
+BASE_CONFIG = {
+    'port': 3000,
+    'grafana_log_level': 'info',
+}
+
+
+class GrafanaCharmTest(unittest.TestCase):
+
+    def setUp(self) -> None:
+        self.harness = Harness(GrafanaK8s)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+        self.harness.add_oci_resource('grafana-image')
+
+    def test__grafana_source_data(self):
+
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+        self.assertIsInstance(rel_id, int)
+
+        # test that the unit data propagates the correct way
+        # which is through the triggering of on_relation_changed
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': '192.0.2.1',
+                                              'port': 1234,
+                                              'source-type': 'prometheus',
+                                              'source-name': 'prometheus-app',
+                                          })
+
+        expected_first_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 1234,
+            'source-name': 'prometheus-app',
+            'source-type': 'prometheus',
+            'isDefault': 'true',
+            'unit_name': 'prometheus/0'
+        }
+        self.assertEqual(expected_first_source_data,
+                         dict(self.harness.charm.datastore.sources[rel_id]))
+
+        # test that clearing the relation data leads to
+        # the datastore for this data source being cleared
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': None,
+                                              'port': None,
+                                          })
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(rel_id))
+
+    def test__ha_database_and_status_check(self):
+        """If there is a peer connection and no database (needed for HA),
+        the charm should put the application in a blocked state."""
+
+        # start charm with one peer and no database relation
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.unit.status,
+                         ActiveStatus())
+
+        # ensure _check_high_availability() ends up with the correct status
+        status = self.harness.charm._check_high_availability()
+        self.assertEqual(status, MaintenanceStatus('Grafana ready on single node.'))
+
+        # make sure that triggering 'update-status' hook does not
+        # overwrite the current active status
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status,
+                         ActiveStatus())
+
+        peer_rel_id = self.harness.add_relation('grafana', 'grafana')
+
+        # add main unit and its data
+        # self.harness.add_relation_unit(peer_rel_id, 'grafana/0')
+        # will trigger the grafana-changed hook
+        self.harness.update_relation_data(peer_rel_id,
+                                          'grafana/0',
+                                          {'private-address': '10.1.2.3'})
+
+        # add peer unit and its data
+        self.harness.add_relation_unit(peer_rel_id, 'grafana/1')
+        self.harness.update_relation_data(peer_rel_id,
+                                          'grafana/1',
+                                          {'private-address': '10.0.0.1'})
+
+        self.assertTrue(self.harness.charm.has_peer)
+        self.assertFalse(self.harness.charm.has_db)
+        self.assertEqual(
+            self.harness.charm.unit.status,
+            BlockedStatus('Need database relation for HA.')
+        )
+
+        # ensure update-status hook doesn't overwrite this
+        self.harness.charm.on.update_status.emit()
+        self.assertEqual(self.harness.charm.unit.status,
+                         BlockedStatus('Need database relation for HA.'))
+
+        # now add the database connection and the model should
+        # not have a blocked status
+        db_rel_id = self.harness.add_relation('database', 'mysql')
+        self.harness.add_relation_unit(db_rel_id, 'mysql/0')
+        self.harness.update_relation_data(db_rel_id,
+                                          'mysql/0',
+                                          {
+                                              'type': 'mysql',
+                                              'host': '10.10.10.10:3306',
+                                              'name': 'test_mysql_db',
+                                              'user': 'test-admin',
+                                              'password': 'super!secret!password',
+                                          })
+        self.assertTrue(self.harness.charm.has_db)
+        self.assertEqual(self.harness.charm.unit.status, ActiveStatus())
+
+        # ensure _check_high_availability() ends up with the correct status
+        status = self.harness.charm._check_high_availability()
+        self.assertEqual(status, MaintenanceStatus('Grafana ready for HA.'))
+
+    def test__database_relation_data(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.database, {})
+
+        # add relation and update relation data
+        rel_id = self.harness.add_relation('database', 'mysql')
+        rel = self.harness.model.get_relation('database')
+        self.harness.add_relation_unit(rel_id, 'mysql/0')
+        test_relation_data = {
+            'type': 'mysql',
+            'host': '0.1.2.3:3306',
+            'name': 'my-test-db',
+            'user': 'test-user',
+            'password': 'super!secret!password',
+        }
+        self.harness.update_relation_data(rel_id,
+                                          'mysql/0',
+                                          test_relation_data)
+        # check that charm datastore was properly set
+        self.assertEqual(dict(self.harness.charm.datastore.database),
+                         test_relation_data)
+
+        # now depart this relation and ensure the datastore is emptied
+        self.harness.charm.on.database_relation_broken.emit(rel)
+        self.assertEqual({}, dict(self.harness.charm.datastore.database))
+
+    def test__multiple_database_relation_handling(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.database, {})
+
+        # add first database relation
+        self.harness.add_relation('database', 'mysql')
+
+        # add a second database relation -- get_relation should fail here
+        with self.assertRaises(TooManyRelatedAppsError):
+            self.harness.add_relation('database', 'mysql')
+            self.harness.charm.model.get_relation('database')
+
+    def test__multiple_source_relations(self):
+        """This will test data-source config text with multiple sources.
+
+        Specifically, it will test multiple grafana-source relations."""
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        rel_id0 = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id0, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        # and test that _make_data_source_config_text() works as expected
+        prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus'
+        }
+        self.harness.update_relation_data(rel_id0, 'prometheus/0', prom_source_data)
+        header_text = textwrap.dedent("""
+                apiVersion: 1
+
+                datasources:""")
+        correct_config_text0 = header_text + textwrap.dedent("""
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1""")
+
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text0 + '\n', generated_text)
+
+        # add another source relation and check the resulting config text
+        jaeger_source_data = {
+            'private-address': '255.255.255.0',
+            'port': 7890,
+            'source-type': 'jaeger',
+            'source-name': 'jaeger-application'
+        }
+        rel_id1 = self.harness.add_relation('grafana-source', 'jaeger')
+        self.harness.add_relation_unit(rel_id1, 'jaeger/0')
+        self.harness.update_relation_data(rel_id1, 'jaeger/0', jaeger_source_data)
+
+        correct_config_text1 = correct_config_text0 + textwrap.dedent("""
+            - name: jaeger-application
+              type: jaeger
+              access: proxy
+              url: http://255.255.255.0:7890
+              isDefault: false
+              editable: true
+              orgId: 1""")
+
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text1 + '\n', generated_text)
+
+        # test removal of second source results in config_text
+        # that is the same as the original
+        self.harness.update_relation_data(rel_id1,
+                                          'jaeger/0',
+                                          {
+                                              'private-address': None,
+                                              'port': None,
+                                          })
+        generated_text = self.harness.charm._make_data_source_config_text()
+        correct_text_after_removal = textwrap.dedent("""
+            apiVersion: 1
+
+            deleteDatasources:
+            - name: jaeger-application
+              orgId: 1
+
+            datasources:
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1""")
+
+        self.assertEqual(correct_text_after_removal + '\n', generated_text)
+
+        # now test that the 'deleteDatasources' is gone
+        generated_text = self.harness.charm._make_data_source_config_text()
+        self.assertEqual(correct_config_text0 + '\n', generated_text)
+
+    def test__pod_spec_container_datasources(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        # and test that _make_data_source_config_text() works as expected
+        prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus'
+        }
+        self.harness.update_relation_data(rel_id, 'prometheus/0', prom_source_data)
+
+        data_source_file_text = textwrap.dedent("""
+            apiVersion: 1
+
+            datasources:
+            - name: prometheus_0
+              type: prometheus
+              access: proxy
+              url: http://192.0.2.1:4321
+              isDefault: true
+              editable: true
+              orgId: 1
+              """)
+
+        config_ini_file_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """).format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        expected_container_files_spec = [
+            {
+                'name': 'grafana-datasources',
+                'mountPath': '/etc/grafana/provisioning/datasources',
+                'files': [{
+                    'path': 'datasources.yaml',
+                    'content': data_source_file_text,
+                }],
+            },
+            {
+                'name': 'grafana-config-ini',
+                'mountPath': '/etc/grafana',
+                'files': [{
+                    'path': 'grafana.ini',
+                    'content': config_ini_file_text,
+                }]
+            }
+        ]
+        pod_spec, _ = self.harness.get_pod_spec()
+        container = get_container(pod_spec, 'grafana')
+        actual_container_files_spec = container['volumeConfig']
+        self.assertEqual(expected_container_files_spec,
+                         actual_container_files_spec)
+
+    def test__access_sqlite_storage_location(self):
+        expected_path = '/var/lib/grafana'
+        actual_path = self.harness.charm.meta.storages['sqlitedb'].location
+        self.assertEqual(expected_path, actual_path)
+
+    def test__config_ini_without_database(self):
+        self.harness.update_config(BASE_CONFIG)
+        expected_config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+        """).format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        actual_config_text = self.harness.charm._make_config_ini_text()
+        self.assertEqual(expected_config_text, actual_config_text)
+
+    def test__config_ini_with_database(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+
+        # add database relation and update relation data
+        rel_id = self.harness.add_relation('database', 'mysql')
+        self.harness.add_relation_unit(rel_id, 'mysql/0')
+        test_relation_data = {
+            'type': 'mysql',
+            'host': '0.1.2.3:3306',
+            'name': 'my-test-db',
+            'user': 'test-user',
+            'password': 'super!secret!password',
+        }
+        self.harness.update_relation_data(rel_id,
+                                          'mysql/0',
+                                          test_relation_data)
+
+        # test the results of _make_config_ini_text()
+        expected_config_text = textwrap.dedent("""
+        [paths]
+        provisioning = /etc/grafana/provisioning
+
+        [log]
+        mode = console
+        level = {0}
+
+        [database]
+        type = mysql
+        host = 0.1.2.3:3306
+        name = my-test-db
+        user = test-user
+        password = super!secret!password
+        url = mysql://test-user:super!secret!password@0.1.2.3:3306/my-test-db""").format(
+            self.harness.model.config['grafana_log_level'],
+        )
+
+        actual_config_text = self.harness.charm._make_config_ini_text()
+        self.assertEqual(expected_config_text, actual_config_text)
+
+    def test__duplicate_source_names(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+        self.assertEqual(self.harness.charm.datastore.sources, {})
+
+        # add first relation
+        p_rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        p_rel = self.harness.model.get_relation('grafana-source', p_rel_id)
+        self.harness.add_relation_unit(p_rel_id, 'prometheus/0')
+
+        # add test data to grafana-source relation
+        prom_source_data0 = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-type': 'prometheus',
+            'source-name': 'duplicate-source-name'
+        }
+        self.harness.update_relation_data(p_rel_id, 'prometheus/0', prom_source_data0)
+        expected_prom_source_data = {
+            'private-address': '192.0.2.1',
+            'port': 4321,
+            'source-name': 'duplicate-source-name',
+            'source-type': 'prometheus',
+            'isDefault': 'true',
+            'unit_name': 'prometheus/0'
+        }
+        self.assertEqual(dict(self.harness.charm.datastore.sources[p_rel_id]),
+                         expected_prom_source_data)
+
+        # add second source with the same name as the first source
+        g_rel_id = self.harness.add_relation('grafana-source', 'graphite')
+        g_rel = self.harness.model.get_relation('grafana-source', g_rel_id)
+        self.harness.add_relation_unit(g_rel_id, 'graphite/0')
+
+        graphite_source_data0 = {
+            'private-address': '192.12.23.34',
+            'port': 4321,
+            'source-type': 'graphite',
+            'source-name': 'duplicate-source-name'
+        }
+        expected_graphite_source_data = {
+            'isDefault': 'false',
+            'port': 4321,
+            'private-address': '192.12.23.34',
+            'source-name': 'graphite_1',
+            'source-type': 'graphite',
+            'unit_name': 'graphite/0'
+        }
+        self.harness.update_relation_data(g_rel_id, 'graphite/0', graphite_source_data0)
+        self.assertEqual(
+            expected_graphite_source_data,
+            dict(self.harness.charm.datastore.sources.get(g_rel_id))
+        )
+        self.assertEqual(2, len(self.harness.charm.datastore.sources))
+
+        # now remove the relation and ensure datastore source-name is removed
+        self.harness.charm.on.grafana_source_relation_broken.emit(p_rel)
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(p_rel_id))
+        self.assertEqual(1, len(self.harness.charm.datastore.sources))
+
+        # remove graphite relation
+        self.harness.charm.on.grafana_source_relation_broken.emit(g_rel)
+        self.assertEqual(None, self.harness.charm.datastore.sources.get(g_rel_id))
+        self.assertEqual(0, len(self.harness.charm.datastore.sources))
+
+    def test__idempotent_datasource_file_hash(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(BASE_CONFIG)
+
+        rel_id = self.harness.add_relation('grafana-source', 'prometheus')
+        self.harness.add_relation_unit(rel_id, 'prometheus/0')
+        self.assertIsInstance(rel_id, int)
+
+        # test that the unit data propagates the correct way
+        # which is through the triggering of on_relation_changed
+        self.harness.update_relation_data(rel_id,
+                                          'prometheus/0',
+                                          {
+                                              'private-address': '192.0.2.1',
+                                              'port': 1234,
+                                              'source-type': 'prometheus',
+                                              'source-name': 'prometheus-app',
+                                          })
+
+        # get a hash of the created file and check that it matches the pod spec
+        pod_spec, _ = self.harness.get_pod_spec()
+        container = get_container(pod_spec, 'grafana')
+        hash_text = hashlib.md5(
+            container['volumeConfig'][0]['files'][0]['content'].encode()).hexdigest()
+        self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text)
+
+        # test the idempotence of the call by re-configuring the pod spec
+        self.harness.charm.configure_pod()
+        self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text)
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.flake8 b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+  venv
+  .git
+  build
+  dist
+  *.egg_info
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.gitignore b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b3b17b402232904b604711f178aefca0a623bdf5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.gitignore
@@ -0,0 +1,6 @@
+*~
+*swp
+*.charm
+__pycache__
+build
+venv
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.jujuignore b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.jujuignore
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd559eabeae93e4d23215fa450130fa9b37ace
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/.jujuignore
@@ -0,0 +1,3 @@
+/venv
+*.py[cod]
+*.charm
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/LICENSE b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/README.md b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..35f666076133ad99f6a0503a44ccedbf04bd7775
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/README.md
@@ -0,0 +1,78 @@
+# Prometheus Operator
+
+## Description
+
+The Prometheus Operator provides a cluster monitoring solution using
+[Prometheus](https://prometheus.io), which is an open source
+monitoring system and alerting toolkit.
+
+This repository contains a [Juju](https://jaas.ai/) Charm for
+deploying the monitoring component of Prometheus in a Kubernetes
+cluster. The alerting component of Prometheus is offered through a
+separate Charm.
+
+## Setup
+
+A typical setup using [snaps](https://snapcraft.io/) for deployments
+to a [microk8s](https://microk8s.io/) cluster can be done using the
+following commands
+
+    sudo snap install microk8s --classic
+    microk8s.enable dns storage registry dashboard
+    sudo snap install juju --classic
+    juju bootstrap microk8s microk8s
+    juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+
+## Build
+
+Install the charmcraft tool
+
+    sudo snap install charmcraft
+
+Build the charm in this git repository
+
+    charmcraft build
+
+## Usage
+
+Create a Juju model for your monitoring operators
+
+    juju add-model lma
+
+Deploy Prometheus using its default configuration.
+
+    juju deploy ./prometheus.charm
+
+View the Prometheus dashboard
+
+1. Use `juju status` to determine the IP address of the Prometheus unit
+2. Navigate to `http://<IP-Address>:9090` using your browser
+
+If required, remove the deployed monitoring model completely
+
+    juju destroy-model -y lma --no-wait --force --destroy-storage
+
+## Relations
+
+Currently supported relations are
+
+- [Grafana](https://github.com/canonical/grafana-operator)
+- [Alertmanager](https://github.com/canonical/alertmanager-operator)
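+
+For example, assuming a Grafana charm is deployed in the same model
+under the application name `grafana`, the data source relation can be
+added with
+
+    juju add-relation prometheus:grafana-source grafana:grafana-source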
+
+## Developing
+
+Use your existing Python 3 development environment or create and
+activate a Python 3 virtualenv
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+
+Install the development requirements
+
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions.yaml b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fef67f32c4a9134c536b965e8c53d055e18c4457
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions.yaml
@@ -0,0 +1,3 @@
+reload-config:
+  description: |
+    Tell Prometheus to reload its config from the ConfigMap.
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions/reload-config b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions/reload-config
new file mode 100755
index 0000000000000000000000000000000000000000..d736d4e1627e01599ba7cef209ba684ef4b0ef41
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/actions/reload-config
@@ -0,0 +1,2 @@
+#!/bin/sh
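+# The command below assumes PID 1 in the container is the Prometheus
+# server process, so a SIGHUP makes it re-read its configuration.
+# A hedged usage sketch, assuming the application is deployed under the
+# name "prometheus":
+#   juju run-action prometheus/0 reload-config --wait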
+kill -HUP 1 && echo "Sent SIGHUP to the Prometheus container, config reloaded"
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config.yaml b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e40cb172421a1ec10708a96377fbeb9bee5391cd
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config.yaml
@@ -0,0 +1,99 @@
+options:
+  prometheus-image-path:
+    type: string
+    description: |
+      The location of the image to use,
+      e.g. "registry.example.com/prometheus:v1".
+
+      This setting is required.
+    default: "prom/prometheus:latest"
+  prometheus-image-username:
+    type: string
+    description: |
+      The username for accessing the registry specified in
+      prometheus-image-path.
+    default: ""
+  prometheus-image-password:
+    type: string
+    description: |
+      The password associated with prometheus-image-username for
+      accessing the registry specified in prometheus-image-path.
+    default: ""
+  port:
+    description: The port Prometheus will be listening on
+    type: int
+    default: 9090
+  ssl-cert:
+    type: string
+    default:
+    description: |
+      SSL certificate to install and use for Prometheus endpoint.
+  ssl-key:
+    type: string
+    default:
+    description: |
+      SSL key to use with certificate specified as ssl-cert.
+  log-level:
+    description: |
+      Prometheus server log level (only log messages with the given severity
+      or above). Must be one of: [debug, info, warn, error, fatal].
+      If not set, the Prometheus default (info) is used.
+    type: string
+    default:
+  web-external-url:
+    description: |
+      The URL under which Prometheus is externally reachable (for example,
+      if Prometheus is served via a reverse proxy).
+      Used for generating relative and absolute links back to
+      Prometheus itself. If the URL has a path portion, it will be used to
+      prefix all HTTP endpoints served by Prometheus.
+
+      If omitted, relevant URL components will be derived automatically.
+    type: string
+    default: ""
+  tsdb-retention-time:
+    description: |
+      How long to retain samples in storage.
+      Units Supported: y, w, d, h, m, s
+    type: string
+    default: 15d
+  tsdb-wal-compression:
+    description: |
+      This flag enables compression of the write-ahead log (WAL).
+      Depending on your data, you can expect the WAL size to be
+      halved with little extra CPU load.
+    type: boolean
+    default: false
+  external-labels:
+    description: |
+      A JSON string of key-value pairs that specify the labels to
+      attach to metrics in this Prometheus instance when they get pulled
+      by an aggregating parent. This is useful in the case of federation
+      where, for example, you want each datacenter to have its own
+      Prometheus instance and then have a global instance that pulls from
+      each of these datacenter instances. By specifying a unique set of
+      external-labels for each datacenter instance, you can easily determine
+      in the aggregating Prometheus instance which datacenter a metric is
+      coming from. Note that you are not limited to one instance per
+      datacenter. The datacenter example here is arbitrary and you are free
+      to organize your federation's hierarchy as you see fit.
+      Ex. '{ "cluster": "datacenter1" }'. Both keys and values may be
+      arbitrarily chosen as you see fit.
+    type: string
+    default: "{}"
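+    # A hedged usage sketch, assuming the application is deployed under
+    # the name "prometheus":
+    #   juju config prometheus external-labels='{"cluster": "datacenter1"}'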
+  scrape-interval:
+    description: |
+      How frequently to scrape targets by default.
+    type: string
+    default: 1m
+  scrape-timeout:
+    description: |
+      How long until a scrape request times out.
+    type: string
+    default: 10s
+  evaluation-interval:
+    description: |
+      How frequently rules will be evaluated.
+    type: string
+    default: 1m
+
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config/prometheus-k8s.yml b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config/prometheus-k8s.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e003db0ae0df07f539a6c725ddd7925ccba67232
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/config/prometheus-k8s.yml
@@ -0,0 +1,283 @@
+#
+# This file was copied from https://github.com/prometheus/prometheus/blob/release-2.18/documentation/examples/prometheus-kubernetes.yml
+#
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+#
+# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
+# for the kubernetes-cadvisor job; you will need to edit or remove this job.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+  kubernetes_sd_configs:
+  - role: endpoints
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    # If your node certificates are self-signed or use a different CA to the
+    # master CA, then disable certificate verification below. Note that
+    # certificate verification is an integral part of a secure infrastructure
+    # so this should only be disabled in a controlled environment. You can
+    # disable certificate verification by uncommenting the line below.
+    #
+    # insecure_skip_verify: true
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  # Keep only the default/kubernetes service endpoints for the https port. This
+  # will add targets for each API server for which Kubernetes adds an
+  # endpoint to the default/kubernetes service.
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+    action: keep
+    regex: default;kubernetes;https
+
+# Scrape config for nodes (kubelet).
+#
+# Rather than connecting directly to the node, the scrape is proxied through the
+# Kubernetes apiserver.  This means it will work if Prometheus is running out of
+# cluster, or can't connect to nodes for some other reason (e.g. because of
+# firewalling).
+- job_name: 'kubernetes-nodes'
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  kubernetes_sd_configs:
+  - role: node
+
+  relabel_configs:
+  - action: labelmap
+    regex: __meta_kubernetes_node_label_(.+)
+  - target_label: __address__
+    replacement: kubernetes.default.svc:443
+  - source_labels: [__meta_kubernetes_node_name]
+    regex: (.+)
+    target_label: __metrics_path__
+    replacement: /api/v1/nodes/${1}/proxy/metrics
+
+# Scrape config for Kubelet cAdvisor.
+#
+# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
+# (those whose names begin with 'container_') have been removed from the
+# Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
+# retrieve those metrics.
+#
+# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
+# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
+# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
+# the --cadvisor-port=0 Kubelet flag).
+#
+# This job is not necessary and should be removed in Kubernetes 1.6 and
+# earlier versions, or it will cause the metrics to be scraped twice.
+- job_name: 'kubernetes-cadvisor'
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  kubernetes_sd_configs:
+  - role: node
+
+  relabel_configs:
+  - action: labelmap
+    regex: __meta_kubernetes_node_label_(.+)
+  - target_label: __address__
+    replacement: kubernetes.default.svc:443
+  - source_labels: [__meta_kubernetes_node_name]
+    regex: (.+)
+    target_label: __metrics_path__
+    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
+
+# Example scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# for all or only some endpoints.
+- job_name: 'kubernetes-service-endpoints'
+
+  kubernetes_sd_configs:
+  - role: endpoints
+
+  relabel_configs:
+  # Example relabel to scrape only endpoints that have
+  # "example.io/should_be_scraped = true" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
+  #    action: keep
+  #    regex: true
+  #
+  # Example relabel to customize metric path based on endpoints
+  # "example.io/metric_path = <metric path>" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
+  #    action: replace
+  #    target_label: __metrics_path__
+  #    regex: (.+)
+  #
+  # Example relabel to scrape only single, desired port for the service based
+  # on endpoints "example.io/scrape_port = <port>" annotation.
+  #  - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
+  #    action: replace
+  #    regex: ([^:]+)(?::\d+)?;(\d+)
+  #    replacement: $1:$2
+  #    target_label: __address__
+  #
+  # Example relabel to configure scrape scheme for all service scrape targets
+  # based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
+  #    action: replace
+  #    target_label: __scheme__
+  #    regex: (https?)
+  - action: labelmap
+    regex: __meta_kubernetes_service_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    action: replace
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_service_name]
+    action: replace
+    target_label: kubernetes_name
+
+# Example scrape config for probing services via the Blackbox Exporter.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# for all or only some services.
+- job_name: 'kubernetes-services'
+
+  metrics_path: /probe
+  params:
+    module: [http_2xx]
+
+  kubernetes_sd_configs:
+  - role: service
+
+  relabel_configs:
+  # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
+  #    action: keep
+  #    regex: true
+  - source_labels: [__address__]
+    target_label: __param_target
+  - target_label: __address__
+    replacement: blackbox-exporter.example.com:9115
+  - source_labels: [__param_target]
+    target_label: instance
+  - action: labelmap
+    regex: __meta_kubernetes_service_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_service_name]
+    target_label: kubernetes_name
+
+# Example scrape config for probing ingresses via the Blackbox Exporter.
+#
+# The relabeling allows the actual ingress scrape endpoint to be configured
+# for all or only some services.
+- job_name: 'kubernetes-ingresses'
+
+  metrics_path: /probe
+  params:
+    module: [http_2xx]
+
+  kubernetes_sd_configs:
+  - role: ingress
+
+  relabel_configs:
+  # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
+  #  - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
+  #    action: keep
+  #    regex: true
+  - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
+    regex: (.+);(.+);(.+)
+    replacement: ${1}://${2}${3}
+    target_label: __param_target
+  - target_label: __address__
+    replacement: blackbox-exporter.example.com:9115
+  - source_labels: [__param_target]
+    target_label: instance
+  - action: labelmap
+    regex: __meta_kubernetes_ingress_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_ingress_name]
+    target_label: kubernetes_name
+
+# Example scrape config for pods
+#
+# The relabeling allows the actual pod scrape to be configured
+# for all the declared ports (or port-free target if none is declared)
+# or only some ports.
+- job_name: 'kubernetes-pods'
+
+  kubernetes_sd_configs:
+  - role: pod
+
+  relabel_configs:
+  # Example relabel to scrape only pods that have
+  # "example.io/should_be_scraped = true" annotation.
+  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
+  #    action: keep
+  #    regex: true
+  #
+  # Example relabel to customize metric path based on pod
+  # "example.io/metric_path = <metric path>" annotation.
+  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
+  #    action: replace
+  #    target_label: __metrics_path__
+  #    regex: (.+)
+  #
+  # Example relabel to scrape only single, desired port for the pod
+  # based on pod "example.io/scrape_port = <port>" annotation.
+  #  - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
+  #    action: replace
+  #    regex: ([^:]+)(?::\d+)?;(\d+)
+  #    replacement: $1:$2
+  #    target_label: __address__
+  - action: labelmap
+    regex: __meta_kubernetes_pod_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    action: replace
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_pod_name]
+    action: replace
+    target_label: kubernetes_pod_name
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/icon.svg b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/icon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..5c51f66d901d0a30c082a7207a53d19b763acc2b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/icon.svg
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   version="1.1"
+   id="Layer_1"
+   x="0px"
+   y="0px"
+   width="115.333px"
+   height="114px"
+   viewBox="0 0 115.333 114"
+   enable-background="new 0 0 115.333 114"
+   xml:space="preserve"
+   sodipodi:docname="prometheus_logo_orange.svg"
+   inkscape:version="0.92.1 r15371"><metadata
+     id="metadata4495"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs4493" /><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1484"
+     inkscape:window-height="886"
+     id="namedview4491"
+     showgrid="false"
+     inkscape:zoom="5.2784901"
+     inkscape:cx="60.603667"
+     inkscape:cy="60.329656"
+     inkscape:window-x="54"
+     inkscape:window-y="7"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="Layer_1" /><g
+     id="Layer_2" /><path
+     style="fill:#e6522c;fill-opacity:1"
+     inkscape:connector-curvature="0"
+     id="path4486"
+     d="M 56.667,0.667 C 25.372,0.667 0,26.036 0,57.332 c 0,31.295 25.372,56.666 56.667,56.666 31.295,0 56.666,-25.371 56.666,-56.666 0,-31.296 -25.372,-56.665 -56.666,-56.665 z m 0,106.055 c -8.904,0 -16.123,-5.948 -16.123,-13.283 H 72.79 c 0,7.334 -7.219,13.283 -16.123,13.283 z M 83.297,89.04 H 30.034 V 79.382 H 83.298 V 89.04 Z M 83.106,74.411 H 30.186 C 30.01,74.208 29.83,74.008 29.66,73.802 24.208,67.182 22.924,63.726 21.677,60.204 c -0.021,-0.116 6.611,1.355 11.314,2.413 0,0 2.42,0.56 5.958,1.205 -3.397,-3.982 -5.414,-9.044 -5.414,-14.218 0,-11.359 8.712,-21.285 5.569,-29.308 3.059,0.249 6.331,6.456 6.552,16.161 3.252,-4.494 4.613,-12.701 4.613,-17.733 0,-5.21 3.433,-11.262 6.867,-11.469 -3.061,5.045 0.793,9.37 4.219,20.099 1.285,4.03 1.121,10.812 2.113,15.113 C 63.797,33.534 65.333,20.5 71,16 c -2.5,5.667 0.37,12.758 2.333,16.167 3.167,5.5 5.087,9.667 5.087,17.548 0,5.284 -1.951,10.259 -5.242,14.148 3.742,-0.702 6.326,-1.335 6.326,-1.335 l 12.152,-2.371 c 10e-4,-10e-4 -1.765,7.261 -8.55,14.254 z" /></svg>
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d4329da48a621aad3a50ab6df40d72a11dbcb3f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/metadata.yaml
@@ -0,0 +1,34 @@
+name: prometheus
+summary: Prometheus for Kubernetes clusters
+maintainers:
+    - Balbir Thomas <balbir.thomas@canonical.com>
+description: |
+  Prometheus is an open source monitoring solution. Prometheus
+  supports aggregating high-dimensional data and exposes a powerful
+  query language, PromQL. This charm deploys and operates Prometheus on
+  Kubernetes clusters. Prometheus can raise alerts through a relation
+  with the Alertmanager charm. Alerting rules for Prometheus need to
+  be provided through a relation with the application that requires
+  alerting. Prometheus provides its own dashboard for data
+  visualization but a richer visualization interface may be obtained
+  through a relation with the Grafana charm.
+tags:
+    - observability
+    - lma
+    - prometheus
+    - monitoring
+    - alerting
+    - grafana
+series:
+    - kubernetes
+requires:
+    grafana-source:
+        interface: grafana-datasource
+    alertmanager:
+        interface: alertmanager
+    target:
+        interface: http
+storage:
+  database:
+    type: filesystem
+    location: /var/lib/prometheus
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements-dev.txt b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3950bef2e306b78aaa231135636a04f2d443d569
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements-dev.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+black
+flake8
+pytest
+pytest-cov
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements.txt b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff3e3351770b50b916e2fcc6478e986f59c35845
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/requirements.txt
@@ -0,0 +1,2 @@
+ops
+pyaml
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/run_tests b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..637497ffe1bac2f75fec96b3bc1d25e16e39e1d8
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+# show (but do not apply) the formatting changes black would make to src/
+black --diff src
+python3 -m unittest -v "$@"
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/setup.py b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ce0b9ca35e585d2b1925a13bd58d1ea67f0900
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/setup.py
@@ -0,0 +1,21 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="prometheus-charm",
+    version="0.0.1",
+    author="Balbir Thomas",
+    author_email="balbir.thomas@canonical.com",
+    description="Kubernetes Charm/Operator for Prometheus",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/balbirthomas/prometheus-charm",
+    packages=setuptools.find_packages(),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.6',
+)
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/charm.py b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..517a5c456843b057be4c40edf816221f9a8ef6a6
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/charm.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+import logging
+import yaml
+import json
+
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+logger = logging.getLogger(__name__)
+
+
+class PrometheusCharm(CharmBase):
+    """A Juju Charm for Prometheus
+    """
+    _stored = StoredState()
+
+    def __init__(self, *args):
+        logger.debug('Initializing Charm')
+
+        super().__init__(*args)
+
+        self._stored.set_default(alertmanagers=[])
+        self._stored.set_default(alertmanager_port='9093')
+
+        self.framework.observe(self.on.config_changed, self._on_config_changed)
+        self.framework.observe(self.on.stop, self._on_stop)
+        self.framework.observe(self.on['alertmanager'].relation_changed,
+                               self._on_alertmanager_changed)
+        self.framework.observe(self.on['alertmanager'].relation_broken,
+                               self._on_alertmanager_broken)
+
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self._on_grafana_changed)
+        self.framework.observe(self.on['target'].relation_changed,
+                               self._on_config_changed)
+
+    def _on_config_changed(self, _):
+        """Set a new Juju pod specification
+        """
+        self._configure_pod()
+
+    def _on_stop(self, _):
+        """Mark unit is inactive
+        """
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def _on_grafana_changed(self, event):
+        """Provide Grafana with data source information
+        """
+        event.relation.data[self.unit]['port'] = str(self.model.config['port'])
+        event.relation.data[self.unit]['source-type'] = 'prometheus'
+
+    def _on_alertmanager_changed(self, event):
+        """Set an alertmanager configuation
+        """
+        if not self.unit.is_leader():
+            return
+
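+        # The remote app is expected to publish data shaped like
+        # {"addrs": '["192.168.0.1"]', "port": "9093"} (see tests/test_charm.py).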
+        addrs = json.loads(event.relation.data[event.app].get('addrs', '[]'))
+        port = event.relation.data[event.app]['port']
+
+        self._stored.alertmanager_port = port
+        self._stored.alertmanagers = addrs
+
+        self._configure_pod()
+
+    def _on_alertmanager_broken(self, event):
+        """Remove all alertmanager configuration
+        """
+        if not self.unit.is_leader():
+            return
+        self._stored.alertmanagers.clear()
+        self._configure_pod()
+
+    def _cli_args(self):
+        """Construct command line arguments for Prometheus
+        """
+        config = self.model.config
+        args = [
+            '--config.file=/etc/prometheus/prometheus.yml',
+            '--storage.tsdb.path=/var/lib/prometheus',
+            '--web.enable-lifecycle',
+            '--web.console.templates=/usr/share/prometheus/consoles',
+            '--web.console.libraries=/usr/share/prometheus/console_libraries'
+        ]
+
+        # get log level
+        allowed_log_levels = ['debug', 'info', 'warn', 'error', 'fatal']
+        if config.get('log-level'):
+            log_level = config['log-level'].lower()
+        else:
+            log_level = 'info'
+
+        # If log level is invalid set it to debug
+        if log_level not in allowed_log_levels:
+            logging.error(
+                'Invalid loglevel: {0} given, {1} allowed. '
+                'defaulting to DEBUG loglevel.'.format(
+                    log_level, '/'.join(allowed_log_levels)
+                )
+            )
+            log_level = 'debug'
+
+        # set log level
+        args.append(
+            '--log.level={0}'.format(log_level)
+        )
+
+        # Enable time series database compression
+        if config.get('tsdb-wal-compression'):
+            args.append('--storage.tsdb.wal-compression')
+
+        # Set time series retention time
+        if config.get('tsdb-retention-time') and self._is_valid_timespec(
+                config['tsdb-retention-time']):
+            args.append('--storage.tsdb.retention.time={}'.format(config['tsdb-retention-time']))
+
+        return args
+
+    def _is_valid_timespec(self, timeval):
+        """Is a time interval unit and value valid
+        """
+        if not timeval:
+            return False
+
+        time, unit = timeval[:-1], timeval[-1]
+
+        if unit not in ['y', 'w', 'd', 'h', 'm', 's']:
+            logger.error('Invalid unit {} in time spec'.format(unit))
+            return False
+
+        try:
+            int(time)
+        except ValueError:
+            logger.error('Can not convert time {} to integer'.format(time))
+            return False
+
+        if not int(time) > 0:
+            logger.error('Expected positive time spec but got {}'.format(time))
+            return False
+
+        return True
+
+    def _are_valid_labels(self, json_data):
+        """Are Prometheus external labels valid
+        """
+        if not json_data:
+            return False
+
+        try:
+            labels = json.loads(json_data)
+        except (ValueError, TypeError):
+            logger.error('Can not parse external labels : {}'.format(json_data))
+            return False
+
+        if not isinstance(labels, dict):
+            logger.error('Expected label dictionary but got : {}'.format(labels))
+            return False
+
+        for key, value in labels.items():
+            if not isinstance(key, str) or not isinstance(value, str):
+                logger.error('External label keys/values must be strings')
+                return False
+
+        return True
+
+    def _external_labels(self):
+        """Extract external labels for Prometheus from configuration
+        """
+        config = self.model.config
+        labels = {}
+
+        if config.get('external-labels') and self._are_valid_labels(
+                config['external-labels']):
+            labels = json.loads(config['external-labels'])
+
+        return labels
+
+    def _prometheus_global_config(self):
+        """Construct Prometheus global configuration
+        """
+        config = self.model.config
+        global_config = {}
+
+        labels = self._external_labels()
+        if labels:
+            global_config['external_labels'] = labels
+
+        if config.get('scrape-interval') and self._is_valid_timespec(
+                config['scrape-interval']):
+            global_config['scrape_interval'] = config['scrape-interval']
+
+        if config.get('scrape-timeout') and self._is_valid_timespec(
+                config['scrape-timeout']):
+            global_config['scrape_timeout'] = config['scrape-timeout']
+
+        if config.get('evaluation-interval') and self._is_valid_timespec(
+                config['evaluation-interval']):
+            global_config['evaluation_interval'] = config['evaluation-interval']
+
+        return global_config
+
+    def _alerting_config(self):
+        """Construct Prometheus altering configuation
+        """
+        alerting_config = ''
+
+        if len(self._stored.alertmanagers) < 1:
+            logger.debug('No alertmanagers available')
+            return alerting_config
+
+        targets = []
+        for manager in self._stored.alertmanagers:
+            port = self._stored.alertmanager_port
+            targets.append("{}:{}".format(manager, port))
+
+        manager_config = {'static_configs': [{'targets': targets}]}
+        alerting_config = {'alertmanagers': [manager_config]}
+
+        return alerting_config
+
+    def _prometheus_config(self):
+        """Construct Prometheus configuration
+        """
+        config = self.model.config
+
+        scrape_config = {'global': self._prometheus_global_config(),
+                         'scrape_configs': []}
+
+        alerting_config = self._alerting_config()
+        if alerting_config:
+            scrape_config['alerting'] = alerting_config
+
+        # By default only monitor prometheus server itself
+        targets = ['localhost:{}'.format(config['port'])]
+        relation_targets = self.relation_targets
+        if relation_targets:
+            targets.extend(relation_targets)
+
+        default_config = {
+            'job_name': 'prometheus',
+            'scrape_interval': '5s',
+            'scrape_timeout': '5s',
+            'metrics_path': '/metrics',
+            'honor_timestamps': True,
+            'scheme': 'http',
+            'static_configs': [{
+                'targets': targets
+            }]
+        }
+        scrape_config['scrape_configs'].append(default_config)
+
+        logger.debug('Prometheus config : {}'.format(scrape_config))
+
+        return yaml.dump(scrape_config), targets
+
+    def _build_pod_spec(self):
+        """Construct a Juju pod specification for Prometheus
+        """
+        logger.debug('Building Pod Spec')
+        config = self.model.config
+        prometheus_config, targets = self._prometheus_config()
+        spec = {
+            'version': 3,
+            'containers': [{
+                'name': self.app.name,
+                'imageDetails': {
+                    'imagePath': config['prometheus-image-path'],
+                    'username': config.get('prometheus-image-username', ''),
+                    'password': config.get('prometheus-image-password', '')
+                },
+                'args': self._cli_args(),
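+                # Surface the computed scrape targets as an environment
+                # variable in the container.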
+                "envConfig": {
+                    "targets": str(targets),
+                },
+                'kubernetes': {
+                    'readinessProbe': {
+                        'httpGet': {
+                            'path': '/-/ready',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 10,
+                        'timeoutSeconds': 30
+                    },
+                    'livenessProbe': {
+                        'httpGet': {
+                            'path': '/-/healthy',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 30,
+                        'timeoutSeconds': 30
+                    }
+                },
+                'ports': [{
+                    'containerPort': config['port'],
+                    'name': 'prometheus-http',
+                    'protocol': 'TCP'
+                }],
+                'volumeConfig': [{
+                    'name': 'prometheus-config',
+                    'mountPath': '/etc/prometheus',
+                    'files': [{
+                        'path': 'prometheus.yml',
+                        'content': prometheus_config
+                    }]
+                }]
+            }]
+        }
+
+        return spec
+
+    def _check_config(self):
+        """Identify missing but required items in configuation
+
+        :returns: list of missing configuration items (configuration keys)
+        """
+        logger.debug('Checking Config')
+        config = self.model.config
+        missing = []
+
+        if not config.get('prometheus-image-path'):
+            missing.append('prometheus-image-path')
+
+        if config.get('prometheus-image-username') \
+                and not config.get('prometheus-image-password'):
+            missing.append('prometheus-image-password')
+
+        return missing
+
+    def _configure_pod(self):
+        """Setup a new Prometheus pod specification
+        """
+        logger.debug('Configuring Pod')
+        missing_config = self._check_config()
+        if missing_config:
+            logger.error('Incomplete Configuration : {}. '
+                         'Application will be blocked.'.format(missing_config))
+            self.unit.status = \
+                BlockedStatus('Missing configuration: {}'.format(missing_config))
+            return
+
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+
+        self.unit.status = MaintenanceStatus('Setting pod spec.')
+        pod_spec = self._build_pod_spec()
+
+        self.model.pod.set_spec(pod_spec)
+        self.app.status = ActiveStatus()
+        self.unit.status = ActiveStatus()
+
+    @property
+    def relation_targets(self):
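+        """Return 'host:port' scrape targets gathered from 'target' relations."""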
+        targets = []
+        for service in self.services():
+            if len(service["hosts"]) > 0:
+                for host in service["hosts"]:
+                    targets.append("{}:{}".format(
+                        host["hostname"],
+                        host["port"])
+                    )
+        return targets
+
+    def services(self):
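+        """Collect per-application host lists from all 'target' relations.
+
+        Hostnames are derived from the unit name and the remote app's
+        '-endpoints' Kubernetes service; the port defaults to 9100.
+        """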
+        services = []
+        relations = self.framework.model.relations.get("target")
+        if relations:
+            for relation in relations:
+                if not relation.app:
+                    continue
+                service_name = relation.app.name
+                hosts = []
+                for unit in relation.units:
+                    unit_name = unit.name.replace("/", "-")
+                    hostname = f"{unit_name}.{relation.app.name}-endpoints"
+                    private_address = relation.data[unit].get("private-address")
+                    port = relation.data[unit].get("port", 9100)
+                    if hostname and private_address:
+                        hosts.append({
+                            "hostname": hostname,
+                            "private-address": private_address,
+                            "port": port,
+                        })
+                services.append({
+                    "service_name": service_name,
+                    "hosts": hosts,
+                })
+        return services
+
+if __name__ == "__main__":
+    main(PrometheusCharm)
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/http_client.py b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/http_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bfdf084b328f6ec36e7a848fc0a0cea913e3635
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/src/http_client.py
@@ -0,0 +1,54 @@
+
+import ops.framework
+import ops.charm
+
+
+class BaseRelationClient(ops.framework.Object):
+    """Requires side of a Kafka Endpoint"""
+
+    def __init__(
+        self,
+        charm: ops.charm.CharmBase,
+        relation_name: str,
+        mandatory_fields: list,
+    ):
+        super().__init__(charm, relation_name)
+        self.relation_name = relation_name
+        self.mandatory_fields = mandatory_fields
+        self._update_relation()
+
+    def get_data_from_unit(self, key: str):
+        if not self.relation:
+            # Re-fetch the relation: in production the constructor runs on
+            # every hook, but in unit tests update_relation_data does not
+            # re-instantiate this object, so the cached relation can be stale.
+            self._update_relation()
+        if self.relation:
+            for unit in self.relation.units:
+                data = self.relation.data[unit].get(key)
+                if data:
+                    return data
+
+    def get_data_from_app(self, key: str):
+        if not self.relation or self.relation.app not in self.relation.data:
+            # Re-fetch the relation: in production the constructor runs on
+            # every hook, but in unit tests update_relation_data does not
+            # re-instantiate this object, so the cached relation can be stale.
+            self._update_relation()
+        if self.relation and self.relation.app in self.relation.data:
+            data = self.relation.data[self.relation.app].get(key)
+            if data:
+                return data
+
+    def is_missing_data_in_unit(self):
+        return not all(
+            [self.get_data_from_unit(field) for field in self.mandatory_fields]
+        )
+
+    def is_missing_data_in_app(self):
+        return not all(
+            [self.get_data_from_app(field) for field in self.mandatory_fields]
+        )
+
+    def _update_relation(self):
+        self.relation = self.framework.model.get_relation(self.relation_name)
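+
+
+# Hypothetical usage sketch from a charm's __init__, assuming a relation
+# named "prometheus" whose remote app must publish "host" and "port":
+#
+#   client = BaseRelationClient(self, "prometheus", ["host", "port"])
+#   if client.is_missing_data_in_app():
+#       self.unit.status = ops.model.BlockedStatus("Waiting for prometheus")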
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/tests/__init__.py b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/ops/prometheus-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..05f95782d01dfa5d3ae5965531c61fcc442909f5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/prometheus-operator/tests/test_charm.py
@@ -0,0 +1,313 @@
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+import unittest
+import yaml
+import json
+
+from ops.testing import Harness
+from charm import PrometheusCharm
+
+MINIMAL_CONFIG = {
+    'prometheus-image-path': 'prom/prometheus',
+    'port': 9090
+}
+
+SAMPLE_ALERTING_CONFIG = {
+    'alertmanagers': [{
+        'static_configs': [{
+            'targets': ['192.168.0.1:9093']
+        }]
+    }]
+}
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(PrometheusCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_image_path_is_required(self):
+        missing_image_config = {
+            'prometheus-image-path': '',
+            'prometheus-image-username': '',
+            'prometheus-image-password': ''
+        }
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(missing_image_config)
+            expected_logs = [
+                "ERROR:charm:Incomplete Configuration : ['prometheus-image-path']. "
+                "Application will be blocked."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        missing = self.harness.charm._check_config()
+        expected = ['prometheus-image-path']
+        self.assertEqual(missing, expected)
+
+    def test_password_is_required_when_username_is_set(self):
+        missing_password_config = {
+            'prometheus-image-path': 'prom/prometheus:latest',
+            'prometheus-image-username': 'some-user',
+            'prometheus-image-password': '',
+        }
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(missing_password_config)
+            expected_logs = [
+                "ERROR:charm:Incomplete Configuration : ['prometheus-image-password']. "
+                "Application will be blocked."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        missing = self.harness.charm._check_config()
+        expected = ['prometheus-image-password']
+        self.assertEqual(missing, expected)
+
+    def test_alerting_config_is_updated_by_alertmanager_relation(self):
+        self.harness.set_leader(True)
+
+        # check alerting config is empty without alertmanager relation
+        self.harness.update_config(MINIMAL_CONFIG)
+
+        self.assertEqual(self.harness.charm._stored.alertmanagers, [])
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+        # check alerting config is updated when a alertmanager joins
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+    def test_alerting_config_is_removed_when_alertmanager_is_broken(self):
+        self.harness.set_leader(True)
+
+        # ensure there is a non-empty alerting config
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+        rel = self.harness.model.get_relation('alertmanager')
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+        # check alerting config is removed when relation departs
+        self.harness.charm.on.alertmanager_relation_broken.emit(rel)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+    def test_grafana_is_provided_port_and_source(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('grafana-source', 'grafana')
+        self.harness.add_relation_unit(rel_id, 'grafana/0')
+        self.harness.update_relation_data(rel_id, 'grafana/0', {})
+        data = self.harness.get_relation_data(rel_id, self.harness.model.unit.name)
+
+        self.assertEqual(int(data['port']), MINIMAL_CONFIG['port'])
+        self.assertEqual(data['source-type'], 'prometheus')
+
+    def test_default_cli_log_level_is_info(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'info')
+
+    def test_invalid_log_level_defaults_to_debug(self):
+        self.harness.set_leader(True)
+        bad_log_config = MINIMAL_CONFIG.copy()
+        bad_log_config['log-level'] = 'bad-level'
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(bad_log_config)
+            expected_logs = [
+                "ERROR:root:Invalid loglevel: bad-level given, "
+                "debug/info/warn/error/fatal allowed. "
+                "defaulting to DEBUG loglevel."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'debug')
+
+    def test_valid_log_level_is_accepted(self):
+        self.harness.set_leader(True)
+        valid_log_config = MINIMAL_CONFIG.copy()
+        valid_log_config['log-level'] = 'warn'
+        self.harness.update_config(valid_log_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'warn')
+
+    def test_tsdb_compression_is_not_enabled_by_default(self):
+        self.harness.set_leader(True)
+        compress_config = MINIMAL_CONFIG.copy()
+        self.harness.update_config(compress_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'),
+                         None)
+
+    def test_tsdb_compression_can_be_enabled(self):
+        self.harness.set_leader(True)
+        compress_config = MINIMAL_CONFIG.copy()
+        compress_config['tsdb-wal-compression'] = True
+        self.harness.update_config(compress_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'),
+                         '--storage.tsdb.wal-compression')
+
+    def test_valid_tsdb_retention_times_can_be_set(self):
+        self.harness.set_leader(True)
+        retention_time_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            retention_time = '{}{}'.format(1, unit)
+            retention_time_config['tsdb-retention-time'] = retention_time
+            self.harness.update_config(retention_time_config)
+            pod_spec = self.harness.get_pod_spec()
+            self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                             retention_time)
+
+    def test_invalid_tsdb_retention_times_can_not_be_set(self):
+        self.harness.set_leader(True)
+        retention_time_config = MINIMAL_CONFIG.copy()
+
+        # invalid unit
+        retention_time = '{}{}'.format(1, 'x')
+        retention_time_config['tsdb-retention-time'] = retention_time
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(retention_time_config)
+            expected_logs = ["ERROR:charm:Invalid unit x in time spec"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                         None)
+
+        # invalid time value
+        retention_time = '{}{}'.format(0, 'd')
+        retention_time_config['tsdb-retention-time'] = retention_time
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(retention_time_config)
+            expected_logs = ["ERROR:charm:Expected positive time spec but got 0"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                         None)
+
+    def test_global_scrape_interval_can_be_set(self):
+        self.harness.set_leader(True)
+        scrapeint_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            scrapeint_config['scrape-interval'] = '{}{}'.format(1, unit)
+            self.harness.update_config(scrapeint_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['scrape_interval'],
+                             scrapeint_config['scrape-interval'])
+
+    def test_global_scrape_timeout_can_be_set(self):
+        self.harness.set_leader(True)
+        scrapetime_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            scrapetime_config['scrape-timeout'] = '{}{}'.format(1, unit)
+            self.harness.update_config(scrapetime_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['scrape_timeout'],
+                             scrapetime_config['scrape-timeout'])
+
+    def test_global_evaluation_interval_can_be_set(self):
+        self.harness.set_leader(True)
+        evalint_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            evalint_config['evaluation-interval'] = '{}{}'.format(1, unit)
+            self.harness.update_config(evalint_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['evaluation_interval'],
+                             evalint_config['evaluation-interval'])
+
+    def test_valid_external_labels_can_be_set(self):
+        self.harness.set_leader(True)
+        label_config = MINIMAL_CONFIG.copy()
+        labels = {'name1': 'value1',
+                  'name2': 'value2'}
+        label_config['external-labels'] = json.dumps(labels)
+        self.harness.update_config(label_config)
+        pod_spec = self.harness.get_pod_spec()
+        gconfig = global_config(pod_spec)
+        self.assertIsNotNone(gconfig['external_labels'])
+        self.assertEqual(labels, gconfig['external_labels'])
+
+    def test_invalid_external_labels_can_not_be_set(self):
+        self.harness.set_leader(True)
+        label_config = MINIMAL_CONFIG.copy()
+        # label value must be string
+        labels = {'name': 1}
+        label_config['external-labels'] = json.dumps(labels)
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(label_config)
+            expected_logs = ["ERROR:charm:External label keys/values must be strings"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        gconfig = global_config(pod_spec)
+        self.assertIsNone(gconfig.get('external_labels'))
+
+    def test_default_scrape_config_is_always_set(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        pod_spec = self.harness.get_pod_spec()
+        prometheus_scrape_config = scrape_config(pod_spec, 'prometheus')
+        self.assertIsNotNone(prometheus_scrape_config, 'No default config found')
+
+
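+# Helpers: Harness.get_pod_spec() returns a tuple whose first element is the
+# spec dict; the Prometheus config file is mounted as the first volume file
+# of the first container.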
+def alerting_config(pod_spec):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    return config_dict.get('alerting')
+
+
+def global_config(pod_spec):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    return config_dict['global']
+
+
+def scrape_config(pod_spec, job_name):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    scrape_configs = config_dict['scrape_configs']
+    for config in scrape_configs:
+        if config['job_name'] == job_name:
+            return config
+    return None
+
+
+def cli_arg(pod_spec, cli_opt):
+    args = pod_spec[0]['containers'][0]['args']
+    for arg in args:
+        opt_list = arg.split('=')
+        if len(opt_list) == 2 and opt_list[0] == cli_opt:
+            return opt_list[1]
+        if len(opt_list) == 1 and opt_list[0] == cli_opt:
+            return opt_list[0]
+    return None
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/.gitignore b/squid_cnf/juju-bundles/charms/ops/squid-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..94893c15705701a6ddf8c5870ead21941211233c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/.gitignore
@@ -0,0 +1,11 @@
+venv
+.vscode
+build
+*.charm
+.coverage
+coverage.xml
+.stestr
+cover
+release
+__pycache__
+.tox
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/README.md b/squid_cnf/juju-bundles/charms/ops/squid-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e51e282ddbcd3d884b751a5940f004c707bb492b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/README.md
@@ -0,0 +1,50 @@
+# squid
+
+## Overview
+
+This is a Kubernetes Charm to deploy [Squid Cache](http://www.squid-cache.org/).
+
+Suggested actions for this charm:
+* Set allowed URLs
+  Example: `juju run-action squid/0 add-url url=google.com`
+* Stop/Start/Restart the squid service
+  Example: `juju run-action squid/0 restart`
+* Set ftp, http, https proxies
+
+## Quickstart
+
+If you don't have microk8s and juju installed, execute the following commands:
+```
+sudo snap install juju --classic
+sudo snap install microk8s --classic
+juju bootstrap microk8s
+juju add-model squid
+juju deploy cs:~charmed-osm/squid
+```
+
+## Building it locally
+
+```bash
+git clone https://github.com/charmed-osm/squid-operator.git
+cd squid-operator
+charmcraft build
+juju deploy ./squid.charm --resource image=davigar15/squid:latest
+```
+
+Check if the charm is deployed correctly with `juju status`
+
+To test the `add-url` action, open another terminal and set the proxy:
+`export https_proxy=http://<squid-ip>:3128`
+
+where `<squid-ip>` is the squid application address shown in `juju status`.
+
+Now `curl https://www.google.com` fails, because squid blocks access to the URL.
+
+Execute the `add-url` action:
+`juju run-action squid/0 add-url url=google.com`
+
+Now `curl https://www.google.com` succeeds and returns the Google page.
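+
+To disallow the URL again, run the `delete-url` action (defined alongside
+`add-url` in `actions.yaml`):
+`juju run-action squid/0 delete-url url=google.com`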
+
+## Contact
+ - Author: David García
+ - Bug Tracker: [here](https://github.com/charmed-osm/squid-operator)
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/actions.yaml b/squid_cnf/juju-bundles/charms/ops/squid-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73ce7b2c9774958d280194ccf135126729cf530f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/actions.yaml
@@ -0,0 +1,14 @@
+add-url:
+    description: "Add allowed URL to squid config"
+    params:
+        url:
+            description: "URL that will be allowed"
+            type: string
+            default: ""
+delete-url:
+    description: "Delete allowed URL squid config"
+    params:
+        url:
+            description: "URL that will stop to be allowed"
+            type: string
+            default: ""
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/config.yaml b/squid_cnf/juju-bundles/charms/ops/squid-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d0e9cbd3971169fa3eecbe8af4921698fd10fb2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/config.yaml
@@ -0,0 +1,4 @@
+options:
+    enable-exporter:
+        type: boolean
+        description: Set to true to enable the node exporter
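+        # e.g. toggle at runtime with: juju config squid enable-exporter=true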
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/ops/squid-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c52012377e18306ed60b9f7500a956cfdd237ae4
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/metadata.yaml
@@ -0,0 +1,25 @@
+name: squid
+summary: Kubernetes operator for Squid
+maintainers:
+    - David Garcia <david.garcia@canonical.com>
+description: |
+    Squid is a caching proxy for the Web supporting HTTP, HTTPS, FTP, and more.
+    It reduces bandwidth and improves response times by caching and reusing
+    frequently-requested web pages. Squid has extensive access controls and
+    makes a great server accelerator. It runs on most available operating
+    systems, including Windows, and is licensed under the GNU GPL.
+tags:
+    - proxy
+    - firewall
+    - web
+series:
+    - kubernetes
+deployment:
+    type: stateful
+    service: loadbalancer
+provides:
+    prometheus-target:
+        interface: http
+peers:
+    cluster:
+        interface: squid-cluster
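+# e.g. relate to the prometheus charm's 'target' endpoint (both use the
+# 'http' interface): juju relate squid:prometheus-target prometheus:target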
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements-test.txt b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5c97248cc4b1b4a2e462c6cbf53a6b414f6d7709
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements-test.txt
@@ -0,0 +1,11 @@
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.in b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..9dc75b844c5fb78673f3c902190733cad32c9ac3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.in
@@ -0,0 +1,2 @@
+ops
+jinja2
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.txt b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5125805d98dc9ab10055d5d3aec3267a684f17e4
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/requirements.txt
@@ -0,0 +1,14 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.txt requirements.in
+#
+jinja2==2.11.3
+    # via -r requirements.in
+markupsafe==1.1.1
+    # via jinja2
+ops==1.1.0
+    # via -r requirements.in
+pyyaml==5.4.1
+    # via ops
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/src/charm.py b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..57584b7440eff04424f1d0ac6fb7c67b16719b1c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/charm.py
@@ -0,0 +1,97 @@
+#! /usr/bin/env python3
+
+import logging
+
+# import subprocess
+
+from ops.charm import CharmBase
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+from lib.squid.cluster import SquidCluster
+
+logger = logging.getLogger(__name__)
+
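+# Pod-spec fragment for a prometheus node-exporter sidecar; appended to the
+# container list when the "enable-exporter" config option is set.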
+EXPORTER_CONTAINER = {
+    "name": "exporter",
+    "image": "prom/node-exporter",
+    "ports": [
+        {
+            "containerPort": 9100,
+            "name": "exporter-http",
+            "protocol": "TCP",
+        }
+    ],
+}
+
+
+class SquidCharm(CharmBase):
+    """Class representing this Operator charm."""
+
+    def __init__(self, *args):
+        """Initialize charm and configure states and events to observe."""
+        super().__init__(*args)
+
+        self.framework.observe(self.on.config_changed, self.configure_pod)
+        self.framework.observe(self.on["add_url"].action, self._on_add_url_action)
+        self.framework.observe(self.on["delete_url"].action, self._on_delete_url_action)
+
+        self.framework.observe(
+            self.on["prometheus-target"].relation_joined,
+            self._publish_prometheus_target_info,
+        )
+
+        self.cluster = SquidCluster(self, "cluster")
+        self.framework.observe(self.on["cluster"].relation_changed, self.configure_pod)
+
+    def _publish_prometheus_target_info(self, event):
+        event.relation.data[self.unit]["host"] = self.app.name
+        event.relation.data[self.unit]["port"] = str(9100)
+
+    def _on_add_url_action(self, event):
+        self.cluster.add_url(event.params["url"])
+
+    def _on_delete_url_action(self, event):
+        self.cluster.delete_url(event.params["url"])
+
+    def configure_pod(self, event):
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+        self.unit.status = MaintenanceStatus("Applying pod spec")
+        containers = [
+            {
+                "name": self.framework.model.app.name,
+                "image": "davigar15/squid:latest",
+                "ports": [
+                    {
+                        "name": "squid",
+                        "containerPort": 3128,
+                        "protocol": "TCP",
+                    }
+                ],
+                "volumeConfig": [
+                    {
+                        "name": "config",
+                        "mountPath": "/etc/squid",
+                        "files": [
+                            {
+                                "path": "squid.conf",
+                                "content": self.cluster.squid_config,
+                            }
+                        ],
+                    }
+                ],
+            }
+        ]
+        if self.config.get("enable-exporter"):
+            containers.append(EXPORTER_CONTAINER)
+
+        self.model.pod.set_spec({"version": 3, "containers": containers})
+
+        self.unit.status = ActiveStatus()
+        self.app.status = ActiveStatus()
+
+
+if __name__ == "__main__":
+    main(SquidCharm)
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/__init__.py b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e82b76cc6bd51101abec24f114230b23f9d38cdd
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/__init__.py
@@ -0,0 +1,2 @@
+LIBAPI = 0
+LIBPATCH = 1
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/cluster.py b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d790f2eea0979195cb66d2addf10b4176116e9b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/cluster.py
@@ -0,0 +1,84 @@
+from jinja2 import Template
+import ast
+import logging
+import ops.charm
+import ops.model
+import ops.framework
+from . import templates
+
+
+try:
+    import importlib.resources as pkg_resources
+except ImportError:
+    # Try backported to PY<37 `importlib_resources`.
+    import importlib_resources as pkg_resources
+
+
+class SquidCluster(ops.framework.Object):
+    """Peer relation object for Squid"""
+
+    relation_name: str = None
+    log: logging.Logger = None
+
+    def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
+        super().__init__(charm, relation_name)
+
+        self.relation = self.framework.model.get_relation(relation_name)
+        self.log = logging.getLogger("squid.{}".format(relation_name))
+
+        self.framework.observe(
+            charm.on[relation_name].relation_changed, self._on_changed
+        )
+        self.framework.observe(charm.on[relation_name].relation_broken, self._on_broken)
+
+    def add_url(self, url: str):
+        if self.framework.model.unit.is_leader():
+            allowed_urls = self.allowed_urls
+            allowed_urls.add(url)
+            self.update_allowed_urls(allowed_urls)
+            self.framework.model.unit.status = ops.model.ActiveStatus(
+                repr(self.allowed_urls)
+            )
+
+    def delete_url(self, url: str):
+        if self.framework.model.unit.is_leader():
+            allowed_urls = self.allowed_urls
+            if url in allowed_urls:
+                allowed_urls.remove(url)
+                self.update_allowed_urls(allowed_urls)
+            # Status messages must be strings, so repr() the set (as add_url does)
+            self.framework.model.unit.status = ops.model.ActiveStatus(
+                repr(self.allowed_urls)
+            )
+
+    def _on_changed(self, event):
+        self.log.debug(f"on_changed: {self.framework.model.unit.name}")
+
+    def _on_broken(self, event):
+        self.log.debug(f"on_broken: {self.framework.model.unit.name}")
+
+    @property
+    def squid_config(self):
+        allowed_urls_string = self._generate_allowedurls_config(self.allowed_urls)
+        squid_config_template = pkg_resources.read_text(templates, "squid.conf")
+        return Template(squid_config_template).render(allowed_urls=allowed_urls_string)
+
+    @property
+    def allowed_urls(self):
+        # The URL set is stored in app relation data as the repr() of a Python
+        # set; parse it back with ast.literal_eval instead of eval for safety.
+        # Note literal_eval cannot parse "set()", the repr of an empty set.
+        raw = self.relation.data[self.framework.model.app].get("allowed_urls")
+        if not raw or raw == "set()":
+            return set()
+        return ast.literal_eval(raw)
+
+    def update_allowed_urls(self, allowed_urls: set):
+        self.relation.data[self.framework.model.app]["allowed_urls"] = repr(
+            allowed_urls
+        )
+
+    def is_ready(self):
+        return self.relation is not None
+
+    def _generate_allowedurls_config(self, allowed_urls: set):
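+        # e.g. {"google.com"} renders:
+        #   acl allowedurls dstdomain google.com
+        #   http_access allow allowedurls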
+        allowed_urls_text = ""
+        for url in allowed_urls:
+            allowed_urls_text += f"acl allowedurls dstdomain {url}\n"
+        if allowed_urls:
+            allowed_urls_text += "http_access allow allowedurls\n"
+        return allowed_urls_text
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/templates/__init__.py b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/templates/squid.conf b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/templates/squid.conf
new file mode 100644
index 0000000000000000000000000000000000000000..073d3cdfd95c4a84759fb77f09ecb3a155f6a995
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/src/lib/squid/templates/squid.conf
@@ -0,0 +1,26 @@
+acl SSL_ports port 443
+acl Safe_ports port 80		# http
+acl Safe_ports port 21		# ftp
+acl Safe_ports port 443		# https
+acl Safe_ports port 70		# gopher
+acl Safe_ports port 210		# wais
+acl Safe_ports port 1025-65535	# unregistered ports
+acl Safe_ports port 280		# http-mgmt
+acl Safe_ports port 488		# gss-http
+acl Safe_ports port 591		# filemaker
+acl Safe_ports port 777		# multiling http
+acl CONNECT method CONNECT
+http_access deny !Safe_ports
+http_access deny CONNECT !SSL_ports
+http_access allow localhost manager
+http_access deny manager
+http_access allow localhost
+{{ allowed_urls }}
+http_access deny all
+http_port 3128
+coredump_dir /var/spool/squid
+refresh_pattern ^ftp:		1440	20%	10080
+refresh_pattern ^gopher:	1440	0%	1440
+refresh_pattern -i (/cgi-bin/|\?) 0	0%	0
+refresh_pattern (Release|Packages(.gz)*)$      0       20%     2880
+refresh_pattern .		0	20%	4320
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/ops/squid-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ec4525e5a0cbada9bcecc5829fd5ceca49cf97
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/tests/test_charm.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+from ops.testing import Harness
+
+from charm import SquidCharm
+
+
+class TestCharm(unittest.TestCase):
+    """Prometheus Charm unit tests."""
+
+    def setUp(self) -> NoReturn:
+        """Test setup"""
+        self.harness = Harness(SquidCharm)
+        self.harness.set_leader(is_leader=True)
+        self.harness.begin()
+        self.config = {
+            "enable-exporter": True,
+        }
+        self.harness.update_config(self.config)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/squid_cnf/juju-bundles/charms/ops/squid-operator/tox.ini b/squid_cnf/juju-bundles/charms/ops/squid-operator/tox.ini
new file mode 100644
index 0000000000000000000000000000000000000000..b2ba7dfadd45dcd58750369e7211eda711421d9e
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/ops/squid-operator/tox.ini
@@ -0,0 +1,119 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+#######################################################################################
+
+[tox]
+envlist = flake8, cover, pylint, safety, yamllint
+skipsdist = True
+
+[testenv]
+basepython = python3.8
+setenv =
+  VIRTUAL_ENV={envdir}
+  PYTHONHASHSEED=0
+  PYTHONPATH = {toxinidir}/src
+deps =  -r{toxinidir}/requirements.txt
+
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
+[testenv:build]
+passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
+whitelist_externals =
+  charmcraft
+  rm
+  mv
+commands =
+  charmcraft build
+  rm -r ../../squid-operator
+  mv build ../../squid-operator
+
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
+
+max-complexity = 10
+import-order-style = google
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/.flake8 b/squid_cnf/juju-bundles/charms/prometheus-operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+  venv
+  .git
+  build
+  dist
+  *.egg_info
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/.gitignore b/squid_cnf/juju-bundles/charms/prometheus-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..b3b17b402232904b604711f178aefca0a623bdf5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/.gitignore
@@ -0,0 +1,6 @@
+*~
+*swp
+*.charm
+__pycache__
+build
+venv
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/LICENSE b/squid_cnf/juju-bundles/charms/prometheus-operator/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.
+
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License.  If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program.  Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance.  However,
+nothing other than this License grants you permission to propagate or
+modify any covered work.  These actions infringe copyright if you do
+not accept this License.  Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License.  You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations.  If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License.  For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based.  The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version.  For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement).  To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients.  "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License.  You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all.  For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Use with the GNU Affero General Public License.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work.  The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number.  If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation.  If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/README.md b/squid_cnf/juju-bundles/charms/prometheus-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..35f666076133ad99f6a0503a44ccedbf04bd7775
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/README.md
@@ -0,0 +1,78 @@
+# Prometheus Operator
+
+## Description
+
+The Prometheus Operator provides a cluster monitoring solution using
+[Prometheus](https://prometheus.io), which is an open source
+monitoring system and alerting toolkit.
+
+This repository contains a [Juju](https://jaas.ai/) Charm for
+deploying the monitoring component of Prometheus in a Kubernetes
+cluster. The alerting component of Prometheus is offered through a
+separate Charm.
+
+## Setup
+
+A typical setup using [snaps](https://snapcraft.io/) for deployment
+to a [microk8s](https://microk8s.io/) cluster can be done with the
+following commands
+
+    sudo snap install microk8s --classic
+    microk8s.enable dns storage registry dashboard
+    sudo snap install juju --classic
+    juju bootstrap microk8s microk8s
+    juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+
+## Build
+
+Install the charmcraft tool
+
+    sudo snap install charmcraft
+
+Build the charm in this git repository
+
+    charmcraft build
+
+## Usage
+
+Create a Juju model for your monitoring operators
+
+    juju add-model lma
+
+Deploy Prometheus using its default configuration.
+
+    juju deploy ./prometheus.charm
+
+View the Prometheus dashboard
+
+1. Use `juju status` to determine the IP address of the Prometheus unit
+2. Navigate to `http://<IP-Address>:9090` using your browser
+
+If required, remove the deployed monitoring model completely
+
+    juju destroy-model -y lma --no-wait --force --destroy-storage
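+
+This charm also defines a `reload-config` action (see `actions.yaml`). As a
+sketch, assuming a unit named `prometheus/0`, it can be invoked with the
+Juju 2.x action syntax
+
+    juju run-action prometheus/0 reload-config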
+
+## Relations
+
+Currently supported relations are listed below; an example follows the list.
+
+- [Grafana](https://github.com/canonical/grafana-operator)
+- [Alertmanager](https://github.com/canonical/alertmanager-operator)
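+
+As a sketch, assuming a Grafana application deployed as `grafana` that
+offers a matching `grafana-source` endpoint, the relation could be added
+with
+
+    juju relate prometheus:grafana-source grafana:grafana-source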
+
+## Developing
+
+Use your existing Python 3 development environment or create and
+activate a Python 3 virtualenv
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+
+Install the development requirements
+
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/actions.yaml b/squid_cnf/juju-bundles/charms/prometheus-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fef67f32c4a9134c536b965e8c53d055e18c4457
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/actions.yaml
@@ -0,0 +1,3 @@
+reload-config:
+  description: |
+    Tell Prometheus to reload its config from the ConfigMap.
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/actions/reload-config b/squid_cnf/juju-bundles/charms/prometheus-operator/actions/reload-config
new file mode 100755
index 0000000000000000000000000000000000000000..d736d4e1627e01599ba7cef209ba684ef4b0ef41
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/actions/reload-config
@@ -0,0 +1,2 @@
+#!/bin/sh
+kill -HUP 1 && echo "Sent SIGHUP to the Prometheus container, config reloaded"
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/config.yaml b/squid_cnf/juju-bundles/charms/prometheus-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e40cb172421a1ec10708a96377fbeb9bee5391cd
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/config.yaml
@@ -0,0 +1,99 @@
+options:
+  prometheus-image-path:
+    type: string
+    description: |
+      The location of the image to use,
+      e.g. "registry.example.com/prometheus:v1".
+
+      This setting is required.
+    default: "prom/prometheus:latest"
+  prometheus-image-username:
+    type: string
+    description: |
+      The username for accessing the registry specified in
+      prometheus-image-path.
+    default: ""
+  prometheus-image-password:
+    type: string
+    description: |
+      The password associated with prometheus-image-username for
+      accessing the registry specified in prometheus-image-path.
+    default: ""
+  port:
+    description: The port Prometheus will listen on
+    type: int
+    default: 9090
+  ssl-cert:
+    type: string
+    default:
+    description: |
+      SSL certificate to install and use for the Prometheus endpoint.
+  ssl-key:
+    type: string
+    default:
+    description: |
+      SSL key to use with the certificate specified in ssl-cert.
+  log-level:
+    description: |
+      Prometheus server log level (only log messages with the given severity
+      or above). Must be one of: [debug, info, warn, error, fatal].
+      If not set, the Prometheus default (info) is used.
+    type: string
+    default:
+  web-external-url:
+    description: |
+      The URL under which Prometheus is externally reachable (for example,
+      if Prometheus is served via a reverse proxy).
+      Used for generating relative and absolute links back to
+      Prometheus itself. If the URL has a path portion, it will be used to
+      prefix all HTTP endpoints served by Prometheus.
+
+      If omitted, relevant URL components will be derived automatically.
+    type: string
+    default: ""
+  tsdb-retention-time:
+    description: |
+      How long to retain samples in the storage.
+      Supported units: y, w, d, h, m, s
+    type: string
+    default: 15d
+  tsdb-wal-compression:
+    description: |
+      This flag enables compression of the write-ahead log (WAL).
+      Depending on your data, you can expect the WAL size to be
+      halved with little extra CPU load.
+    type: boolean
+    default: false
+  external-labels:
+    description: |
+      A JSON string of key-value pairs that specify the labels to
+      attach to metrics in this Prometheus instance when they get pulled
+      by an aggregating parent. This is useful in the case of federation
+      where, for example, you want each datacenter to have its own
+      Prometheus instance and then have a global instance that pulls from
+      each of these datacenter instances. By specifying a unique set of
+      external-labels for each datacenter instance, you can easily determine
+      in the aggregating Prometheus instance which datacenter a metric is
+      coming from. Note that you are not limited to one instance per
+      datacenter. The datacenter example here is arbitrary and you are free
+      to organize your federation's hierarchy as you see fit.
+      E.g. '{ "cluster": "datacenter1" }'. Both keys and values may be
+      chosen as you see fit.
+    type: string
+    default: "{}"
+  scrape-interval:
+    description: |
+      How frequently to scrape targets by default.
+    type: string
+    default: 1m
+  scrape-timeout:
+    description: |
+      How long until a scrape request times out.
+    type: string
+    default: 10s
+  evaluation-interval:
+    description: |
+      How frequently rules will be evaluated.
+    type: string
+    default: 1m
+
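+# A hedged example of setting the options above at deploy time; the option
+# names come from this file, the values are illustrative only:
+#
+#   juju deploy ./prometheus.charm \
+#     --config log-level=debug \
+#     --config tsdb-retention-time=30d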
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/config/prometheus-k8s.yml b/squid_cnf/juju-bundles/charms/prometheus-operator/config/prometheus-k8s.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e003db0ae0df07f539a6c725ddd7925ccba67232
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/config/prometheus-k8s.yml
@@ -0,0 +1,283 @@
+#
+# This file copied from https://github.com/prometheus/prometheus/blob/release-2.18/documentation/examples/prometheus-kubernetes.yml
+#
+
+# A scrape configuration for running Prometheus on a Kubernetes cluster.
+# This uses separate scrape configs for cluster components (i.e. API server, node)
+# and services to allow each to use different authentication configs.
+#
+# Kubernetes labels will be added as Prometheus labels on metrics via the
+# `labelmap` relabeling action.
+#
+# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
+# for the kubernetes-cadvisor job; you will need to edit or remove this job.
+
+# Scrape config for API servers.
+#
+# Kubernetes exposes API servers as endpoints to the default/kubernetes
+# service so this uses `endpoints` role and uses relabelling to only keep
+# the endpoints associated with the default/kubernetes service using the
+# default named port `https`. This works for single API server deployments as
+# well as HA API server deployments.
+scrape_configs:
+- job_name: 'kubernetes-apiservers'
+
+  kubernetes_sd_configs:
+  - role: endpoints
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    # If your node certificates are self-signed or use a different CA to the
+    # master CA, then disable certificate verification below. Note that
+    # certificate verification is an integral part of a secure infrastructure
+    # so this should only be disabled in a controlled environment. You can
+    # disable certificate verification by uncommenting the line below.
+    #
+    # insecure_skip_verify: true
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  # Keep only the default/kubernetes service endpoints for the https port. This
+  # will add targets for each API server which Kubernetes adds an endpoint to
+  # the default/kubernetes service.
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+    action: keep
+    regex: default;kubernetes;https
+
+# Scrape config for nodes (kubelet).
+#
+# Rather than connecting directly to the node, the scrape is proxied through the
+# Kubernetes apiserver.  This means it will work if Prometheus is running out of
+# cluster, or can't connect to nodes for some other reason (e.g. because of
+# firewalling).
+- job_name: 'kubernetes-nodes'
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  kubernetes_sd_configs:
+  - role: node
+
+  relabel_configs:
+  - action: labelmap
+    regex: __meta_kubernetes_node_label_(.+)
+  - target_label: __address__
+    replacement: kubernetes.default.svc:443
+  - source_labels: [__meta_kubernetes_node_name]
+    regex: (.+)
+    target_label: __metrics_path__
+    replacement: /api/v1/nodes/${1}/proxy/metrics
+
+# Scrape config for Kubelet cAdvisor.
+#
+# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
+# (those whose names begin with 'container_') have been removed from the
+# Kubelet metrics endpoint.  This job scrapes the cAdvisor endpoint to
+# retrieve those metrics.
+#
+# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
+# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
+# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
+# the --cadvisor-port=0 Kubelet flag).
+#
+# This job is not necessary and should be removed in Kubernetes 1.6 and
+# earlier versions, or it will cause the metrics to be scraped twice.
+- job_name: 'kubernetes-cadvisor'
+
+  # Default to scraping over https. If required, just disable this or change to
+  # `http`.
+  scheme: https
+
+  # This TLS & bearer token file config is used to connect to the actual scrape
+  # endpoints for cluster components. This is separate to discovery auth
+  # configuration because discovery & scraping are two separate concerns in
+  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+  # the cluster. Otherwise, more config options have to be provided within the
+  # <kubernetes_sd_config>.
+  tls_config:
+    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+
+  kubernetes_sd_configs:
+  - role: node
+
+  relabel_configs:
+  - action: labelmap
+    regex: __meta_kubernetes_node_label_(.+)
+  - target_label: __address__
+    replacement: kubernetes.default.svc:443
+  - source_labels: [__meta_kubernetes_node_name]
+    regex: (.+)
+    target_label: __metrics_path__
+    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
+
+# Example scrape config for service endpoints.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# for all or only some endpoints.
+- job_name: 'kubernetes-service-endpoints'
+
+  kubernetes_sd_configs:
+  - role: endpoints
+
+  relabel_configs:
+  # Example relabel to scrape only endpoints that have
+  # "example.io/should_be_scraped = true" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped]
+  #    action: keep
+  #    regex: true
+  #
+  # Example relabel to customize metric path based on endpoints
+  # "example.io/metric_path = <metric path>" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path]
+  #    action: replace
+  #    target_label: __metrics_path__
+  #    regex: (.+)
+  #
+  # Example relabel to scrape only single, desired port for the service based
+  # on endpoints "example.io/scrape_port = <port>" annotation.
+  #  - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port]
+  #    action: replace
+  #    regex: ([^:]+)(?::\d+)?;(\d+)
+  #    replacement: $1:$2
+  #    target_label: __address__
+  #
+  # Example relabel to configure scrape scheme for all service scrape targets
+  # based on endpoints "example.io/scrape_scheme = <scheme>" annotation.
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme]
+  #    action: replace
+  #    target_label: __scheme__
+  #    regex: (https?)
+  - action: labelmap
+    regex: __meta_kubernetes_service_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    action: replace
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_service_name]
+    action: replace
+    target_label: kubernetes_name
+
+# Example scrape config for probing services via the Blackbox Exporter.
+#
+# The relabeling allows the actual service scrape endpoint to be configured
+# for all or only some services.
+- job_name: 'kubernetes-services'
+
+  metrics_path: /probe
+  params:
+    module: [http_2xx]
+
+  kubernetes_sd_configs:
+  - role: service
+
+  relabel_configs:
+  # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation
+  #  - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed]
+  #    action: keep
+  #    regex: true
+  - source_labels: [__address__]
+    target_label: __param_target
+  - target_label: __address__
+    replacement: blackbox-exporter.example.com:9115
+  - source_labels: [__param_target]
+    target_label: instance
+  - action: labelmap
+    regex: __meta_kubernetes_service_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_service_name]
+    target_label: kubernetes_name
+
+# Example scrape config for probing ingresses via the Blackbox Exporter.
+#
+# The relabeling allows the actual ingress scrape endpoint to be configured
+# for all or only some services.
+- job_name: 'kubernetes-ingresses'
+
+  metrics_path: /probe
+  params:
+    module: [http_2xx]
+
+  kubernetes_sd_configs:
+  - role: ingress
+
+  relabel_configs:
+  # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation
+  #  - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed]
+  #    action: keep
+  #    regex: true
+  - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
+    regex: (.+);(.+);(.+)
+    replacement: ${1}://${2}${3}
+    target_label: __param_target
+  - target_label: __address__
+    replacement: blackbox-exporter.example.com:9115
+  - source_labels: [__param_target]
+    target_label: instance
+  - action: labelmap
+    regex: __meta_kubernetes_ingress_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_ingress_name]
+    target_label: kubernetes_name
+
+# Example scrape config for pods
+#
+# The relabeling allows the actual pod scrape to be configured
+# for all the declared ports (or port-free target if none is declared)
+# or only some ports.
+- job_name: 'kubernetes-pods'
+
+  kubernetes_sd_configs:
+  - role: pod
+
+  relabel_configs:
+  # Example relabel to scrape only pods that have
+  # "example.io/should_be_scraped = true" annotation.
+  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped]
+  #    action: keep
+  #    regex: true
+  #
+  # Example relabel to customize metric path based on pod
+  # "example.io/metric_path = <metric path>" annotation.
+  #  - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path]
+  #    action: replace
+  #    target_label: __metrics_path__
+  #    regex: (.+)
+  #
+  # Example relabel to scrape only single, desired port for the pod
+  # based on pod "example.io/scrape_port = <port>" annotation.
+  #  - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port]
+  #    action: replace
+  #    regex: ([^:]+)(?::\d+)?;(\d+)
+  #    replacement: $1:$2
+  #    target_label: __address__
+  - action: labelmap
+    regex: __meta_kubernetes_pod_label_(.+)
+  - source_labels: [__meta_kubernetes_namespace]
+    action: replace
+    target_label: kubernetes_namespace
+  - source_labels: [__meta_kubernetes_pod_name]
+    action: replace
+    target_label: kubernetes_pod_name
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/dispatch b/squid_cnf/juju-bundles/charms/prometheus-operator/dispatch
new file mode 100755
index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/dispatch
@@ -0,0 +1,3 @@
+#!/bin/sh
+
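+# Run the charm entry point with the hook name Juju is dispatching (falling
+# back to this script's own path) and with the charm's bundled libraries on
+# PYTHONPATH.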
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/install b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/install
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/install
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/start b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/start
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/start
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/upgrade-charm b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/upgrade-charm
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/hooks/upgrade-charm
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/icon.svg b/squid_cnf/juju-bundles/charms/prometheus-operator/icon.svg
new file mode 100644
index 0000000000000000000000000000000000000000..5c51f66d901d0a30c082a7207a53d19b763acc2b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/icon.svg
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0)  -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   version="1.1"
+   id="Layer_1"
+   x="0px"
+   y="0px"
+   width="115.333px"
+   height="114px"
+   viewBox="0 0 115.333 114"
+   enable-background="new 0 0 115.333 114"
+   xml:space="preserve"
+   sodipodi:docname="prometheus_logo_orange.svg"
+   inkscape:version="0.92.1 r15371"><metadata
+     id="metadata4495"><rdf:RDF><cc:Work
+         rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
+     id="defs4493" /><sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1484"
+     inkscape:window-height="886"
+     id="namedview4491"
+     showgrid="false"
+     inkscape:zoom="5.2784901"
+     inkscape:cx="60.603667"
+     inkscape:cy="60.329656"
+     inkscape:window-x="54"
+     inkscape:window-y="7"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="Layer_1" /><g
+     id="Layer_2" /><path
+     style="fill:#e6522c;fill-opacity:1"
+     inkscape:connector-curvature="0"
+     id="path4486"
+     d="M 56.667,0.667 C 25.372,0.667 0,26.036 0,57.332 c 0,31.295 25.372,56.666 56.667,56.666 31.295,0 56.666,-25.371 56.666,-56.666 0,-31.296 -25.372,-56.665 -56.666,-56.665 z m 0,106.055 c -8.904,0 -16.123,-5.948 -16.123,-13.283 H 72.79 c 0,7.334 -7.219,13.283 -16.123,13.283 z M 83.297,89.04 H 30.034 V 79.382 H 83.298 V 89.04 Z M 83.106,74.411 H 30.186 C 30.01,74.208 29.83,74.008 29.66,73.802 24.208,67.182 22.924,63.726 21.677,60.204 c -0.021,-0.116 6.611,1.355 11.314,2.413 0,0 2.42,0.56 5.958,1.205 -3.397,-3.982 -5.414,-9.044 -5.414,-14.218 0,-11.359 8.712,-21.285 5.569,-29.308 3.059,0.249 6.331,6.456 6.552,16.161 3.252,-4.494 4.613,-12.701 4.613,-17.733 0,-5.21 3.433,-11.262 6.867,-11.469 -3.061,5.045 0.793,9.37 4.219,20.099 1.285,4.03 1.121,10.812 2.113,15.113 C 63.797,33.534 65.333,20.5 71,16 c -2.5,5.667 0.37,12.758 2.333,16.167 3.167,5.5 5.087,9.667 5.087,17.548 0,5.284 -1.951,10.259 -5.242,14.148 3.742,-0.702 6.326,-1.335 6.326,-1.335 l 12.152,-2.371 c 10e-4,-10e-4 -1.765,7.261 -8.55,14.254 z" /></svg>
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/manifest.yaml b/squid_cnf/juju-bundles/charms/prometheus-operator/manifest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6248c8ff1a11fc5c8190427d161a8a2771ae663
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/manifest.yaml
@@ -0,0 +1,7 @@
+bases:
+- architectures:
+  - amd64
+  channel: '20.04'
+  name: ubuntu
+charmcraft-started-at: '2021-05-31T06:48:04.832586Z'
+charmcraft-version: 0.10.0
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/prometheus-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5d4329da48a621aad3a50ab6df40d72a11dbcb3f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/metadata.yaml
@@ -0,0 +1,34 @@
+name: prometheus
+summary: Prometheus for Kubernetes clusters
+maintainers:
+    - Balbir Thomas <balbir.thomas@canonical.com>
+description: |
+  Prometheus is an open source monitoring solution. Prometheus
+  supports aggregating high-dimensional data and exposes a powerful
+  query language, PromQL. This charm deploys and operates Prometheus on
+  Kubernetes clusters. Prometheus can raise alerts through a relation
+  with the Alertmanager charm. Alerting rules for Prometheus need to
+  be provided through a relation with the application that requires
+  alerting. Prometheus provides its own dashboard for data
+  visualization, but a richer visualization interface may be obtained
+  through a relation with the Grafana charm.
+tags:
+    - observability
+    - lma
+    - prometheus
+    - monitoring
+    - alerting
+    - grafana
+series:
+    - kubernetes
+requires:
+    grafana-source:
+        interface: grafana-datasource
+    alertmanager:
+        interface: alertmanager
+    target:
+        interface: http
+storage:
+  database:
+    type: filesystem
+    location: /var/lib/prometheus
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/requirements-dev.txt b/squid_cnf/juju-bundles/charms/prometheus-operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3950bef2e306b78aaa231135636a04f2d443d569
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/requirements-dev.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+black
+flake8
+pytest
+pytest-cov
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/requirements.txt b/squid_cnf/juju-bundles/charms/prometheus-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff3e3351770b50b916e2fcc6478e986f59c35845
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/requirements.txt
@@ -0,0 +1,2 @@
+ops
+pyaml
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/run_tests b/squid_cnf/juju-bundles/charms/prometheus-operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..637497ffe1bac2f75fec96b3bc1d25e16e39e1d8
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+black --diff src tests
+python3 -m unittest -v "$@"
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/setup.py b/squid_cnf/juju-bundles/charms/prometheus-operator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ce0b9ca35e585d2b1925a13bd58d1ea67f0900
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/setup.py
@@ -0,0 +1,21 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="prometheus-charm",
+    version="0.0.1",
+    author="Balbir Thomas",
+    author_email="balbir.thomas@canonical.com",
+    description="Kubernetes Charm/Operator for Prometheus",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/balbirthomas/prometheus-charm",
+    packages=setuptools.find_packages(),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.6',  # the charm code uses f-strings
+)
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/src/charm.py b/squid_cnf/juju-bundles/charms/prometheus-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..517a5c456843b057be4c40edf816221f9a8ef6a6
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/src/charm.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+import logging
+import yaml
+import json
+
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+logger = logging.getLogger(__name__)
+
+
+class PrometheusCharm(CharmBase):
+    """A Juju Charm for Prometheus
+    """
+    _stored = StoredState()
+
+    def __init__(self, *args):
+        logger.debug('Initializing Charm')
+
+        super().__init__(*args)
+
+        self._stored.set_default(alertmanagers=[])
+        self._stored.set_default(alertmanager_port='9093')
+
+        self.framework.observe(self.on.config_changed, self._on_config_changed)
+        self.framework.observe(self.on.stop, self._on_stop)
+        self.framework.observe(self.on['alertmanager'].relation_changed,
+                               self._on_alertmanager_changed)
+        self.framework.observe(self.on['alertmanager'].relation_broken,
+                               self._on_alertmanager_broken)
+
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self._on_grafana_changed)
+        self.framework.observe(self.on['target'].relation_changed,
+                               self._on_config_changed)
+
+    def _on_config_changed(self, _):
+        """Set a new Juju pod specification
+        """
+        self._configure_pod()
+
+    def _on_stop(self, _):
+        """Mark unit is inactive
+        """
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def _on_grafana_changed(self, event):
+        """Provide Grafana with data source information
+        """
+        event.relation.data[self.unit]['port'] = str(self.model.config['port'])
+        event.relation.data[self.unit]['source-type'] = 'prometheus'
+
+    def _on_alertmanager_changed(self, event):
+        """Set an alertmanager configuation
+        """
+        if not self.unit.is_leader():
+            return
+
+        addrs = json.loads(event.relation.data[event.app].get('addrs', '[]'))
+        port = event.relation.data[event.app]['port']
+
+        self._stored.alertmanager_port = port
+        self._stored.alertmanagers = addrs
+
+        self._configure_pod()
+
+    def _on_alertmanager_broken(self, event):
+        """Remove all alertmanager configuration
+        """
+        if not self.unit.is_leader():
+            return
+        self._stored.alertmanagers.clear()
+        self._configure_pod()
+
+    def _cli_args(self):
+        """Construct command line arguments for Prometheus
+        """
+        config = self.model.config
+        args = [
+            '--config.file=/etc/prometheus/prometheus.yml',
+            '--storage.tsdb.path=/var/lib/prometheus',
+            '--web.enable-lifecycle',
+            '--web.console.templates=/usr/share/prometheus/consoles',
+            '--web.console.libraries=/usr/share/prometheus/console_libraries'
+        ]
+
+        # get log level
+        allowed_log_levels = ['debug', 'info', 'warn', 'error', 'fatal']
+        if config.get('log-level'):
+            log_level = config['log-level'].lower()
+        else:
+            log_level = 'info'
+
+        # If log level is invalid set it to debug
+        if log_level not in allowed_log_levels:
+            logging.error(
+                'Invalid loglevel: {0} given, {1} allowed. '
+                'defaulting to DEBUG loglevel.'.format(
+                    log_level, '/'.join(allowed_log_levels)
+                )
+            )
+            log_level = 'debug'
+
+        # set log level
+        args.append(
+            '--log.level={0}'.format(log_level)
+        )
+
+        # Enable time series database compression
+        if config.get('tsdb-wal-compression'):
+            args.append('--storage.tsdb.wal-compression')
+
+        # Set time series retention time
+        if config.get('tsdb-retention-time') and self._is_valid_timespec(
+                config['tsdb-retention-time']):
+            args.append('--storage.tsdb.retention.time={}'.format(config['tsdb-retention-time']))
+
+        return args
+
+    def _is_valid_timespec(self, timeval):
+        """Is a time interval unit and value valid
+        """
+        if not timeval:
+            return False
+
+        time, unit = timeval[:-1], timeval[-1]
+
+        if unit not in ['y', 'w', 'd', 'h', 'm', 's']:
+            logger.error('Invalid unit {} in time spec'.format(unit))
+            return False
+
+        try:
+            int(time)
+        except ValueError:
+            logger.error('Can not convert time {} to integer'.format(time))
+            return False
+
+        if not int(time) > 0:
+            logger.error('Expected positive time spec but got {}'.format(time))
+            return False
+
+        return True
+
+    def _are_valid_labels(self, json_data):
+        """Are Prometheus external labels valid
+        """
+        if not json_data:
+            return False
+
+        try:
+            labels = json.loads(json_data)
+        except (ValueError, TypeError):
+            logger.error('Can not parse external labels : {}'.format(json_data))
+            return False
+
+        if not isinstance(labels, dict):
+            logger.error('Expected label dictionary but got : {}'.format(labels))
+            return False
+
+        for key, value in labels.items():
+            if not isinstance(key, str) or not isinstance(value, str):
+                logger.error('External label keys/values must be strings')
+                return False
+
+        return True
+
+    def _external_labels(self):
+        """Extract external labels for Prometheus from configuration
+        """
+        config = self.model.config
+        labels = {}
+
+        if config.get('external-labels') and self._are_valid_labels(
+                config['external-labels']):
+            labels = json.loads(config['external-labels'])
+
+        return labels
+
+    def _prometheus_global_config(self):
+        """Construct Prometheus global configuration
+        """
+        config = self.model.config
+        global_config = {}
+
+        labels = self._external_labels()
+        if labels:
+            global_config['external_labels'] = labels
+
+        if config.get('scrape-interval') and self._is_valid_timespec(
+                config['scrape-interval']):
+            global_config['scrape_interval'] = config['scrape-interval']
+
+        if config.get('scrape-timeout') and self._is_valid_timespec(
+                config['scrape-timeout']):
+            global_config['scrape_timeout'] = config['scrape-timeout']
+
+        if config.get('evaluation-interval') and self._is_valid_timespec(
+                config['evaluation-interval']):
+            global_config['evaluation_interval'] = config['evaluation-interval']
+
+        return global_config
+
+    def _alerting_config(self):
+        """Construct Prometheus altering configuation
+        """
+        alerting_config = ''
+
+        if len(self._stored.alertmanagers) < 1:
+            logger.debug('No alertmanagers available')
+            return alerting_config
+
+        targets = []
+        for manager in self._stored.alertmanagers:
+            port = self._stored.alertmanager_port
+            targets.append("{}:{}".format(manager, port))
+
+        manager_config = {'static_configs': [{'targets': targets}]}
+        alerting_config = {'alertmanagers': [manager_config]}
+
+        return alerting_config
+
+    def _prometheus_config(self):
+        """Construct Prometheus configuration
+        """
+        config = self.model.config
+
+        scrape_config = {'global': self._prometheus_global_config(),
+                         'scrape_configs': []}
+
+        alerting_config = self._alerting_config()
+        if alerting_config:
+            scrape_config['alerting'] = alerting_config
+
+        # By default only monitor prometheus server itself
+        targets = ['localhost:{}'.format(config['port'])]
+        relation_targets = self.relation_targets
+        if relation_targets:
+            targets.extend(relation_targets)
+
+        default_config = {
+            'job_name': 'prometheus',
+            'scrape_interval': '5s',
+            'scrape_timeout': '5s',
+            'metrics_path': '/metrics',
+            'honor_timestamps': True,
+            'scheme': 'http',
+            'static_configs': [{
+                'targets': targets
+            }]
+        }
+        scrape_config['scrape_configs'].append(default_config)
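+        # For illustration: with `port` set to 9090 and no `target` relations,
+        # the dumped YAML contains a single scrape job, roughly:
+        #   scrape_configs:
+        #   - job_name: prometheus
+        #     static_configs:
+        #     - targets: ['localhost:9090']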
+
+        logger.debug('Prometheus config : {}'.format(scrape_config))
+
+        return yaml.dump(scrape_config), targets
+
+    def _build_pod_spec(self):
+        """Construct a Juju pod specification for Prometheus
+        """
+        logger.debug('Building Pod Spec')
+        config = self.model.config
+        prometheus_config, targets = self._prometheus_config()
+        spec = {
+            'version': 3,
+            'containers': [{
+                'name': self.app.name,
+                'imageDetails': {
+                    'imagePath': config['prometheus-image-path'],
+                    'username': config.get('prometheus-image-username', ''),
+                    'password': config.get('prometheus-image-password', '')
+                },
+                'args': self._cli_args(),
+                "envConfig": {
+                    "targets": str(targets),
+                },
+                'kubernetes': {
+                    'readinessProbe': {
+                        'httpGet': {
+                            'path': '/-/ready',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 10,
+                        'timeoutSeconds': 30
+                    },
+                    'livenessProbe': {
+                        'httpGet': {
+                            'path': '/-/healthy',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 30,
+                        'timeoutSeconds': 30
+                    }
+                },
+                'ports': [{
+                    'containerPort': config['port'],
+                    'name': 'prometheus-http',
+                    'protocol': 'TCP'
+                }],
+                'volumeConfig': [{
+                    'name': 'prometheus-config',
+                    'mountPath': '/etc/prometheus',
+                    'files': [{
+                        'path': 'prometheus.yml',
+                        'content': prometheus_config
+                    }]
+                }]
+            }]
+        }
+
+        return spec
+
+    def _check_config(self):
+        """Identify missing but required items in configuation
+
+        :returns: list of missing configuration items (configuration keys)
+        """
+        logger.debug('Checking Config')
+        config = self.model.config
+        missing = []
+
+        if not config.get('prometheus-image-path'):
+            missing.append('prometheus-image-path')
+
+        if config.get('prometheus-image-username') \
+                and not config.get('prometheus-image-password'):
+            missing.append('prometheus-image-password')
+
+        return missing
+
+    def _configure_pod(self):
+        """Setup a new Prometheus pod specification
+        """
+        logger.debug('Configuring Pod')
+        missing_config = self._check_config()
+        if missing_config:
+            logger.error('Incomplete Configuration : {}. '
+                         'Application will be blocked.'.format(missing_config))
+            self.unit.status = \
+                BlockedStatus('Missing configuration: {}'.format(missing_config))
+            return
+
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+
+        self.unit.status = MaintenanceStatus('Setting pod spec.')
+        pod_spec = self._build_pod_spec()
+
+        self.model.pod.set_spec(pod_spec)
+        self.app.status = ActiveStatus()
+        self.unit.status = ActiveStatus()
+
+    @property
+    def relation_targets(self):
+        targets = []
+        for service in self.services():
+            if len(service["hosts"]) > 0:
+                for host in service["hosts"]:
+                    targets.append("{}:{}".format(
+                        host["hostname"],
+                        host["port"])
+                    )
+        return targets
+
+    def services(self):
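+        # Build one entry per `target` relation. Each remote unit is addressed
+        # through its Kubernetes headless service name,
+        # <unit-name>.<app-name>-endpoints, rather than its private address.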
+        services = []
+        relations = self.framework.model.relations.get("target")
+        if relations:
+            for relation in relations:
+                if not relation.app:
+                    continue
+                service_name = relation.app.name
+                hosts = []
+                for unit in relation.units:
+                    unit_name = unit.name.replace("/", "-")
+                    hostname = f"{unit_name}.{relation.app.name}-endpoints"
+                    private_address = relation.data[unit].get("private-address")
+                    port = relation.data[unit].get("port", 9100)
+                    if hostname and private_address:
+                        hosts.append({
+                            "hostname": hostname,
+                            "private-address": private_address,
+                            "port": port,
+                        })
+                services.append({
+                    "service_name": service_name,
+                    "hosts": hosts,
+                })
+        return services
+
+
+if __name__ == "__main__":
+    main(PrometheusCharm)
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/src/http_client.py b/squid_cnf/juju-bundles/charms/prometheus-operator/src/http_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bfdf084b328f6ec36e7a848fc0a0cea913e3635
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/src/http_client.py
@@ -0,0 +1,54 @@
+import ops.framework
+import ops.charm
+
+
+class BaseRelationClient(ops.framework.Object):
+    """Requires side of a Kafka Endpoint"""
+
+    def __init__(
+        self,
+        charm: ops.charm.CharmBase,
+        relation_name: str,
+        mandatory_fields: list,
+    ):
+        super().__init__(charm, relation_name)
+        self.relation_name = relation_name
+        self.mandatory_fields = mandatory_fields
+        self._update_relation()
+
+    def get_data_from_unit(self, key: str):
+        if not self.relation:
+            # Refresh the cached relation. In production the constructor runs
+            # on every hook, so this is normally redundant; but in unit tests
+            # update_relation_data does not re-run the constructor, so the
+            # cached relation can be stale or empty.
+            self._update_relation()
+        if self.relation:
+            for unit in self.relation.units:
+                data = self.relation.data[unit].get(key)
+                if data:
+                    return data
+
+    def get_data_from_app(self, key: str):
+        if not self.relation or self.relation.app not in self.relation.data:
+            # Refresh the cached relation (see the comment in get_data_from_unit).
+            self._update_relation()
+        if self.relation and self.relation.app in self.relation.data:
+            data = self.relation.data[self.relation.app].get(key)
+            if data:
+                return data
+
+    def is_missing_data_in_unit(self):
+        return not all(
+            [self.get_data_from_unit(field) for field in self.mandatory_fields]
+        )
+
+    def is_missing_data_in_app(self):
+        return not all(
+            [self.get_data_from_app(field) for field in self.mandatory_fields]
+        )
+
+    def _update_relation(self):
+        self.relation = self.framework.model.get_relation(self.relation_name)
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/tests/__init__.py b/squid_cnf/juju-bundles/charms/prometheus-operator/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/prometheus-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/prometheus-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..05f95782d01dfa5d3ae5965531c61fcc442909f5
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/prometheus-operator/tests/test_charm.py
@@ -0,0 +1,313 @@
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+import unittest
+import yaml
+import json
+
+from ops.testing import Harness
+from charm import PrometheusCharm
+
+MINIMAL_CONFIG = {
+    'prometheus-image-path': 'prom/prometheus',
+    'port': 9090
+}
+
+SAMPLE_ALERTING_CONFIG = {
+    'alertmanagers': [{
+        'static_configs': [{
+            'targets': ['192.168.0.1:9093']
+        }]
+    }]
+}
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(PrometheusCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_image_path_is_required(self):
+        missing_image_config = {
+            'prometheus-image-path': '',
+            'prometheus-image-username': '',
+            'prometheus-image-password': ''
+        }
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(missing_image_config)
+            expected_logs = [
+                "ERROR:charm:Incomplete Configuration : ['prometheus-image-path']. "
+                "Application will be blocked."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        missing = self.harness.charm._check_config()
+        expected = ['prometheus-image-path']
+        self.assertEqual(missing, expected)
+
+    def test_password_is_required_when_username_is_set(self):
+        missing_password_config = {
+            'prometheus-image-path': 'prom/prometheus:latest',
+            'prometheus-image-username': 'some-user',
+            'prometheus-image-password': '',
+        }
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(missing_password_config)
+            expected_logs = [
+                "ERROR:charm:Incomplete Configuration : ['prometheus-image-password']. "
+                "Application will be blocked."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        missing = self.harness.charm._check_config()
+        expected = ['prometheus-image-password']
+        self.assertEqual(missing, expected)
+
+    def test_alerting_config_is_updated_by_alertmanager_relation(self):
+        self.harness.set_leader(True)
+
+        # check alerting config is empty without alertmanager relation
+        self.harness.update_config(MINIMAL_CONFIG)
+
+        self.assertEqual(self.harness.charm._stored.alertmanagers, [])
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+        # check alerting config is updated when an alertmanager joins
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+    def test_alerting_config_is_removed_when_alertmanager_is_broken(self):
+        self.harness.set_leader(True)
+
+        # ensure there is a non-empty alerting config
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+        rel = self.harness.model.get_relation('alertmanager')
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+        # check alerting config is removed when relation departs
+        self.harness.charm.on.alertmanager_relation_broken.emit(rel)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+    def test_grafana_is_provided_port_and_source(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('grafana-source', 'grafana')
+        self.harness.add_relation_unit(rel_id, 'grafana/0')
+        self.harness.update_relation_data(rel_id, 'grafana/0', {})
+        data = self.harness.get_relation_data(rel_id, self.harness.model.unit.name)
+
+        self.assertEqual(int(data['port']), MINIMAL_CONFIG['port'])
+        self.assertEqual(data['source-type'], 'prometheus')
+
+    def test_default_cli_log_level_is_info(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'info')
+
+    def test_invalid_log_level_defaults_to_debug(self):
+        self.harness.set_leader(True)
+        bad_log_config = MINIMAL_CONFIG.copy()
+        bad_log_config['log-level'] = 'bad-level'
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(bad_log_config)
+            expected_logs = [
+                "ERROR:root:Invalid loglevel: bad-level given, "
+                "debug/info/warn/error/fatal allowed. "
+                "defaulting to DEBUG loglevel."
+            ]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'debug')
+
+    def test_valid_log_level_is_accepted(self):
+        self.harness.set_leader(True)
+        valid_log_config = MINIMAL_CONFIG.copy()
+        valid_log_config['log-level'] = 'warn'
+        self.harness.update_config(valid_log_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'warn')
+
+    def test_tsdb_compression_is_not_enabled_by_default(self):
+        self.harness.set_leader(True)
+        compress_config = MINIMAL_CONFIG.copy()
+        self.harness.update_config(compress_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'),
+                         None)
+
+    def test_tsdb_compression_can_be_enabled(self):
+        self.harness.set_leader(True)
+        compress_config = MINIMAL_CONFIG.copy()
+        compress_config['tsdb-wal-compression'] = True
+        self.harness.update_config(compress_config)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'),
+                         '--storage.tsdb.wal-compression')
+
+    def test_valid_tsdb_retention_times_can_be_set(self):
+        self.harness.set_leader(True)
+        retention_time_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            retention_time = '{}{}'.format(1, unit)
+            retention_time_config['tsdb-retention-time'] = retention_time
+            self.harness.update_config(retention_time_config)
+            pod_spec = self.harness.get_pod_spec()
+            self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                             retention_time)
+
+    def test_invalid_tsdb_retention_times_can_not_be_set(self):
+        self.harness.set_leader(True)
+        retention_time_config = MINIMAL_CONFIG.copy()
+
+        # invalid unit
+        retention_time = '{}{}'.format(1, 'x')
+        retention_time_config['tsdb-retention-time'] = retention_time
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(retention_time_config)
+            expected_logs = ["ERROR:charm:Invalid unit x in time spec"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                         None)
+
+        # invalid time value
+        retention_time = '{}{}'.format(0, 'd')
+        retention_time_config['tsdb-retention-time'] = retention_time
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(retention_time_config)
+            expected_logs = ["ERROR:charm:Expected positive time spec but got 0"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'),
+                         None)
+
+    def test_global_scrape_interval_can_be_set(self):
+        self.harness.set_leader(True)
+        scrapeint_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            scrapeint_config['scrape-interval'] = '{}{}'.format(1, unit)
+            self.harness.update_config(scrapeint_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['scrape_interval'],
+                             scrapeint_config['scrape-interval'])
+
+    def test_global_scrape_timeout_can_be_set(self):
+        self.harness.set_leader(True)
+        scrapetime_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            scrapetime_config['scrape-timeout'] = '{}{}'.format(1, unit)
+            self.harness.update_config(scrapetime_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['scrape_timeout'],
+                             scrapetime_config['scrape-timeout'])
+
+    def test_global_evaluation_interval_can_be_set(self):
+        self.harness.set_leader(True)
+        evalint_config = MINIMAL_CONFIG.copy()
+        acceptable_units = ['y', 'w', 'd', 'h', 'm', 's']
+        for unit in acceptable_units:
+            evalint_config['evaluation-interval'] = '{}{}'.format(1, unit)
+            self.harness.update_config(evalint_config)
+            pod_spec = self.harness.get_pod_spec()
+            gconfig = global_config(pod_spec)
+            self.assertEqual(gconfig['evaluation_interval'],
+                             evalint_config['evaluation-interval'])
+
+    def test_valid_external_labels_can_be_set(self):
+        self.harness.set_leader(True)
+        label_config = MINIMAL_CONFIG.copy()
+        labels = {'name1': 'value1',
+                  'name2': 'value2'}
+        label_config['external-labels'] = json.dumps(labels)
+        self.harness.update_config(label_config)
+        pod_spec = self.harness.get_pod_spec()
+        gconfig = global_config(pod_spec)
+        self.assertIsNotNone(gconfig['external_labels'])
+        self.assertEqual(labels, gconfig['external_labels'])
+
+    def test_invalid_external_labels_can_not_be_set(self):
+        self.harness.set_leader(True)
+        label_config = MINIMAL_CONFIG.copy()
+        # label value must be string
+        labels = {'name': 1}
+        label_config['external-labels'] = json.dumps(labels)
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(label_config)
+            expected_logs = ["ERROR:charm:External label keys/values must be strings"]
+            self.assertEqual(sorted(logger.output), expected_logs)
+
+        pod_spec = self.harness.get_pod_spec()
+        gconfig = global_config(pod_spec)
+        self.assertIsNone(gconfig.get('external_labels'))
+
+    def test_default_scrape_config_is_always_set(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        pod_spec = self.harness.get_pod_spec()
+        prometheus_scrape_config = scrape_config(pod_spec, 'prometheus')
+        self.assertIsNotNone(prometheus_scrape_config, 'No default config found')
+
+
+def alerting_config(pod_spec):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    return config_dict.get('alerting')
+
+
+def global_config(pod_spec):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    return config_dict['global']
+
+
+def scrape_config(pod_spec, job_name):
+    config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content']
+    config_dict = yaml.safe_load(config_yaml)
+    scrape_configs = config_dict['scrape_configs']
+    for config in scrape_configs:
+        if config['job_name'] == job_name:
+            return config
+    return None
+
+
+def cli_arg(pod_spec, cli_opt):
+    args = pod_spec[0]['containers'][0]['args']
+    for arg in args:
+        opt_list = arg.split('=')
+        if len(opt_list) == 2 and opt_list[0] == cli_opt:
+            return opt_list[1]
+        if len(opt_list) == 1 and opt_list[0] == cli_opt:
+            return opt_list[0]
+    return None
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/.gitignore b/squid_cnf/juju-bundles/charms/squid-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..94893c15705701a6ddf8c5870ead21941211233c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/.gitignore
@@ -0,0 +1,11 @@
+venv
+.vscode
+build
+*.charm
+.coverage
+coverage.xml
+.stestr
+cover
+release
+__pycache__
+.tox
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/README.md b/squid_cnf/juju-bundles/charms/squid-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e51e282ddbcd3d884b751a5940f004c707bb492b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/README.md
@@ -0,0 +1,50 @@
+# squid
+
+## Overview
+
+This is a Kubernetes Charm to deploy [Squid Cache](http://www.squid-cache.org/).
+
+Suggested actions for this charm:
+* Set allowed URLs.
+  Example: `juju run-action squid/0 add-url url=google.com`
+* Stop/start/restart the squid service.
+  Example: `juju run-action squid/0 restart`
+* Set ftp, http, and https proxies.
+
+## Quickstart
+
+If you don't have microk8s and juju installed, execute the following commands:
+```
+sudo snap install juju --classic
+sudo snap install microk8s --classic
+juju bootstrap microk8s
+juju add-model squid
+juju deploy cs:~charmed-osm/squid
+```
+
+## Building it locally
+
+```bash
+git clone https://github.com/charmed-osm/squid-operator.git
+cd squid-operator
+charmcraft build
+juju deploy ./squid.charm --resource image=davigar15/squid:latest
+```
+
+Check that the charm deployed correctly with `juju status`.
+
+To test the `add-url` action, open another terminal and point the proxy environment variable at squid:
+`export https_proxy=http://<squid-ip>:3128`
+
+where `<squid-ip>` is the Squid application address shown in `juju status`.
+
+At this point `curl https://www.google.com` fails, because squid blocks URLs that are not on the allowed list.
+
+Execute the `add-url` action:
+`juju run-action squid/0 add-url url=google.com`
+
+Now `curl https://www.google.com` returns the Google page.
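+
+The same check, condensed into one shell session (`run-action` is asynchronous, so give the action a moment to complete):
+
+```bash
+export https_proxy=http://<squid-ip>:3128       # address from `juju status`
+curl https://www.google.com                     # fails: URL not yet allowed
+juju run-action squid/0 add-url url=google.com
+curl https://www.google.com                     # succeeds once the action has run
+```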
+
+## Contact
+ - Author: David García
+ - Bug Tracker: [here](https://github.com/charmed-osm/squid-operator)
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/actions.yaml b/squid_cnf/juju-bundles/charms/squid-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73ce7b2c9774958d280194ccf135126729cf530f
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/actions.yaml
@@ -0,0 +1,14 @@
+add-url:
+    description: "Add allowed URL to squid config"
+    params:
+        url:
+            description: "URL that will be allowed"
+            type: string
+            default: ""
+delete-url:
+    description: "Delete allowed URL squid config"
+    params:
+        url:
+            description: "URL that will stop to be allowed"
+            type: string
+            default: ""
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/config.yaml b/squid_cnf/juju-bundles/charms/squid-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d0e9cbd3971169fa3eecbe8af4921698fd10fb2
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/config.yaml
@@ -0,0 +1,4 @@
+options:
+    enable-exporter:
+        type: boolean
+        description: Set to true to enable the node exporter container
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/dispatch b/squid_cnf/juju-bundles/charms/squid-operator/dispatch
new file mode 100755
index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/dispatch
@@ -0,0 +1,3 @@
+#!/bin/sh
+
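+# Juju invokes this script for every event; JUJU_DISPATCH_PATH (or the hook
+# path, when called through the hooks/ symlinks) tells charm.py which event
+# fired.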
+JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/hooks/install b/squid_cnf/juju-bundles/charms/squid-operator/hooks/install
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/hooks/install
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/hooks/start b/squid_cnf/juju-bundles/charms/squid-operator/hooks/start
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/hooks/start
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/hooks/upgrade-charm b/squid_cnf/juju-bundles/charms/squid-operator/hooks/upgrade-charm
new file mode 120000
index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/hooks/upgrade-charm
@@ -0,0 +1 @@
+../dispatch
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/manifest.yaml b/squid_cnf/juju-bundles/charms/squid-operator/manifest.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57104a34b106fb59ee58d9656002baf19fa20958
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/manifest.yaml
@@ -0,0 +1,7 @@
+bases:
+- architectures:
+  - amd64
+  channel: '20.04'
+  name: ubuntu
+charmcraft-started-at: '2021-05-31T07:02:08.417634Z'
+charmcraft-version: 1.0.0
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/metadata.yaml b/squid_cnf/juju-bundles/charms/squid-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c52012377e18306ed60b9f7500a956cfdd237ae4
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/metadata.yaml
@@ -0,0 +1,25 @@
+name: squid
+summary: Kubernetes operator for Squid
+maintainers:
+    - David Garcia <david.garcia@canonical.com>
+description: |
+    Squid is a caching proxy for the Web supporting HTTP, HTTPS, FTP, and more.
+    It reduces bandwidth and improves response times by caching and reusing
+    frequently-requested web pages. Squid has extensive access controls and
+    makes a great server accelerator. It runs on most available operating
+    systems, including Windows, and is licensed under the GNU GPL.
+tags:
+    - proxy
+    - firewall
+    - web
+series:
+    - kubernetes
+deployment:
+    type: stateful
+    service: loadbalancer
+provides:
+    prometheus-target:
+        interface: http
+peers:
+    cluster:
+        interface: squid-cluster
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/requirements-test.txt b/squid_cnf/juju-bundles/charms/squid-operator/requirements-test.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5c97248cc4b1b4a2e462c6cbf53a6b414f6d7709
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/requirements-test.txt
@@ -0,0 +1,11 @@
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/requirements.in b/squid_cnf/juju-bundles/charms/squid-operator/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..9dc75b844c5fb78673f3c902190733cad32c9ac3
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/requirements.in
@@ -0,0 +1,2 @@
+ops
+jinja2
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/requirements.txt b/squid_cnf/juju-bundles/charms/squid-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5125805d98dc9ab10055d5d3aec3267a684f17e4
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/requirements.txt
@@ -0,0 +1,14 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile --output-file=requirements.txt requirements.in
+#
+jinja2==2.11.3
+    # via -r requirements.in
+markupsafe==1.1.1
+    # via jinja2
+ops==1.1.0
+    # via -r requirements.in
+pyyaml==5.4.1
+    # via ops
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/src/charm.py b/squid_cnf/juju-bundles/charms/squid-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..57584b7440eff04424f1d0ac6fb7c67b16719b1c
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/src/charm.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+
+import logging
+
+from ops.charm import CharmBase
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+from lib.squid.cluster import SquidCluster
+
+logger = logging.getLogger(__name__)
+
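+# Sidecar container appended to the pod spec when the `enable-exporter` config
+# option is set; it exposes node-exporter metrics on port 9100, the same port
+# published over the `prometheus-target` relation below.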
+EXPORTER_CONTAINER = {
+    "name": "exporter",
+    "image": "prom/node-exporter",
+    "ports": [
+        {
+            "containerPort": 9100,
+            "name": "exporter-http",
+            "protocol": "TCP",
+        }
+    ],
+}
+
+
+class SquidCharm(CharmBase):
+    """Class representing this Operator charm."""
+
+    def __init__(self, *args):
+        """Initialize charm and configure states and events to observe."""
+        super().__init__(*args)
+
+        self.framework.observe(self.on.config_changed, self.configure_pod)
+        self.framework.observe(self.on["add_url"].action, self._on_add_url_action)
+        self.framework.observe(self.on["delete_url"].action, self._on_delete_url_action)
+
+        self.framework.observe(
+            self.on["prometheus-target"].relation_joined,
+            self._publish_prometheus_target_info,
+        )
+
+        self.cluster = SquidCluster(self, "cluster")
+        self.framework.observe(self.on["cluster"].relation_changed, self.configure_pod)
+
+    def _publish_prometheus_target_info(self, event):
+        event.relation.data[self.unit]["host"] = self.app.name
+        event.relation.data[self.unit]["port"] = str(9100)
+
+    def _on_add_url_action(self, event):
+        self.cluster.add_url(event.params["url"])
+
+    def _on_delete_url_action(self, event):
+        self.cluster.delete_url(event.params["url"])
+
+    def configure_pod(self, event):
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+        self.unit.status = MaintenanceStatus("Applying pod spec")
+        containers = [
+            {
+                "name": self.framework.model.app.name,
+                "image": "davigar15/squid:latest",
+                "ports": [
+                    {
+                        "name": "squid",
+                        "containerPort": 3128,
+                        "protocol": "TCP",
+                    }
+                ],
+                "volumeConfig": [
+                    {
+                        "name": "config",
+                        "mountPath": "/etc/squid",
+                        "files": [
+                            {
+                                "path": "squid.conf",
+                                "content": self.cluster.squid_config,
+                            }
+                        ],
+                    }
+                ],
+            }
+        ]
+        if self.config.get("enable-exporter"):
+            containers.append(EXPORTER_CONTAINER)
+
+        self.model.pod.set_spec({"version": 3, "containers": containers})
+
+        self.unit.status = ActiveStatus()
+        self.app.status = ActiveStatus()
+
+
+if __name__ == "__main__":
+    main(SquidCharm)
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/__init__.py b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e82b76cc6bd51101abec24f114230b23f9d38cdd
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/__init__.py
@@ -0,0 +1,2 @@
+LIBAPI = 0
+LIBPATCH = 1
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/cluster.py b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/cluster.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d790f2eea0979195cb66d2addf10b4176116e9b
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/cluster.py
@@ -0,0 +1,84 @@
+import json
+import logging
+
+from jinja2 import Template
+import ops.charm
+import ops.model
+import ops.framework
+
+from . import templates
+
+
+try:
+    import importlib.resources as pkg_resources
+except ImportError:
+    # Try backported to PY<37 `importlib_resources`.
+    import importlib_resources as pkg_resources
+
+
+class SquidCluster(ops.framework.Object):
+    """Peer relation object for Squid"""
+
+    relation_name: str = None
+    log: logging.Logger = None
+
+    def __init__(self, charm: ops.charm.CharmBase, relation_name: str):
+        super().__init__(charm, relation_name)
+
+        self.relation = self.framework.model.get_relation(relation_name)
+        self.log = logging.getLogger("squid.{}".format(relation_name))
+
+        self.framework.observe(
+            charm.on[relation_name].relation_changed, self._on_changed
+        )
+        self.framework.observe(charm.on[relation_name].relation_broken, self._on_broken)
+
+    def add_url(self, url: str):
+        if self.framework.model.unit.is_leader():
+            allowed_urls = self.allowed_urls
+            allowed_urls.add(url)
+            self.update_allowed_urls(allowed_urls)
+            self.framework.model.unit.status = ops.model.ActiveStatus(
+                repr(self.allowed_urls)
+            )
+
+    def delete_url(self, url: str):
+        if self.framework.model.unit.is_leader():
+            allowed_urls = self.allowed_urls
+            if url in allowed_urls:
+                allowed_urls.remove(url)
+                self.update_allowed_urls(allowed_urls)
+            # Status messages must be strings; report the state after the removal.
+            self.framework.model.unit.status = ops.model.ActiveStatus(
+                repr(self.allowed_urls)
+            )
+
+    def _on_changed(self, event):
+        self.log.debug(f"on_changed: {self.framework.model.unit.name}")
+
+    def _on_broken(self, event):
+        self.log.debug(f"on_broken: {self.framework.model.unit.name}")
+
+    @property
+    def squid_config(self):
+        allowed_urls_string = self._generate_allowedurls_config(self.allowed_urls)
+        squid_config_template = pkg_resources.read_text(templates, "squid.conf")
+        return Template(squid_config_template).render(allowed_urls=allowed_urls_string)
+
+    @property
+    def allowed_urls(self):
+        # The URL set is stored in app relation data as a JSON list; using
+        # json instead of eval/repr avoids executing peer-supplied data.
+        raw = self.relation.data[self.framework.model.app].get("allowed_urls")
+        return set(json.loads(raw)) if raw else set()
+
+    def update_allowed_urls(self, allowed_urls: set):
+        # Sort for a deterministic serialization of the set.
+        self.relation.data[self.framework.model.app]["allowed_urls"] = json.dumps(
+            sorted(allowed_urls)
+        )
+
+    def is_ready(self):
+        return self.relation is not None
+
+    def _generate_allowedurls_config(self, allowed_urls: set):
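+        # For example, {"google.com"} renders as:
+        #   acl allowedurls dstdomain google.com
+        #   http_access allow allowedurls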
+        allowed_urls_text = ""
+        for url in allowed_urls:
+            allowed_urls_text += f"acl allowedurls dstdomain {url}\n"
+        if allowed_urls:
+            allowed_urls_text += "http_access allow allowedurls\n"
+        return allowed_urls_text
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/templates/__init__.py b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/templates/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/templates/squid.conf b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/templates/squid.conf
new file mode 100644
index 0000000000000000000000000000000000000000..073d3cdfd95c4a84759fb77f09ecb3a155f6a995
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/src/lib/squid/templates/squid.conf
@@ -0,0 +1,26 @@
+acl SSL_ports port 443
+acl Safe_ports port 80		# http
+acl Safe_ports port 21		# ftp
+acl Safe_ports port 443		# https
+acl Safe_ports port 70		# gopher
+acl Safe_ports port 210		# wais
+acl Safe_ports port 1025-65535	# unregistered ports
+acl Safe_ports port 280		# http-mgmt
+acl Safe_ports port 488		# gss-http
+acl Safe_ports port 591		# filemaker
+acl Safe_ports port 777		# multiling http
+acl CONNECT method CONNECT
+http_access deny !Safe_ports
+http_access deny CONNECT !SSL_ports
+http_access allow localhost manager
+http_access deny manager
+http_access allow localhost
+{{ allowed_urls }}
+http_access deny all
+http_port 3128
+coredump_dir /var/spool/squid
+refresh_pattern ^ftp:		1440	20%	10080
+refresh_pattern ^gopher:	1440	0%	1440
+refresh_pattern -i (/cgi-bin/|\?) 0	0%	0
+refresh_pattern (Release|Packages(.gz)*)$      0       20%     2880
+refresh_pattern .		0	20%	4320
\ No newline at end of file
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/tests/test_charm.py b/squid_cnf/juju-bundles/charms/squid-operator/tests/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ec4525e5a0cbada9bcecc5829fd5ceca49cf97
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/tests/test_charm.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+import unittest
+from ops.testing import Harness
+
+from charm import SquidCharm
+
+
+class TestCharm(unittest.TestCase):
+    """Prometheus Charm unit tests."""
+
+    def setUp(self) -> None:
+        """Test setup"""
+        self.harness = Harness(SquidCharm)
+        self.harness.set_leader(is_leader=True)
+        self.harness.begin()
+        self.config = {
+            "port": 3128,
+            "enable-exporter": True,
+        }
+        self.harness.update_config(self.config)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/squid_cnf/juju-bundles/charms/squid-operator/tox.ini b/squid_cnf/juju-bundles/charms/squid-operator/tox.ini
new file mode 100644
index 0000000000000000000000000000000000000000..b2ba7dfadd45dcd58750369e7211eda711421d9e
--- /dev/null
+++ b/squid_cnf/juju-bundles/charms/squid-operator/tox.ini
@@ -0,0 +1,119 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+#######################################################################################
+
+[tox]
+envlist = flake8, cover, pylint, safety, yamllint
+skipsdist = True
+
+[testenv]
+basepython = python3.8
+setenv =
+  VIRTUAL_ENV={envdir}
+  PYTHONHASHSEED=0
+  PYTHONPATH = {toxinidir}/src
+deps =  -r{toxinidir}/requirements.txt
+
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
+[testenv:build]
+passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
+whitelist_externals =
+  charmcraft
+  cp
+  mv
+  rm
+commands =
+  charmcraft build
+  rm -r ../../squid-operator
+  mv build ../../squid-operator
+
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
+
+max-complexity = 10
+import-order-style = google
diff --git a/squid_cnf/squid_vnfd.yaml b/squid_cnf/squid_vnfd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f397f3281adb81718e8d1a7eb5b2ece98fe1196b
--- /dev/null
+++ b/squid_cnf/squid_vnfd.yaml
@@ -0,0 +1,50 @@
+vnfd:
+  product-name: squid_cnf
+  version: "1.0"
+  provider: Canonical
+  description: |
+    K8s container deployment of Squid Web Proxy exporting metrics to Prometheus
+  id: squid_cnf
+  mgmt-cp: mgmtnet-ext
+  ext-cpd:
+    - id: mgmtnet-ext
+      k8s-cluster-net: mgmtnet
+  kdu:
+    - name: squid-metrics-kdu
+      juju-bundle: bundle.yaml
+  k8s-cluster:
+    nets:
+      - id: mgmtnet
+  df:
+    - id: default-df
+      lcm-operations-configuration:
+        operate-vnf-op-config:
+          day1-2:
+            - id: squid-metrics-kdu
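+              # Day-1 operation: run automatically when the NS is instantiated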
+              initial-config-primitive:
+                - seq: 0
+                  name: add-url
+                  parameter:
+                    - name: application-name
+                      data-type: STRING
+                      value: squid
+                    - name: url
+                      data-type: STRING
+                      value: "osm.etsi.org"
+              config-primitive:
+                - name: add-url
+                  parameter:
+                    - name: application-name
+                      data-type: STRING
+                      default-value: squid
+                    - name: url
+                      data-type: STRING
+                      default-value: ""
+                - name: delete-url
+                  parameter:
+                    - name: application-name
+                      data-type: STRING
+                      default-value: squid
+                    - name: url
+                      data-type: STRING
+                      default-value: ""
diff --git a/squid_cnf_ns/squid_cnf_nsd.yaml b/squid_cnf_ns/squid_cnf_nsd.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..871a35feeb39b777e13f0848c925f77894fc8bc0
--- /dev/null
+++ b/squid_cnf_ns/squid_cnf_nsd.yaml
@@ -0,0 +1,23 @@
+nsd:
+  nsd:
+  - description: NS with 1 KDU connected to the mgmtnet VL
+    designer: Canonical
+    df:
+    - id: default-df
+      vnf-profile:
+      - id: squid_cnf
+        virtual-link-connectivity:
+        - constituent-cpd-id:
+          - constituent-base-element-id: squid_cnf
+            constituent-cpd-id: mgmtnet-ext
+          virtual-link-profile-id: mgmtnet
+        vnfd-id: squid_cnf
+    id: squid_cnf_ns
+    name: squid_cnf_ns
+    version: '1.0'
+    virtual-link-desc:
+    - id: mgmtnet
+      mgmt-network: true
+      vim-network-name: mgmt
+    vnfd-id:
+    - squid_cnf