diff --git a/magma/build_webhost.sh b/magma/build_webhost.sh
new file mode 100755
index 0000000000000000000000000000000000000000..fcbc0701ee09f7177ccce849cd9615ed436761ef
--- /dev/null
+++ b/magma/build_webhost.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Package the NS and VNF descriptor directories into OSM-ready tarballs
+tar -cvzf hackfest_squid_cnf_ns.tar.gz hackfest_squid_cnf_ns/
+tar -cvzf hackfest_squid_cnf.tar.gz hackfest_squid_cnf/
+
+# Remove any previously onboarded descriptors (these may fail on a clean OSM; that is fine)
+osm nsd-delete squid-cnf-ns
+osm vnfd-delete squid-vnf
+
+# Onboard the freshly built packages
+osm vnfd-create hackfest_squid_cnf.tar.gz
+osm nsd-create hackfest_squid_cnf_ns.tar.gz
+
+# Instantiate the NS, attaching its management VLD to the external osm-ext network
+osm ns-create --ns_name webhost --nsd_name squid-cnf-ns --vim_account whitecloud_highcost --config '{vld: [ {name: mgmtnet, vim-network-name: osm-ext} ] }'
\ No newline at end of file
diff --git a/magma/hackfest_squid_cnf/charms/squid/README.md b/magma/hackfest_squid_cnf/charms/squid/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1b226214256240e150d1cb34fdc75fb0b0bf412b
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/README.md
@@ -0,0 +1,52 @@
+# squid-k8s Charm
+
+## Overview
+
+This is a Kubernetes Charm to deploy [Squid Cache](http://www.squid-cache.org/).
+
+Suggested Actions for this charm:
+* Set allowed URLs
+  Possible way to run the action: `juju run-action squid/0 addurl url=google.com`
+* Stop/Start/Restart the squid service
+  Run like this: `juju run-action squid/0 restart`
+* Set ftp, http, https proxies
+
+## Quickstart
+
+If you don't have microk8s and juju installed, execute the following commands:
+```
+sudo snap install juju --classic
+sudo snap install microk8s --classic
+juju bootstrap microk8s
+juju add-model squid
+```
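+
+Depending on your MicroK8s version, you may also need to enable the dns and storage add-ons (`microk8s.enable dns storage`) before running `juju bootstrap`.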
+
+Afterwards, clone the repository and deploy the charm:
+```
+git clone https://github.com/DomFleischmann/charm-squid-k8s.git
+cd charm-squid-k8s
+git submodule update --init
+juju deploy .
+```
+Check if the charm is deployed correctly with `juju status`
+
+To test the `addurl` action, open another terminal and type the following command:
+`export https_proxy=http://<squid-ip>:3128`
+
+Where `<squid-ip>` is the Squid application address shown in `juju status`.
+
+Now, when executing `curl https://www.google.com`, squid will block access to the URL.
+
+Execute the `addurl` action:
+`juju run-action squid/0 addurl url=google.com`
+
+Now, when executing `curl https://www.google.com` again, you will get the Google page.
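+
+The `start`, `stop` and `restart` actions manage the Squid service itself and can be run the same way, for example:
+`juju run-action squid/0 restart`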
+
+## Contact
+ - Author: Dominik Fleischmann <dominik.fleischmann@canonical.com>
+ - Bug Tracker: [here](https://github.com/DomFleischmann/charm-squid-k8s)
diff --git a/magma/hackfest_squid_cnf/charms/squid/actions.yaml b/magma/hackfest_squid_cnf/charms/squid/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..687a92f472077e9331574ece57077fa2fd2b40c1
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/actions.yaml
@@ -0,0 +1,14 @@
+addurl:
+    description: "Add squid config"
+    params:
+        url:
+            description: "URL that will be allowed"
+            type: string
+            default: ""
+start:
+    description: "Start squid service"
+restart:
+    description: "Retart squid service"
+stop:
+    description: "Stop squid service"
diff --git a/magma/hackfest_squid_cnf/charms/squid/actions/addurl b/magma/hackfest_squid_cnf/charms/squid/actions/addurl
new file mode 100755
index 0000000000000000000000000000000000000000..e12bf9fef6d3bbf2fedb07eac0f2737a14e0ae8d
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/actions/addurl
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Read the "url" parameter passed to the action
+URL=$(action-get url)
+
+# Make sure the allowedurls ACL is referenced once in squid.conf, just before the final deny rule
+if ! grep -Fxq "http_access allow allowedurls" /etc/squid/squid.conf
+then
+    sed -i '/^# And finally deny all .*/i http_access allow allowedurls\n' /etc/squid/squid.conf
+fi
+
+# Allow the requested domain (and its subdomains) through the allowedurls ACL
+sed -i "/^http_access allow allowedurls.*/i acl allowedurls dstdomain \.$URL" /etc/squid/squid.conf
+
+# Ask squid to reload its configuration
+kill -HUP "$(cat /var/run/squid.pid)"
+
diff --git a/magma/hackfest_squid_cnf/charms/squid/actions/restart b/magma/hackfest_squid_cnf/charms/squid/actions/restart
new file mode 100755
index 0000000000000000000000000000000000000000..d6c3f2e332da657e91b00dff2a9d1eb0ac7a2cab
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/actions/restart
@@ -0,0 +1,2 @@
+#!/bin/bash
+service squid restart
diff --git a/magma/hackfest_squid_cnf/charms/squid/actions/start b/magma/hackfest_squid_cnf/charms/squid/actions/start
new file mode 100755
index 0000000000000000000000000000000000000000..1f62c7bda6c6728185ceaefcc61b7eb4beb7c892
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/actions/start
@@ -0,0 +1,2 @@
+#!/bin/bash
+service squid start
diff --git a/magma/hackfest_squid_cnf/charms/squid/actions/stop b/magma/hackfest_squid_cnf/charms/squid/actions/stop
new file mode 100755
index 0000000000000000000000000000000000000000..536232dda6aed199a676a9ac2ff7eb212d413d84
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/actions/stop
@@ -0,0 +1,2 @@
+#!/bin/bash
+service squid stop
diff --git a/magma/hackfest_squid_cnf/charms/squid/config.yaml b/magma/hackfest_squid_cnf/charms/squid/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7efafff5e232f3cf3e7b1e8d0decdb6cd6e3f224
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/config.yaml
@@ -0,0 +1,10 @@
+options:
+    image:
+        type: string
+        description: 'Docker image for squid'
+        default: 'sameersbn/squid:latest'
+    port:
+        type: int
+        description: 'Port'
+        default: 3128
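+        # 3128 is squid's default proxy port; the README test commands assume this value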
diff --git a/magma/hackfest_squid_cnf/charms/squid/hooks/install b/magma/hackfest_squid_cnf/charms/squid/hooks/install
new file mode 120000
index 0000000000000000000000000000000000000000..25b1f68fa39d58d33c08ca420c3d439d19be0c55
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/hooks/install
@@ -0,0 +1 @@
+../src/charm.py
\ No newline at end of file
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/.empty b/magma/hackfest_squid_cnf/charms/squid/lib/.empty
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/__init__.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2089e3803186b5a0b12e9423b1c452596adf3c6
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework."""
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm  # NOQA
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/charm.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..4682c20e9ff9c41db6ef748a2fd38fecdd331148
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/charm.py
@@ -0,0 +1,562 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pathlib
+import typing
+
+import yaml
+
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+from ops import model
+
+
+class HookEvent(EventBase):
+    """A base class for events that trigger because of a Juju hook firing."""
+
+
+class ActionEvent(EventBase):
+    """A base class for events that trigger when a user asks for an Action to be run.
+
+    To read the parameters for the action, see the instance variable `params`.
+    To respond with the result of the action, call `set_results`. To add progress
+    messages that are visible as the action is progressing use `log`.
+
+    :ivar params: The parameters passed to the action (read by action-get)
+    """
+
+    def defer(self):
+        """Action events are not deferable like other events.
+
+        This is because an action runs synchronously and the user is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by Charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Represents the `install` hook from Juju."""
+
+
+class StartEvent(HookEvent):
+    """Represents the `start` hook from Juju."""
+
+
+class StopEvent(HookEvent):
+    """Represents the `stop` hook from Juju."""
+
+
+class RemoveEvent(HookEvent):
+    """Represents the `remove` hook from Juju. """
+
+
+class ConfigChangedEvent(HookEvent):
+    """Represents the `config-changed` hook from Juju."""
+
+
+class UpdateStatusEvent(HookEvent):
+    """Represents the `update-status` hook from Juju."""
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Represents the `upgrade-charm` hook from Juju.
+
+    This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
+    has unpacked the upgraded charm code, and so this event will be handled with new code.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Represents the `pre-series-upgrade` hook from Juju.
+
+    This happens when a user has run `juju upgrade-series MACHINE prepare` and
+    will fire for each unit that is running on the machine, telling them that
+    the user is preparing to upgrade the Machine's series (eg trusty->bionic).
+    The charm should take actions to prepare for the upgrade (a database charm
+    would want to write out a version-independent dump of the database, so that
+    when a new version of the database is available in a new series, it can be
+    used.)
+    Once all units on a machine have run `pre-series-upgrade`, the user will
+    initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
+    When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Represents the `post-series-upgrade` hook from Juju.
+
+    This is run after the user has done a distribution upgrade (or rolled back
+    and kept the same series). It is called in response to
+    `juju upgrade-series MACHINE complete`. Charms are expected to do whatever
+    steps are necessary to reconfigure their applications for the new series.
+    """
+
+
+class LeaderElectedEvent(HookEvent):
+    """Represents the `leader-elected` hook from Juju.
+
+    Juju will trigger this when a new lead unit is chosen for a given application.
+    This represents the leader of the charm information (not necessarily the primary
+    of a running application). The main utility is that charm authors can know
+    that only one unit will be a leader at any given time, so they can do
+    configuration, etc, that would otherwise require coordination between units.
+    (eg, selecting a password for a new relation)
+    """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+    """Represents the `leader-settings-changed` hook from Juju.
+
+    Deprecated. This represents when a lead unit would call `leader-set` to inform
+    the other units of an application that they have new information to handle.
+    This has been deprecated in favor of using a Peer relation, and having the
+    leader set a value in the Application data bag for that peer relation.
+    (see :class:`RelationChangedEvent`).
+    """
+
+
+class CollectMetricsEvent(HookEvent):
+    """Represents the `collect-metrics` hook from Juju.
+
+    Note that events firing during a CollectMetricsEvent are currently
+    sandboxed in how they can interact with Juju. To report metrics
+    use :meth:`.add_metrics`.
+    """
+
+    def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
+        """Record metrics that have been gathered by the charm for this unit.
+
+        Args:
+            metrics: A collection of {key: float} pairs that contains the
+              metrics that have been gathered
+            labels: {key:value} strings that can be applied to the
+                metrics that are being gathered
+        """
+        self.framework.model._backend.add_metrics(metrics, labels)
+
+
+class RelationEvent(HookEvent):
+    """A base class representing the various relation lifecycle events.
+
+    Charmers should not be creating RelationEvents directly. The events will be
+    generated by the framework from Juju related events. Users can observe them
+    from the various `CharmBase.on[relation_name].relation_*` events.
+
+    Attributes:
+        relation: The Relation involved in this event
+        app: The remote application that has triggered this event
+        unit: The remote unit that has triggered this event. This may be None
+              if the relation event was triggered as an Application level event
+    """
+
+    def __init__(self, handle, relation, app=None, unit=None):
+        super().__init__(handle)
+
+        if unit is not None and unit.app != app:
+            raise RuntimeError(
+                'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
+
+        self.relation = relation
+        self.app = app
+        self.unit = unit
+
+    def snapshot(self) -> dict:
+        """Used by the framework to serialize the event to disk.
+
+        Not meant to be called by Charm code.
+        """
+        snapshot = {
+            'relation_name': self.relation.name,
+            'relation_id': self.relation.id,
+        }
+        if self.app:
+            snapshot['app_name'] = self.app.name
+        if self.unit:
+            snapshot['unit_name'] = self.unit.name
+        return snapshot
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the framework to deserialize the event from disk.
+
+        Not meant to be called by Charm code.
+        """
+        self.relation = self.framework.model.get_relation(
+            snapshot['relation_name'], snapshot['relation_id'])
+
+        app_name = snapshot.get('app_name')
+        if app_name:
+            self.app = self.framework.model.get_app(app_name)
+        else:
+            self.app = None
+
+        unit_name = snapshot.get('unit_name')
+        if unit_name:
+            self.unit = self.framework.model.get_unit(unit_name)
+        else:
+            self.unit = None
+
+
+class RelationCreatedEvent(RelationEvent):
+    """Represents the `relation-created` hook from Juju.
+
+    This is triggered when a new relation to another app is added in Juju. This
+    can occur before units for those applications have started. All existing
+    relations should be established before start.
+    """
+
+
+class RelationJoinedEvent(RelationEvent):
+    """Represents the `relation-joined` hook from Juju.
+
+    This is triggered whenever a new unit of a related application joins the relation.
+    (eg, a unit was added to an existing related app, or a new relation was established
+    with an application that already had units.)
+    """
+
+
+class RelationChangedEvent(RelationEvent):
+    """Represents the `relation-changed` hook from Juju.
+
+    This is triggered whenever there is a change to the data bucket for a related
+    application or unit. Look at `event.relation.data[event.unit/app]` to see the
+    new information.
+    """
+
+
+class RelationDepartedEvent(RelationEvent):
+    """Represents the `relation-departed` hook from Juju.
+
+    This is the inverse of the RelationJoinedEvent, representing when a unit
+    is leaving the relation (the unit is being removed, the app is being removed,
+    the relation is being removed). It is fired once for each unit that is
+    going away.
+    """
+
+
+class RelationBrokenEvent(RelationEvent):
+    """Represents the `relation-broken` hook from Juju.
+
+    If a relation is being removed (`juju remove-relation` or `juju remove-application`),
+    once all the units have been removed, RelationBrokenEvent will fire to signal
+    that the relationship has been fully terminated.
+    """
+
+
+class StorageEvent(HookEvent):
+    """Base class representing Storage related events."""
+
+
+class StorageAttachedEvent(StorageEvent):
+    """Represents the `storage-attached` hook from Juju.
+
+    Called when new storage is available for the charm to use.
+    """
+
+
+class StorageDetachingEvent(StorageEvent):
+    """Represents the `storage-detaching` hook from Juju.
+
+    Called when storage a charm has been using is going away.
+    """
+
+
+class CharmEvents(ObjectEvents):
+    """The events that are generated by Juju in response to the lifecycle of an application."""
+
+    install = EventSource(InstallEvent)
+    start = EventSource(StartEvent)
+    stop = EventSource(StopEvent)
+    remove = EventSource(RemoveEvent)
+    update_status = EventSource(UpdateStatusEvent)
+    config_changed = EventSource(ConfigChangedEvent)
+    upgrade_charm = EventSource(UpgradeCharmEvent)
+    pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
+    post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
+    leader_elected = EventSource(LeaderElectedEvent)
+    leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
+    collect_metrics = EventSource(CollectMetricsEvent)
+
+
+class CharmBase(Object):
+    """Base class that represents the Charm overall.
+
+    Usually this initialization is done by ops.main.main() rather than Charm authors
+    directly instantiating a Charm.
+
+    Args:
+        framework: The framework responsible for managing the Model and events for this
+            Charm.
+        key: Arbitrary key to distinguish this instance of CharmBase from another.
+            Generally is None when initialized by the framework. For charms instantiated by
+            main.main(), this is currently None.
+    Attributes:
+        on: Defines all events that the Charm will fire.
+    """
+
+    on = CharmEvents()
+
+    def __init__(self, framework: Framework, key: typing.Optional[str]):
+        """Initialize the Charm with its framework and application name.
+
+        """
+        super().__init__(framework, key)
+
+        for relation_name in self.framework.meta.relations:
+            relation_name = relation_name.replace('-', '_')
+            self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+            self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+            self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+            self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+            self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+        for storage_name in self.framework.meta.storages:
+            storage_name = storage_name.replace('-', '_')
+            self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+            self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+        for action_name in self.framework.meta.actions:
+            action_name = action_name.replace('-', '_')
+            self.on.define_event(action_name + '_action', ActionEvent)
+
+    @property
+    def app(self) -> model.Application:
+        """Application that this unit is part of."""
+        return self.framework.model.app
+
+    @property
+    def unit(self) -> model.Unit:
+        """Unit that this execution is responsible for."""
+        return self.framework.model.unit
+
+    @property
+    def meta(self) -> 'CharmMeta':
+        """CharmMeta of this charm.
+        """
+        return self.framework.meta
+
+    @property
+    def charm_dir(self) -> pathlib.Path:
+        """Root directory of the Charm as it is running.
+        """
+        return self.framework.charm_dir
+
+
+class CharmMeta:
+    """Object containing the metadata for the charm.
+
+    This is read from metadata.yaml and/or actions.yaml. Generally charms will
+    define this information, rather than reading it at runtime. This class is
+    mostly for the framework to understand what the charm has defined.
+
+    The maintainers, tags, terms, series, and extra_bindings attributes are all
+    lists of strings.  The requires, provides, peers, relations, storage,
+    resources, and payloads attributes are all mappings of names to instances
+    of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta.
+
+    The relations attribute is a convenience accessor which includes all of the
+    requires, provides, and peers RelationMeta items.  If needed, the role of
+    the relation definition can be obtained from its role attribute.
+
+    Attributes:
+        name: The name of this charm
+        summary: Short description of what this charm does
+        description: Long description for this charm
+        maintainers: A list of strings of the email addresses of the maintainers
+                     of this charm.
+        tags: Charm store tag metadata for categories associated with this charm.
+        terms: Charm store terms that should be agreed to before this charm can
+               be deployed. (Used for things like licensing issues.)
+        series: The list of supported OS series that this charm can support.
+                The first entry in the list is the default series that will be
+                used by deploy if no other series is requested by the user.
+        subordinate: True/False whether this charm is intended to be used as a
+                     subordinate charm.
+        min_juju_version: If supplied, indicates this charm needs features that
+                          are not available in older versions of Juju.
+        requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+        provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+        peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+        relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+                   sections)
+        storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+        resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+        payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+        extra_bindings: A dict of additional named bindings that a charm can use
+                        for network configuration.
+        actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
+    Args:
+        raw: a mapping containing the contents of metadata.yaml
+        actions_raw: a mapping containing the contents of actions.yaml
+    """
+
+    def __init__(self, raw: dict = {}, actions_raw: dict = {}):
+        self.name = raw.get('name', '')
+        self.summary = raw.get('summary', '')
+        self.description = raw.get('description', '')
+        self.maintainers = []
+        if 'maintainer' in raw:
+            self.maintainers.append(raw['maintainer'])
+        if 'maintainers' in raw:
+            self.maintainers.extend(raw['maintainers'])
+        self.tags = raw.get('tags', [])
+        self.terms = raw.get('terms', [])
+        self.series = raw.get('series', [])
+        self.subordinate = raw.get('subordinate', False)
+        self.min_juju_version = raw.get('min-juju-version')
+        self.requires = {name: RelationMeta('requires', name, rel)
+                         for name, rel in raw.get('requires', {}).items()}
+        self.provides = {name: RelationMeta('provides', name, rel)
+                         for name, rel in raw.get('provides', {}).items()}
+        # TODO: (jam 2020-05-11) The *role* should be 'peer' even though it comes from the
+        #  'peers' section.
+        self.peers = {name: RelationMeta('peers', name, rel)
+                      for name, rel in raw.get('peers', {}).items()}
+        self.relations = {}
+        self.relations.update(self.requires)
+        self.relations.update(self.provides)
+        self.relations.update(self.peers)
+        self.storages = {name: StorageMeta(name, storage)
+                         for name, storage in raw.get('storage', {}).items()}
+        self.resources = {name: ResourceMeta(name, res)
+                          for name, res in raw.get('resources', {}).items()}
+        self.payloads = {name: PayloadMeta(name, payload)
+                         for name, payload in raw.get('payloads', {}).items()}
+        self.extra_bindings = raw.get('extra-bindings', {})
+        self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
+
+    @classmethod
+    def from_yaml(
+            cls, metadata: typing.Union[str, typing.TextIO],
+            actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
+        """Instantiate a CharmMeta from a YAML description of metadata.yaml.
+
+        Args:
+            metadata: A YAML description of charm metadata (name, relations, etc.)
+                This can be a simple string, or a file-like object. (passed to `yaml.safe_load`).
+            actions: YAML description of Actions for this charm (eg actions.yaml)
+        """
+        meta = yaml.safe_load(metadata)
+        raw_actions = {}
+        if actions is not None:
+            raw_actions = yaml.safe_load(actions)
+        return cls(meta, raw_actions)
+
+
+class RelationMeta:
+    """Object containing metadata about a relation definition.
+
+    Should not be constructed directly by Charm code. Is gotten from one of
+    :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
+    :attr:`CharmMeta.relations`.
+
+    Attributes:
+        role: This is one of requires/provides/peers
+        relation_name: Name of this relation from metadata.yaml
+        interface_name: Optional definition of the interface protocol.
+        scope: "global" or "container" scope based on how the relation should be used.
+    """
+
+    def __init__(self, role, relation_name, raw):
+        self.role = role
+        self.relation_name = relation_name
+        self.interface_name = raw['interface']
+        self.scope = raw.get('scope')
+
+
+class StorageMeta:
+    """Object containing metadata about a storage definition."""
+
+    def __init__(self, name, raw):
+        self.storage_name = name
+        self.type = raw['type']
+        self.description = raw.get('description', '')
+        self.shared = raw.get('shared', False)
+        self.read_only = raw.get('read-only', False)
+        self.minimum_size = raw.get('minimum-size')
+        self.location = raw.get('location')
+        self.multiple_range = None
+        if 'multiple' in raw:
+            range = raw['multiple']['range']
+            if '-' not in range:
+                self.multiple_range = (int(range), int(range))
+            else:
+                range = range.split('-')
+                self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None)
+
+
+class ResourceMeta:
+    """Object containing metadata about a resource definition."""
+
+    def __init__(self, name, raw):
+        self.resource_name = name
+        self.type = raw['type']
+        self.filename = raw.get('filename', None)
+        self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+    """Object containing metadata about a payload definition."""
+
+    def __init__(self, name, raw):
+        self.payload_name = name
+        self.type = raw['type']
+
+
+class ActionMeta:
+    """Object containing metadata about an action's definition."""
+
+    def __init__(self, name, raw=None):
+        raw = raw or {}
+        self.name = name
+        self.title = raw.get('title', '')
+        self.description = raw.get('description', '')
+        self.parameters = raw.get('params', {})  # {<parameter name>: <JSON Schema definition>}
+        self.required = raw.get('required', [])  # [<parameter name>, ...]
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/framework.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/framework.py
new file mode 100755
index 0000000000000000000000000000000000000000..51d46ba16886bfffce0fe7b9ad91f3ac0b5902a4
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/framework.py
@@ -0,0 +1,1134 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import marshal
+import os
+import pdb
+import pickle
+import re
+import sqlite3
+import sys
+import types
+import weakref
+from datetime import timedelta
+
+from ops import charm
+
+
+class Handle:
+    """Handle defines a name for an object in the form of a hierarchical path.
+
+    The provided parent is the object (or that object's handle) that this handle
+    sits under, or None if the object identified by this handle stands by itself
+    as the root of its own hierarchy.
+
+    The handle kind is a string that defines a namespace so objects with the
+    same parent and kind will have unique keys.
+
+    The handle key is a string uniquely identifying the object. No other objects
+    under the same parent and kind may have the same key.
+    """
+
+    def __init__(self, parent, kind, key):
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        self._parent = parent
+        self._kind = kind
+        self._key = key
+        if parent:
+            if key:
+                self._path = "{}/{}[{}]".format(parent, kind, key)
+            else:
+                self._path = "{}/{}".format(parent, kind)
+        else:
+            if key:
+                self._path = "{}[{}]".format(kind, key)
+            else:
+                self._path = "{}".format(kind)
+
+    def nest(self, kind, key):
+        return Handle(self, kind, key)
+
+    def __hash__(self):
+        return hash((self.parent, self.kind, self.key))
+
+    def __eq__(self, other):
+        return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key)
+
+    def __str__(self):
+        return self.path
+
+    @property
+    def parent(self):
+        return self._parent
+
+    @property
+    def kind(self):
+        return self._kind
+
+    @property
+    def key(self):
+        return self._key
+
+    @property
+    def path(self):
+        return self._path
+
+    @classmethod
+    def from_path(cls, path):
+        handle = None
+        for pair in path.split("/"):
+            pair = pair.split("[")
+            good = False
+            if len(pair) == 1:
+                kind, key = pair[0], None
+                good = True
+            elif len(pair) == 2:
+                kind, key = pair
+                if key and key[-1] == ']':
+                    key = key[:-1]
+                    good = True
+            if not good:
+                raise RuntimeError("attempted to restore invalid handle path {}".format(path))
+            handle = Handle(handle, kind, key)
+        return handle
+
+
+class EventBase:
+
+    def __init__(self, handle):
+        self.handle = handle
+        self.deferred = False
+
+    def defer(self):
+        self.deferred = True
+
+    def snapshot(self):
+        """Return the snapshot data that should be persisted.
+
+        Subclasses must override to save any custom state.
+        """
+        return None
+
+    def restore(self, snapshot):
+        """Restore the value state from the given snapshot.
+
+        Subclasses must override to restore their custom state.
+        """
+        self.deferred = False
+
+
+class EventSource:
+    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+    It is generally used as:
+
+        class SomethingHappened(EventBase):
+            pass
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    With that, instances of that type will offer the someobj.something_happened
+    attribute which is a BoundEvent and may be used to emit and observe the event.
+    """
+
+    def __init__(self, event_type):
+        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+            raise RuntimeError(
+                'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
+        self.event_type = event_type
+        self.event_kind = None
+        self.emitter_type = None
+
+    def _set_name(self, emitter_type, event_kind):
+        if self.event_kind is not None:
+            raise RuntimeError(
+                'EventSource({}) reused as {}.{} and {}.{}'.format(
+                    self.event_type.__name__,
+                    self.emitter_type.__name__,
+                    self.event_kind,
+                    emitter_type.__name__,
+                    event_kind,
+                ))
+        self.event_kind = event_kind
+        self.emitter_type = emitter_type
+
+    def __get__(self, emitter, emitter_type=None):
+        if emitter is None:
+            return self
+        # Framework might not be available if accessed as CharmClass.on.event
+        # rather than charm_instance.on.event, but in that case it couldn't be
+        # emitted anyway, so there's no point to registering it.
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+          EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data. Doing this unusual construct
+            # means people can subclass just this one class to have their own 'on'.
+            instance = self._cache[emitter] = type(self)(emitter)
+        return instance
+
+    @classmethod
+    def define_event(cls, event_kind, event_type):
+        """Define an event on this type at runtime.
+
+        cls: a type to define an event on.
+
+        event_kind: an attribute name that will be used to access the
+                    event. Must be a valid python identifier, not be a keyword
+                    or an existing attribute.
+
+        event_type: a type of the event to define.
+
+        """
+        prefix = 'unable to define an event with event_kind that '
+        if not event_kind.isidentifier():
+            raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
+        elif keyword.iskeyword(event_kind):
+            raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
+        try:
+            getattr(cls, event_kind)
+            raise RuntimeError(
+                prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
+        except AttributeError:
+            pass
+
+        event_descriptor = EventSource(event_type)
+        event_descriptor._set_name(cls, event_kind)
+        setattr(cls, event_kind, event_descriptor)
+
+    def events(self):
+        """Return a mapping of event_kinds to bound_events for all available events.
+        """
+        events_map = {}
+        # We have to iterate over the class rather than instance to allow for properties which
+        # might call this method (e.g., event views), leading to infinite recursion.
+        for attr_name, attr_value in inspect.getmembers(type(self)):
+            if isinstance(attr_value, EventSource):
+                # We actually care about the bound_event, however, since it
+                # provides the most info for users of this method.
+                event_kind = attr_name
+                bound_event = getattr(self, event_kind)
+                events_map[event_kind] = bound_event
+        return events_map
+
+    def __getitem__(self, key):
+        return PrefixedEvents(self, key)
+
+
+class PrefixedEvents:
+
+    def __init__(self, emitter, key):
+        self._emitter = emitter
+        self._prefix = key.replace("-", "_") + '_'
+
+    def __getattr__(self, name):
+        return getattr(self._emitter, self._prefix + name)
+
+
+class PreCommitEvent(EventBase):
+    pass
+
+
+class CommitEvent(EventBase):
+    pass
+
+
+class FrameworkEvents(ObjectEvents):
+    pre_commit = EventSource(PreCommitEvent)
+    commit = EventSource(CommitEvent)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
+
+
+class NoTypeError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return "cannot restore {} since no class was registered for it".format(self.handle_path)
+
+
+class SQLiteStorage:
+
+    DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+    def __init__(self, filename):
+        # The isolation_level argument is set to None such that the implicit
+        # transaction management behavior of the sqlite3 module is disabled.
+        self._db = sqlite3.connect(str(filename),
+                                   isolation_level=None,
+                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+        self._setup()
+
+    def _setup(self):
+        # Make sure that the database is locked until the connection is closed,
+        # not until the transaction ends.
+        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+        c = self._db.execute("BEGIN")
+        c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+        if c.fetchone()[0] == 0:
+            # Keep in mind what might happen if the process dies somewhere below.
+            # The system must not be rendered permanently broken by that.
+            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+            self._db.execute('''
+                CREATE TABLE notice (
+                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+                  event_path TEXT,
+                  observer_path TEXT,
+                  method_name TEXT)
+                ''')
+            self._db.commit()
+
+    def close(self):
+        self._db.close()
+
+    def commit(self):
+        self._db.commit()
+
+    # There's commit but no rollback. For abort to be supported, we'll need logic that
+    # can rollback decisions made by third-party code in terms of the internal state
+    # of objects that have been snapshotted, and hooks to let them know about it and
+    # take the needed actions to undo their logic until the last snapshot.
+    # This is doable but will increase significantly the chances for mistakes.
+
+    def save_snapshot(self, handle_path, snapshot_data):
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, snapshot_data))
+
+    def load_snapshot(self, handle_path):
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return row[0]
+        return None
+
+    def drop_snapshot(self, handle_path):
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def save_notice(self, event_path, observer_path, method_name):
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path, observer_path, method_name):
+        self._db.execute('''
+            DELETE FROM notice
+             WHERE event_path=?
+               AND observer_path=?
+               AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path):
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 WHERE event_path=?
+                 ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+class Framework(Object):
+
+    on = FrameworkEvents()
+
+    # Override properties from Object so that we can set them in __init__.
+    model = None
+    meta = None
+    charm_dir = None
+
+    def __init__(self, data_path, charm_dir, meta, model):
+
+        super().__init__(self, None)
+
+        self._data_path = data_path
+        self.charm_dir = charm_dir
+        self.meta = meta
+        self.model = model
+        self._observers = []      # [(observer_path, method_name, parent_path, event_key)]
+        self._observer = weakref.WeakValueDictionary()       # {observer_path: observer}
+        self._objects = weakref.WeakValueDictionary()
+        self._type_registry = {}  # {(parent_path, kind): cls}
+        self._type_known = set()  # {cls}
+
+        self._storage = SQLiteStorage(data_path)
+
+        # We can't use the higher-level StoredState because it relies on events.
+        self.register_type(StoredStateData, None, StoredStateData.handle_kind)
+        stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
+        try:
+            self._stored = self.load_snapshot(stored_handle)
+        except NoSnapshotError:
+            self._stored = StoredStateData(self, '_stored')
+            self._stored['event_count'] = 0
+
+        # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
+        # breakpoint(); if Python < 3.7, this doesn't affect anything
+        sys.breakpointhook = self.breakpoint
+
+        # Flag to indicate that we already presented the welcome message in a debugger breakpoint
+        self._breakpoint_welcomed = False
+
+        # Parse once the env var, which may be used multiple times later
+        debug_at = os.environ.get('JUJU_DEBUG_AT')
+        self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
+    def close(self):
+        self._storage.close()
+
+    def _track(self, obj):
+        """Track object and ensure it is the only object created using its handle path."""
+        if obj is self:
+            # Framework objects don't track themselves
+            return
+        if obj.handle.path in self.framework._objects:
+            raise RuntimeError(
+                'two objects claiming to be {} have been created'.format(obj.handle.path))
+        self._objects[obj.handle.path] = obj
+
+    def _forget(self, obj):
+        """Stop tracking the given object. See also _track."""
+        self._objects.pop(obj.handle.path, None)
+
+    def commit(self):
+        # Give a chance for objects to persist data they want to before a commit is made.
+        self.on.pre_commit.emit()
+        # Make sure snapshots are saved by instances of StoredStateData. Any possible state
+        # modifications in on_commit handlers of instances of other classes will not be persisted.
+        self.on.commit.emit()
+        # Save our event count after all events have been emitted.
+        self.save_snapshot(self._stored)
+        self._storage.commit()
+
+    def register_type(self, cls, parent, kind=None):
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        if parent:
+            parent_path = parent.path
+        else:
+            parent_path = None
+        if not kind:
+            kind = cls.handle_kind
+        self._type_registry[(parent_path, kind)] = cls
+        self._type_known.add(cls)
+
+    def save_snapshot(self, value):
+        """Save a persistent snapshot of the provided value.
+
+        The provided value must implement the following interface:
+
+        value.handle = Handle(...)
+        value.snapshot() => {...}  # Simple builtin types only.
+        value.restore(snapshot)    # Restore custom state from prior snapshot.
+        """
+        if type(value) not in self._type_known:
+            raise RuntimeError(
+                'cannot save {} values before registering that type'.format(type(value).__name__))
+        data = value.snapshot()
+
+        # Use marshal as a validator, enforcing the use of simple types, as the
+        # information is later really pickled, which is too error prone for future evolution of the
+        # stored data (e.g. if the developer stores a custom object and later changes its
+        # class name; when unpickling the original class will not be there and event
+        # data loading will fail).
+        try:
+            marshal.dumps(data)
+        except ValueError:
+            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+            raise ValueError(msg.format(value.__class__.__name__, data))
+
+        # Use pickle for serialization, so the value remains portable.
+        raw_data = pickle.dumps(data)
+        self._storage.save_snapshot(value.handle.path, raw_data)
+
+    def load_snapshot(self, handle):
+        parent_path = None
+        if handle.parent:
+            parent_path = handle.parent.path
+        cls = self._type_registry.get((parent_path, handle.kind))
+        if not cls:
+            raise NoTypeError(handle.path)
+        raw_data = self._storage.load_snapshot(handle.path)
+        if not raw_data:
+            raise NoSnapshotError(handle.path)
+        data = pickle.loads(raw_data)
+        obj = cls.__new__(cls)
+        obj.framework = self
+        obj.handle = handle
+        obj.restore(data)
+        self._track(obj)
+        return obj
+
+    def drop_snapshot(self, handle):
+        self._storage.drop_snapshot(handle.path)
+
+    def observe(self, bound_event, observer):
+        """Register observer to be called when bound_event is emitted.
+
+        The bound_event is generally provided as an attribute of the object that emits
+        the event, and is created in this style:
+
+            class SomeObject:
+                something_happened = Event(SomethingHappened)
+
+        That event may be observed as:
+
+            framework.observe(someobj.something_happened, self.on_something_happened)
+
+        If the method to be called follows the name convention "on_<event name>", it
+        may be omitted from the observe call. That means the above is equivalent to:
+
+            framework.observe(someobj.something_happened, self)
+
+        """
+        if not isinstance(bound_event, BoundEvent):
+            raise RuntimeError(
+                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+                    bound_event))
+
+        event_type = bound_event.event_type
+        event_kind = bound_event.event_kind
+        emitter = bound_event.emitter
+
+        self.register_type(event_type, emitter, event_kind)
+
+        if hasattr(emitter, "handle"):
+            emitter_path = emitter.handle.path
+        else:
+            raise RuntimeError(
+                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+        method_name = None
+        if isinstance(observer, types.MethodType):
+            method_name = observer.__name__
+            observer = observer.__self__
+        else:
+            method_name = "on_" + event_kind
+            if not hasattr(observer, method_name):
+                raise RuntimeError(
+                    'Observer method not provided explicitly'
+                    ' and {} type has no "{}" method'.format(type(observer).__name__,
+                                                             method_name))
+
+        # Validate that the method has an acceptable call signature.
+        sig = inspect.signature(getattr(observer, method_name))
+        # Self isn't included in the params list, so the first arg will be the event.
+        extra_params = list(sig.parameters.values())[1:]
+        if not sig.parameters:
+            raise TypeError(
+                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+        elif any(param.default is inspect.Parameter.empty for param in extra_params):
+            # Allow for additional optional params, since there's no reason to exclude them, but
+            # required params will break.
+            raise TypeError(
+                '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))
+
+        # TODO Prevent the exact same parameters from being registered more than once.
+
+        self._observer[observer.handle.path] = observer
+        self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
+    def _next_event_key(self):
+        """Return the next event key that should be used, incrementing the internal counter."""
+        # Increment the count first; this means the keys will start at 1, and 0
+        # means no events have been emitted.
+        self._stored['event_count'] += 1
+        return str(self._stored['event_count'])
+
+    def _emit(self, event):
+        """See BoundEvent.emit for the public way to call this."""
+
+        # Save the event for all known observers before the first notification
+        # takes place, so that either everyone interested sees it, or nobody does.
+        self.save_snapshot(event)
+        event_path = event.handle.path
+        event_kind = event.handle.kind
+        parent_path = event.handle.parent.path
+        # TODO Track observers by (parent_path, event_kind) rather than as a list of
+        # all observers. Avoiding linear search through all observers for every event
+        for observer_path, method_name, _parent_path, _event_kind in self._observers:
+            if _parent_path != parent_path:
+                continue
+            if _event_kind and _event_kind != event_kind:
+                continue
+            # Again, only commit this after all notices are saved.
+            self._storage.save_notice(event_path, observer_path, method_name)
+        self._reemit(event_path)
+
+    def reemit(self):
+        """Reemit previously deferred events to the observers that deferred them.
+
+        Only the specific observers that have previously deferred the event will be
+        notified again. Observers that asked to be notified about an event after it
+        was first emitted won't be notified, as that would mean potentially observing
+        events out of order.
+        """
+        self._reemit()
+
+    def _reemit(self, single_event_path=None):
+        last_event_path = None
+        deferred = True
+        for event_path, observer_path, method_name in self._storage.notices(single_event_path):
+            event_handle = Handle.from_path(event_path)
+
+            if last_event_path != event_path:
+                if not deferred:
+                    self._storage.drop_snapshot(last_event_path)
+                last_event_path = event_path
+                deferred = False
+
+            try:
+                event = self.load_snapshot(event_handle)
+            except NoTypeError:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+                continue
+
+            event.deferred = False
+            observer = self._observer.get(observer_path)
+            if observer:
+                custom_handler = getattr(observer, method_name, None)
+                if custom_handler:
+                    event_is_from_juju = isinstance(event, charm.HookEvent)
+                    event_is_action = isinstance(event, charm.ActionEvent)
+                    if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at:
+                        # Present the welcome message and run under PDB.
+                        self._show_debug_code_message()
+                        pdb.runcall(custom_handler, event)
+                    else:
+                        # Regular call to the registered method.
+                        custom_handler(event)
+
+            if event.deferred:
+                deferred = True
+            else:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+            # We intentionally consider this event to be dead and reload it from
+            # scratch in the next pass.
+            self.framework._forget(event)
+
+        if not deferred:
+            self._storage.drop_snapshot(last_event_path)
+
+    def _show_debug_code_message(self):
+        """Present the welcome message (only once!) when using debugger functionality."""
+        if not self._breakpoint_welcomed:
+            self._breakpoint_welcomed = True
+            print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
+    def breakpoint(self, name=None):
+        """Add breakpoint, optionally named, at the place where this method is called.
+
+        For the breakpoint to be activated the JUJU_DEBUG_AT environment variable
+        must be set to "all" or to the specific name parameter provided, if any. In every
+        other situation calling this method does nothing.
+
+        The framework also provides a standard breakpoint named "hook", that will
+        stop execution when a hook event is about to be handled.
+
+        For those reasons, the "all" and "hook" breakpoint names are reserved.
+        """
+        # If given, validate that the name complies with all the rules.
+        if name is not None:
+            if not isinstance(name, str):
+                raise TypeError('breakpoint names must be strings')
+            if name in ('hook', 'all'):
+                raise ValueError('breakpoint names "all" and "hook" are reserved')
+            if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
+                raise ValueError('breakpoint names must look like "foo" or "foo-bar"')
+
+        indicated_breakpoints = self._juju_debug_at
+        if 'all' in indicated_breakpoints or name in indicated_breakpoints:
+            self._show_debug_code_message()
+
+            # If we called set_trace() directly it would open the debugger *here*, so
+            # instruct it to use our caller's frame instead.
+            code_frame = inspect.currentframe().f_back
+            pdb.Pdb().set_trace(code_frame)
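+        # Illustrative usage sketch (the breakpoint name is an assumption):
+        #
+        #     self.framework.breakpoint('before-config')
+        #
+        # The call drops into pdb only when the JUJU_DEBUG_AT environment variable
+        # is set to "all" or includes "before-config"; otherwise it is a no-op.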
+
+
+class StoredStateData(Object):
+
+    def __init__(self, parent, attr_name):
+        super().__init__(parent, attr_name)
+        self._cache = {}
+        self.dirty = False
+
+    def __getitem__(self, key):
+        return self._cache.get(key)
+
+    def __setitem__(self, key, value):
+        self._cache[key] = value
+        self.dirty = True
+
+    def __contains__(self, key):
+        return key in self._cache
+
+    def snapshot(self):
+        return self._cache
+
+    def restore(self, snapshot):
+        self._cache = snapshot
+        self.dirty = False
+
+    def on_commit(self, event):
+        if self.dirty:
+            self.framework.save_snapshot(self)
+            self.dirty = False
+
+
+class BoundStoredState:
+
+    def __init__(self, parent, attr_name):
+        parent.framework.register_type(StoredStateData, parent)
+
+        handle = Handle(parent, StoredStateData.handle_kind, attr_name)
+        try:
+            data = parent.framework.load_snapshot(handle)
+        except NoSnapshotError:
+            data = StoredStateData(parent, attr_name)
+
+        # __dict__ is used to avoid infinite recursion.
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """"Set the value of any given key if it has not already been set"""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+        def __init__(self, parent, key):
+            super().__init__(parent, key)
+            self._stored.set_default(seen=set())
+            self.framework.observe(self.on.seen, self._on_seen)
+
+        def _on_seen(self, event):
+            self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+                if bound is not None:
+                    # the StoredState instance is being stored in two different
+                    # attributes -> unclear what is expected of us -> bail out
+                    raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
+                        cls.__name__, self.attr_name, attr_name))
+                # we've found ourselves for the first time; save where, and bind the object
+                self.attr_name = attr_name
+                self.parent_type = cls
+                bound = BoundStoredState(parent, attr_name)
+
+        if bound is not None:
+            # cache the bound object to avoid the expensive lookup the next time
+            # (don't use setattr, to keep things symmetric with the fast-path lookup above)
+            parent.__dict__[self.attr_name] = bound
+            return bound
+
+        raise AttributeError(
+            'cannot find {} attribute in type {}'.format(
+                self.__class__.__name__, parent_type.__name__))
+
+
+def _wrap_stored(parent_data, value):
+    t = type(value)
+    if t is dict:
+        return StoredDict(parent_data, value)
+    if t is list:
+        return StoredList(parent_data, value)
+    if t is set:
+        return StoredSet(parent_data, value)
+    return value
+
+
+def _unwrap_stored(parent_data, value):
+    t = type(value)
+    if t is StoredDict or t is StoredList or t is StoredSet:
+        return value._under
+    return value
+
+
+class StoredDict(collections.abc.MutableMapping):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, key):
+        return _wrap_stored(self._stored_data, self._under[key])
+
+    def __setitem__(self, key, value):
+        self._under[key] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, key):
+        del self._under[key]
+        self._stored_data.dirty = True
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    def __eq__(self, other):
+        if isinstance(other, StoredDict):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Mapping):
+            return self._under == other
+        else:
+            return NotImplemented
+
+
+class StoredList(collections.abc.MutableSequence):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, index):
+        return _wrap_stored(self._stored_data, self._under[index])
+
+    def __setitem__(self, index, value):
+        self._under[index] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, index):
+        del self._under[index]
+        self._stored_data.dirty = True
+
+    def __len__(self):
+        return len(self._under)
+
+    def insert(self, index, value):
+        self._under.insert(index, value)
+        self._stored_data.dirty = True
+
+    def append(self, value):
+        self._under.append(value)
+        self._stored_data.dirty = True
+
+    def __eq__(self, other):
+        if isinstance(other, StoredList):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under == other
+        else:
+            return NotImplemented
+
+    def __lt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under < other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under < other
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, StoredList):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under > other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under > other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+
+class StoredSet(collections.abc.MutableSet):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/jujuversion.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..4517886218c143f8c0249ac7285dc594976f9b01
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/jujuversion.py
@@ -0,0 +1,85 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+    '''
+
+    def __init__(self, version):
+        m = re.match(self.PATTERN, version, re.VERBOSE)
+        if not m:
+            raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+        d = m.groupdict()
+        self.major = int(m.group('major'))
+        self.minor = int(m.group('minor'))
+        self.tag = d['tag'] or ''
+        self.patch = int(d['patch'] or 0)
+        self.build = int(d['build'] or 0)
+
+    def __repr__(self):
+        if self.tag:
+            s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+        else:
+            s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+        if self.build > 0:
+            s += '.{}'.format(self.build)
+        return s
+
+    def __eq__(self, other):
+        if self is other:
+            return True
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+        return (
+            self.major == other.major
+            and self.minor == other.minor
+            and self.tag == other.tag
+            and self.build == other.build
+            and self.patch == other.patch)
+
+    def __lt__(self, other):
+        if self is other:
+            return False
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+        if self.major != other.major:
+            return self.major < other.major
+        elif self.minor != other.minor:
+            return self.minor < other.minor
+        elif self.tag != other.tag:
+            if not self.tag:
+                return False
+            elif not other.tag:
+                return True
+            return self.tag < other.tag
+        elif self.patch != other.patch:
+            return self.patch < other.patch
+        elif self.build != other.build:
+            return self.build < other.build
+        return False
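+
+
+# Illustrative usage sketch (the version strings are arbitrary examples):
+#
+#     v = JujuVersion('2.7.5')
+#     assert (v.major, v.minor, v.patch) == (2, 7, 5)
+#     assert v > '2.7-beta1'   # tagged pre-releases sort before the final release
+#     assert v < '2.8.0'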
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/log.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3f76a375a98e23c718e47bcde5c33b49f4031c7
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/log.py
@@ -0,0 +1,47 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+    """A handler for sending logs to Juju via juju-log."""
+
+    def __init__(self, model_backend, level=logging.DEBUG):
+        super().__init__(level)
+        self.model_backend = model_backend
+
+    def emit(self, record):
+        self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+    """Setup python logging to forward messages to juju-log.
+
+    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+    Charmers can also set their own default log level with::
+
+      logging.getLogger().setLevel(logging.INFO)
+
+    model_backend -- a ModelBackend to use for juju-log
+    debug -- if True, write logs to stderr as well as to juju-log.
+    """
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(JujuLogHandler(model_backend))
+    if debug:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
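+
+
+# Illustrative usage sketch: after main() has called setup_root_logging(), regular
+# Python logging calls from charm code are forwarded to juju-log, for example:
+#
+#     logging.getLogger(__name__).info('configuring the workload')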
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/main.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/main.py
new file mode 100755
index 0000000000000000000000000000000000000000..0f5391d76e45ba32dc652adfc99b2c7716d8af36
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/main.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import logging
+
+from ops.log import setup_root_logging
+
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _get_charm_dir():
+    charm_dir = os.environ.get("JUJU_CHARM_DIR")
+    if charm_dir is None:
+        # Assume $JUJU_CHARM_DIR/lib/ops/main.py structure.
+        charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+    else:
+        charm_dir = Path(charm_dir).resolve()
+    return charm_dir
+
+
+def _load_metadata(charm_dir):
+    metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text())
+
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = yaml.safe_load(actions_meta.read_text())
+    else:
+        actions_metadata = {}
+    return metadata, actions_metadata
+
+
+def _create_event_link(charm, bound_event):
+    """Create a symlink for a particular event.
+
+    charm -- A charm object.
+    bound_event -- An event for which to create a symlink.
+    """
+    if issubclass(bound_event.event_type, ops.charm.HookEvent):
+        event_dir = charm.framework.charm_dir / 'hooks'
+        event_path = event_dir / bound_event.event_kind.replace('_', '-')
+    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+        if not bound_event.event_kind.endswith("_action"):
+            raise RuntimeError(
+                'action event name {} needs _action suffix'.format(bound_event.event_kind))
+        event_dir = charm.framework.charm_dir / 'actions'
+        # The event_kind is suffixed with "_action" while the executable is not.
+        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+    else:
+        raise RuntimeError(
+            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+    event_dir.mkdir(exist_ok=True)
+    if not event_path.exists():
+        # CPython has different implementations for populating sys.argv[0] for Linux and Windows.
+        # For Windows it is always an absolute path (any symlinks are resolved)
+        # while for Linux it can be a relative path.
+        target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir))
+
+        # Ignore the non-symlink files or directories
+        # assuming the charm author knows what they are doing.
+        logger.debug(
+            'Creating a new relative symlink at %s pointing to %s',
+            event_path, target_path)
+        event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+    """Set up links for supported events that originate from Juju.
+
+    Whether a charm can handle an event or not can be determined by
+    introspecting which events are defined on it.
+
+    Hooks or actions are created as symlinks to the charm code file
+    which is determined by inspecting symlinks provided by the charm
+    author at hooks/install or hooks/start.
+
+    charm_dir -- A root directory of the charm.
+    charm -- An instance of the Charm class.
+
+    """
+    for bound_event in charm.on.events().values():
+        # Only events that originate from Juju need symlinks.
+        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+            _create_event_link(charm, bound_event)
+
+
+def _emit_charm_event(charm, event_name):
+    """Emits a charm event based on a Juju event name.
+
+    charm -- A charm instance to emit an event from.
+    event_name -- A Juju event name to emit on a charm.
+    """
+    event_to_emit = None
+    try:
+        event_to_emit = getattr(charm.on, event_name)
+    except AttributeError:
+        logger.debug("event %s not defined for %s", event_name, charm)
+
+    # If the event is not supported by the charm implementation, do
+    # not error out or try to emit it. This is to support rollbacks.
+    if event_to_emit is not None:
+        args, kwargs = _get_event_args(charm, event_to_emit)
+        logger.debug('Emitting Juju event %s', event_name)
+        event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+    event_type = bound_event.event_type
+    model = charm.framework.model
+
+    if issubclass(event_type, ops.charm.RelationEvent):
+        relation_name = os.environ['JUJU_RELATION']
+        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+        relation = model.get_relation(relation_name, relation_id)
+    else:
+        relation = None
+
+    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+    if remote_app_name or remote_unit_name:
+        if not remote_app_name:
+            if '/' not in remote_unit_name:
+                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+            remote_app_name = remote_unit_name.split('/')[0]
+        args = [relation, model.get_app(remote_app_name)]
+        if remote_unit_name:
+            args.append(model.get_unit(remote_unit_name))
+        return args, {}
+    elif relation:
+        return [relation], {}
+    return [], {}
+
+
+def main(charm_class):
+    """Setup the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model.ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+
+    # Process the Juju event relevant to the current hook execution
+    # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used
+    # in order to support simulation of events from debugging sessions.
+    #
+    # TODO: For Windows, when symlinks are used, this is not a valid
+    #       method of getting an event name (see LP: #1854505).
+    juju_exec_path = Path(sys.argv[0])
+    has_dispatch = juju_exec_path.name == 'dispatch'
+    if has_dispatch:
+        # The executable was 'dispatch', which means the actual hook we want to
+        # run needs to be looked up in the JUJU_DISPATCH_PATH env var, where it
+        # should be a path relative to the charm directory (the directory that
+        # holds `dispatch`). If that path actually exists, we want to run that
+        # before continuing.
+        dispatch_path = juju_exec_path.parent / Path(os.environ['JUJU_DISPATCH_PATH'])
+        if dispatch_path.exists() and dispatch_path.resolve() != juju_exec_path.resolve():
+            argv = sys.argv.copy()
+            argv[0] = str(dispatch_path)
+            try:
+                subprocess.run(argv, check=True)
+            except subprocess.CalledProcessError as e:
+                logger.warning("hook %s exited with status %d", dispatch_path, e.returncode)
+                sys.exit(e.returncode)
+        juju_exec_path = dispatch_path
+    juju_event_name = juju_exec_path.name.replace('-', '_')
+    if juju_exec_path.parent.name == 'actions':
+        juju_event_name = '{}_action'.format(juju_event_name)
+
+    metadata, actions_metadata = _load_metadata(charm_dir)
+    meta = ops.charm.CharmMeta(metadata, actions_metadata)
+    unit_name = os.environ['JUJU_UNIT_NAME']
+    model = ops.model.Model(unit_name, meta, model_backend)
+
+    # TODO: If Juju unit agent crashes after exit(0) from the charm code
+    # the framework will commit the snapshot but Juju will not commit its
+    # operation.
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+    framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model)
+    try:
+        charm = charm_class(framework, None)
+
+        if not has_dispatch:
+            # When a charm is force-upgraded and a unit is in an error state Juju
+            # does not run upgrade-charm and instead runs the failed hook followed
+            # by config-changed. Given the nature of force-upgrading the hook setup
+            # code is not triggered on config-changed.
+            #
+            # 'start' event is included as Juju does not fire the install event for
+            # K8s charms (see LP: #1854635).
+            if (juju_event_name in ('install', 'start', 'upgrade_charm')
+                    or juju_event_name.endswith('_storage_attached')):
+                _setup_event_links(charm_dir, charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        #       Juju changes are made.
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if juju_event_name != 'collect_metrics':
+            framework.reemit()
+
+        _emit_charm_event(charm, juju_event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
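+
+
+# Illustrative entry-point sketch (the charm class name is an assumption): a charm's
+# main script typically ends by handing its CharmBase subclass to main(), which then
+# dispatches the event derived from how the script was invoked:
+#
+#     from ops.charm import CharmBase
+#     from ops.main import main
+#
+#     class SquidCharm(CharmBase):
+#         pass
+#
+#     if __name__ == '__main__':
+#         main(SquidCharm)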
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/model.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d19ba8bff166aa0e3c0acc7dd4fe4b42690918c
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/model.py
@@ -0,0 +1,915 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import weakref
+import os
+import shutil
+import tempfile
+import time
+import datetime
+import re
+import ipaddress
+import decimal
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from pathlib import Path
+from subprocess import run, PIPE, CalledProcessError
+
+
+class Model:
+
+    def __init__(self, unit_name, meta, backend):
+        self._cache = _ModelCache(backend)
+        self._backend = backend
+        self.unit = self.get_unit(unit_name)
+        self.app = self.unit.app
+        self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache)
+        self.config = ConfigData(self._backend)
+        self.resources = Resources(list(meta.resources), self._backend)
+        self.pod = Pod(self._backend)
+        self.storages = StorageMapping(list(meta.storages), self._backend)
+        self._bindings = BindingMapping(self._backend)
+
+    def get_unit(self, unit_name):
+        return self._cache.get(Unit, unit_name)
+
+    def get_app(self, app_name):
+        return self._cache.get(Application, app_name)
+
+    def get_relation(self, relation_name, relation_id=None):
+        """Get a specific Relation instance.
+
+        If relation_id is given, this will return that Relation instance.
+
+        If relation_id is not given, this will return the Relation instance if the
+        relation is established only once, or None if it is not established. If this
+        same relation is established multiple times, TooManyRelatedAppsError is raised.
+        """
+        return self.relations._get_unique(relation_name, relation_id)
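+        # Illustrative usage sketch (the relation name "db" is an assumption):
+        #
+        #     relation = self.model.get_relation('db')
+        #     if relation is None:
+        #         return  # the relation is not established yet
+        #
+        # With no relation_id given, a single established relation is returned
+        # directly and TooManyRelatedAppsError is raised if there is more than one.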
+
+    def get_binding(self, binding_key):
+        """Get a network space binding.
+
+        binding_key -- The relation name or instance to obtain bindings for.
+
+        If binding_key is a relation name, the method returns the default binding for that
+        relation. If a relation instance is provided, the method first looks up a more specific
+        binding for that specific relation ID, and if none is found falls back to the default
+        binding for the relation name.
+        """
+        return self._bindings.get(binding_key)
+
+
+class _ModelCache:
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._weakrefs = weakref.WeakValueDictionary()
+
+    def get(self, entity_type, *args):
+        key = (entity_type,) + args
+        entity = self._weakrefs.get(key)
+        if entity is None:
+            entity = entity_type(*args, backend=self._backend, cache=self)
+            self._weakrefs[key] = entity
+        return entity
+
+
+class Application:
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+        self._backend = backend
+        self._cache = cache
+        self._is_our_app = self.name == self._backend.app_name
+        self._status = None
+
+    @property
+    def status(self):
+        if not self._is_our_app:
+            return UnknownStatus()
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot get application status as a non-leader unit')
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=True)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for application {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_app:
+            raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot set application status as a non-leader unit')
+
+        self._backend.status_set(value.name, value.message, is_app=True)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        self._status = None
+
+    @property
+    def status(self):
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self):
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+            # of a hook execution.
+            return self._backend.is_leader()
+        else:
+            raise RuntimeError(
+                'cannot determine leadership status for remote applications: {}'.format(self)
+            )
+
+    def set_workload_version(self, version):
+        """Record the version of the software running as the workload.
+
+        This shouldn't be confused with the revision of the charm. This is informative only;
+        shown in the output of 'juju status'.
+        """
+        if not isinstance(version, str):
+            raise TypeError("workload version must be a str, not {}: {!r}".format(
+                type(version).__name__, version))
+        self._backend.application_version_set(version)
+
+
+class LazyMapping(Mapping, ABC):
+
+    _lazy_data = None
+
+    @abstractmethod
+    def _load(self):
+        raise NotImplementedError()
+
+    @property
+    def _data(self):
+        data = self._lazy_data
+        if data is None:
+            data = self._lazy_data = self._load()
+        return data
+
+    def _invalidate(self):
+        self._lazy_data = None
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+
+class RelationMapping(Mapping):
+    """Map of relation names to lists of Relation instances."""
+
+    def __init__(self, relations_meta, our_unit, backend, cache):
+        self._peers = set()
+        for name, relation_meta in relations_meta.items():
+            if relation_meta.role == 'peers':
+                self._peers.add(name)
+        self._our_unit = our_unit
+        self._backend = backend
+        self._cache = cache
+        self._data = {relation_name: None for relation_name in relations_meta}
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, relation_name):
+        is_peer = relation_name in self._peers
+        relation_list = self._data[relation_name]
+        if relation_list is None:
+            relation_list = self._data[relation_name] = []
+            for rid in self._backend.relation_ids(relation_name):
+                relation = Relation(relation_name, rid, is_peer,
+                                    self._our_unit, self._backend, self._cache)
+                relation_list.append(relation)
+        return relation_list
+
+    def _invalidate(self, relation_name):
+        self._data[relation_name] = None
+
+    def _get_unique(self, relation_name, relation_id=None):
+        if relation_id is not None:
+            if not isinstance(relation_id, int):
+                raise ModelError('relation id {} must be int or None not {}'.format(
+                    relation_id,
+                    type(relation_id).__name__))
+            for relation in self[relation_name]:
+                if relation.id == relation_id:
+                    return relation
+            else:
+                # The relation may be dead, but it is not forgotten.
+                is_peer = relation_name in self._peers
+                return Relation(relation_name, relation_id, is_peer,
+                                self._our_unit, self._backend, self._cache)
+        num_related = len(self[relation_name])
+        if num_related == 0:
+            return None
+        elif num_related == 1:
+            return self[relation_name][0]
+        else:
+            # TODO: We need something in the framework to catch and gracefully handle
+            # errors, ideally integrating the error catching with Juju's mechanisms.
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key):
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space."""
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self):
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details."""
+
+    def __init__(self, network_info):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
+        for interface_info in network_info['bind-addresses']:
+            interface_name = interface_info['interface-name']
+            for address_info in interface_info['addresses']:
+                self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info['ingress-addresses']:
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info['egress-subnets']:
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        return self.interfaces[0].address
+
+    @property
+    def ingress_address(self):
+        return self.ingress_addresses[0]
+
+
+class NetworkInterface:
+
+    def __init__(self, name, address_info):
+        self.name = name
+        # TODO: expose a hardware address here, see LP: #1864070.
+        self.address = ipaddress.ip_address(address_info['value'])
+        cidr = address_info['cidr']
+        if not cidr:
+            # The cidr field may be empty, see LP: #1864102.
+            # In this case, make it a /32 or /128 IP network.
+            self.subnet = ipaddress.ip_network(address_info['value'])
+        else:
+            self.subnet = ipaddress.ip_network(cidr)
+        # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+    def __init__(self, relation_name, relation_id, is_peer, our_unit, backend, cache):
+        self.name = relation_name
+        self.id = relation_id
+        self.app = None
+        self.units = set()
+
+        # For peer relations, both the remote and the local app are the same.
+        if is_peer:
+            self.app = our_unit.app
+        try:
+            for unit_name in backend.relation_list(self.id):
+                unit = cache.get(Unit, unit_name)
+                self.units.add(unit)
+                if self.app is None:
+                    self.app = unit.app
+        except RelationNotFoundError:
+            # If the relation is dead, just treat it as if it has no remote units.
+            pass
+        self.data = RelationData(self, our_unit, backend)
+
+    def __repr__(self):
+        return '<{}.{} {}:{}>'.format(type(self).__module__,
+                                      type(self).__name__,
+                                      self.name,
+                                      self.id)
+
+
+class RelationData(Mapping):
+    def __init__(self, relation, our_unit, backend):
+        self.relation = weakref.proxy(relation)
+        self._data = {
+            our_unit: RelationDataContent(self.relation, our_unit, backend),
+            our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+        }
+        self._data.update({
+            unit: RelationDataContent(self.relation, unit, backend)
+            for unit in self.relation.units})
+        # The relation might be dead so avoid a None key here.
+        if self.relation.app is not None:
+            self._data.update({
+                self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+            })
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether it's actually
+# mutable or not is controlled by the flag.
+class RelationDataContent(LazyMapping, MutableMapping):
+
+    def __init__(self, relation, entity, backend):
+        self.relation = relation
+        self._entity = entity
+        self._backend = backend
+        self._is_app = isinstance(entity, Application)
+
+    def _load(self):
+        try:
+            return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+        except RelationNotFoundError:
+            # Dead relations tell no tales (and have no data).
+            return {}
+
+    def _is_mutable(self):
+        if self._is_app:
+            is_our_app = self._backend.app_name == self._entity.name
+            if not is_our_app:
+                return False
+            # Whether the application data bag is mutable or not depends on
+            # whether this unit is a leader or not, but this is not guaranteed
+            # to be always true during the same hook execution.
+            return self._backend.is_leader()
+        else:
+            is_our_unit = self._backend.unit_name == self._entity.name
+            if is_our_unit:
+                return True
+        return False
+
+    def __setitem__(self, key, value):
+        if not self._is_mutable():
+            raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+        if not isinstance(value, str):
+            raise RelationDataError('relation data values must be strings')
+
+        self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+        # Don't load data unnecessarily if we're only updating.
+        if self._lazy_data is not None:
+            if value == '':
+                # Match the behavior of Juju, which is that setting the value to an
+                # empty string will remove the key entirely from the relation data.
+                del self._data[key]
+            else:
+                self._data[key] = value
+
+    def __delitem__(self, key):
+        # Match the behavior of Juju, which is that setting the value to an empty
+        # string will remove the key entirely from the relation data.
+        self.__setitem__(key, '')
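+        # Illustrative usage sketch (the key and value are assumptions); values must
+        # be strings, and only our own unit's bag (or the application bag, on the
+        # leader) is writable:
+        #
+        #     relation.data[self.model.unit]['hostname'] = 'squid-0'
+        #     del relation.data[self.model.unit]['hostname']  # same as setting ''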
+
+
+class ConfigData(LazyMapping):
+
+    def __init__(self, backend):
+        self._backend = backend
+
+    def _load(self):
+        return self._backend.config_get()
+
+
+class StatusBase:
+    """Status values specific to applications and units."""
+
+    _statuses = {}
+
+    def __init__(self, message):
+        self.message = message
+
+    def __new__(cls, *args, **kwargs):
+        if cls is StatusBase:
+            raise TypeError("cannot instantiate a base class")
+        cls._statuses[cls.name] = cls
+        return super().__new__(cls)
+
+    @classmethod
+    def from_name(cls, name, message):
+        return cls._statuses[name](message)
+
+
+class ActiveStatus(StatusBase):
+    """The unit is ready.
+
+    The unit believes it is correctly offering all the services it has been asked to offer.
+    """
+    name = 'active'
+
+    def __init__(self, message=None):
+        super().__init__(message or '')
+
+
+class BlockedStatus(StatusBase):
+    """The unit requires manual intervention.
+
+    An operator has to manually intervene to unblock the unit and let it proceed.
+    """
+    name = 'blocked'
+
+
+class MaintenanceStatus(StatusBase):
+    """The unit is performing maintenance tasks.
+
+    The unit is not yet providing services, but is actively doing work in preparation
+    for providing those services.  This is a "spinning" state, not an error state. It
+    reflects activity on the unit itself, not on peers or related units.
+
+    """
+    name = 'maintenance'
+
+
+class UnknownStatus(StatusBase):
+    """The unit status is unknown.
+
+    A unit-agent has finished calling install, config-changed and start, but the
+    charm has not called status-set yet.
+
+    """
+    name = 'unknown'
+
+    def __init__(self):
+        # Unknown status cannot be set and does not have a message associated with it.
+        super().__init__('')
+
+
+class WaitingStatus(StatusBase):
+    """A unit is unable to progress.
+
+    The unit is unable to progress to an active state because an application to which
+    it is related is not running.
+
+    """
+    name = 'waiting'
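+
+# Illustrative usage sketch: charm code reports state by assigning one of the
+# StatusBase subclasses above to the unit (or, on the leader, the application):
+#
+#     self.unit.status = MaintenanceStatus('installing packages')
+#     self.unit.status = ActiveStatus()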
+
+
+class Resources:
+    """Object representing resources for the charm.
+    """
+
+    def __init__(self, names, backend):
+        self._backend = backend
+        self._paths = {name: None for name in names}
+
+    def fetch(self, name):
+        """Fetch the resource from the controller or store.
+
+        If successfully fetched, this returns a Path object to where the resource is stored
+        on disk, otherwise it raises a ModelError.
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
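+        # Illustrative usage sketch (the resource name is an assumption and would
+        # need to be declared in the charm's metadata.yaml):
+        #
+        #     path = self.model.resources.fetch('config-file')
+        #     content = path.read_text()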
+
+
+class Pod:
+    def __init__(self, backend):
+        self._backend = backend
+
+    def set_spec(self, spec, k8s_resources=None):
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
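+        # Illustrative usage sketch (the spec contents are an assumption; only the
+        # leader unit may set the pod spec):
+        #
+        #     self.model.pod.set_spec({'version': 3, 'containers': [...]})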
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names, backend):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name):
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name, count=1):
+        """Requests new storage instances of a given name.
+
+        Uses storage-add tool to request additional storage. Juju will notify the unit
+        via <storage-name>-storage-attached events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    pass
+
+
+class RelationNotFoundError(ModelError):
+    pass
+
+
+class InvalidStatusError(ModelError):
+    pass
+
+
+class ModelBackend:
+
+    LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30)
+
+    def __init__(self):
+        self.unit_name = os.environ['JUJU_UNIT_NAME']
+        self.app_name = self.unit_name.split('/')[0]
+
+        self._is_leader = None
+        self._leader_check_time = None
+
+    def _run(self, *args, return_output=False, use_json=False):
+        kwargs = dict(stdout=PIPE, stderr=PIPE)
+        if use_json:
+            args += ('--format=json',)
+        try:
+            result = run(args, check=True, **kwargs)
+        except CalledProcessError as e:
+            raise ModelError(e.stderr)
+        if return_output:
+            if result.stdout is None:
+                return ''
+            else:
+                text = result.stdout.decode('utf8')
+                if use_json:
+                    return json.loads(text)
+                else:
+                    return text
+
+    def relation_ids(self, relation_name):
+        relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
+        return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
+
+    def relation_list(self, relation_id):
+        try:
+            return self._run('relation-list', '-r', str(relation_id),
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_get must be a boolean')
+
+        try:
+            return self._run('relation-get', '-r', str(relation_id),
+                             '-', member_name, '--app={}'.format(is_app),
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_set(self, relation_id, key, value, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_set must be a boolean')
+
+        try:
+            return self._run('relation-set', '-r', str(relation_id),
+                             '{}={}'.format(key, value), '--app={}'.format(is_app))
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def config_get(self):
+        return self._run('config-get', return_output=True, use_json=True)
+
+    def is_leader(self):
+        """Obtain the current leadership status for the unit the charm code is executing on.
+
+        The value is cached for the duration of a lease which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        app -- A boolean indicating whether the status should be retrieved for a unit
+               or an application.
+        """
+        return self._run('status-get', '--include-data', '--application={}'.format(is_app))
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        app -- A boolean indicating whether the status should be set for a unit or an
+               application.
+        """
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter must be boolean')
+        return self._run('status-set', '--application={}'.format(is_app), status, message)
+
+    def storage_list(self, name):
+        return [int(s.split('/')[1]) for s in self._run('storage-list', name,
+                                                        return_output=True, use_json=True)]
+
+    def storage_get(self, storage_name_id, attribute):
+        return self._run('storage-get', '-s', storage_name_id, attribute,
+                         return_output=True, use_json=True)
+
+    def storage_add(self, name, count=1):
+        if not isinstance(count, int) or isinstance(count, bool):
+            raise TypeError('storage count must be integer, got: {} ({})'.format(count,
+                                                                                 type(count)))
+        self._run('storage-add', '{}={}'.format(name, count))
+
+    def action_get(self):
+        return self._run('action-get', return_output=True, use_json=True)
+
+    def action_set(self, results):
+        self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])
+
+    def action_log(self, message):
+        self._run('action-log', message)
+
+    def action_fail(self, message=''):
+        self._run('action-fail', message)
+
+    def application_version_set(self, version):
+        self._run('application-version-set', '--', version)
+
+    def juju_log(self, level, message):
+        self._run('juju-log', '--log-level', level, message)
+
+    def network_get(self, binding_name, relation_id=None):
+        """Return network info provided by network-get for a given binding.
+
+        binding_name -- A name of a binding (relation name or extra-binding name).
+        relation_id -- An optional relation id to get network info for.
+        """
+        cmd = ['network-get', binding_name]
+        if relation_id is not None:
+            cmd.extend(['-r', str(relation_id)])
+        try:
+            return self._run(*cmd, return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def add_metrics(self, metrics, labels=None):
+        cmd = ['add-metric']
+
+        if labels:
+            label_args = []
+            for k, v in labels.items():
+                _ModelBackendValidator.validate_metric_label(k)
+                _ModelBackendValidator.validate_label_value(k, v)
+                label_args.append('{}={}'.format(k, v))
+            cmd.extend(['--labels', ','.join(label_args)])
+
+        metric_args = []
+        for k, v in metrics.items():
+            _ModelBackendValidator.validate_metric_key(k)
+            metric_value = _ModelBackendValidator.format_metric_value(v)
+            metric_args.append('{}={}'.format(k, metric_value))
+        cmd.extend(metric_args)
+        self._run(*cmd)
+
+
+class _ModelBackendValidator:
+    """Provides facilities for validating inputs and formatting them for model backends."""
+
+    METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$')
+
+    @classmethod
+    def validate_metric_key(cls, key):
+        if cls.METRIC_KEY_REGEX.match(key) is None:
+            raise ModelError(
+                'invalid metric key {!r}: must match {}'.format(
+                    key, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def validate_metric_label(cls, label_name):
+        if cls.METRIC_KEY_REGEX.match(label_name) is None:
+            raise ModelError(
+                'invalid metric label name {!r}: must match {}'.format(
+                    label_name, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def format_metric_value(cls, value):
+        try:
+            decimal_value = decimal.Decimal.from_float(value)
+        except TypeError as e:
+            e2 = ModelError('invalid metric value {!r} provided:'
+                            ' must be a positive finite float'.format(value))
+            raise e2 from e
+        if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0:
+            raise ModelError('invalid metric value {!r} provided:'
+                             ' must be a positive finite float'.format(value))
+        return str(decimal_value)
+
+    @classmethod
+    def validate_label_value(cls, label, value):
+        # Label values cannot be empty and cannot contain commas or equal signs,
+        # as those are used by add-metric as separators.
+        if not value:
+            raise ModelError(
+                'metric label {} has an empty value, which is not allowed'.format(label))
+        v = str(value)
+        if re.search('[,=]', v) is not None:
+            raise ModelError(
+                'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/magma/hackfest_squid_cnf/charms/squid/lib/ops/testing.py b/magma/hackfest_squid_cnf/charms/squid/lib/ops/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..72d840c4b10a2fbbdd53594c0752c63da3da14dd
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/lib/ops/testing.py
@@ -0,0 +1,477 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import typing
+
+from ops import charm, framework, model
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+            parent directory of the Charm, and if not found fall back to a trivial
+            'name: test-charm' metadata.
+        actions: A string or file-like object containing the contents of
+            actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+            parent directory of the Charm.
+    """
+
+    def __init__(
+            self,
+            charm_cls: typing.Type[charm.CharmBase],
+            *,
+            meta: OptionalYAML = None,
+            actions: OptionalYAML = None):
+        # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since
+        #       it would define the default values of config that the charm would see.
+        self._charm_cls = charm_cls
+        self._charm = None
+        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
+        self._meta = self._create_meta(meta, actions)
+        self._unit_name = self._meta.name + '/0'
+        self._framework = None
+        self._hooks_enabled = True
+        self._relation_id_counter = 0
+        self._backend = _TestingModelBackend(self._unit_name, self._meta)
+        self._model = model.Model(self._unit_name, self._meta, self._backend)
+        self._framework = framework.Framework(":memory:", self._charm_dir, self._meta, self._model)
+
+    @property
+    def charm(self) -> charm.CharmBase:
+        """Return the instance of the charm class that was passed to __init__.
+
+        Note that the Charm is not instantiated until you have called
+        :meth:`.begin()`.
+        """
+        return self._charm
+
+    @property
+    def model(self) -> model.Model:
+        """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+        return self._model
+
+    @property
+    def framework(self) -> framework.Framework:
+        """Return the Framework that is being driven by this Harness."""
+        return self._framework
+
+    def begin(self) -> None:
+        """Instantiate the Charm and start handling events.
+
+        Before calling begin(), there is no Charm instance, so changes to the Model won't emit
+        events. You must call begin before :attr:`.charm` is valid.
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot call the begin method on the harness more than once')
+
+        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+        # the original class against multiple Frameworks. So create a locally defined class
+        # and register it.
+        # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+        #       class attributes, which should clean up this ugliness. The API can stay the same.
+        class TestEvents(self._charm_cls.on.__class__):
+            pass
+
+        TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+        class TestCharm(self._charm_cls):
+            on = TestEvents()
+
+        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+        # rather than TestCharm has no attribute foo.
+        TestCharm.__name__ = self._charm_cls.__name__
+        self._charm = TestCharm(self._framework, self._framework.meta.name)
+
+    def _create_meta(self, charm_metadata, action_metadata):
+        """Create a CharmMeta object.
+
+        Handle the cases where a user doesn't supply explicit metadata snippets.
+        """
+        filename = inspect.getfile(self._charm_cls)
+        charm_dir = pathlib.Path(filename).parents[1]
+
+        if charm_metadata is None:
+            metadata_path = charm_dir / 'metadata.yaml'
+            if metadata_path.is_file():
+                charm_metadata = metadata_path.read_text()
+                self._charm_dir = charm_dir
+            else:
+                # The simplest of metadata that the framework can support
+                charm_metadata = 'name: test-charm'
+        elif isinstance(charm_metadata, str):
+            charm_metadata = dedent(charm_metadata)
+
+        if action_metadata is None:
+            actions_path = charm_dir / 'actions.yaml'
+            if actions_path.is_file():
+                action_metadata = actions_path.read_text()
+                self._charm_dir = charm_dir
+        elif isinstance(action_metadata, str):
+            action_metadata = dedent(action_metadata)
+
+        return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+    def disable_hooks(self) -> None:
+        """Stop emitting hook events when the model changes.
+
+        This can be used by developers to stop changes to the model from emitting events that
+        the charm will react to. Call :meth:`.enable_hooks`
+        to re-enable them.
+        """
+        self._hooks_enabled = False
+
+    def enable_hooks(self) -> None:
+        """Re-enable hook events from charm.on when the model is changed.
+
+        By default hook events are enabled once you call :meth:`.begin`,
+        but if you have used :meth:`.disable_hooks`, this can be used to
+        enable them again.
+        """
+        self._hooks_enabled = True
+
+    def _next_relation_id(self):
+        rel_id = self._relation_id_counter
+        self._relation_id_counter += 1
+        return rel_id
+
+    def add_relation(self, relation_name: str, remote_app: str) -> int:
+        """Declare that there is a new relation between this app and `remote_app`.
+
+        Args:
+            relation_name: The relation on Charm that is being related to
+            remote_app: The name of the application that is being related to
+
+        Return:
+            The relation_id created by this add_relation.
+        """
+        rel_id = self._next_relation_id()
+        self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
+        self._backend._relation_names[rel_id] = relation_name
+        self._backend._relation_list_map[rel_id] = []
+        self._backend._relation_data[rel_id] = {
+            remote_app: {},
+            self._backend.unit_name: {},
+            self._backend.app_name: {},
+        }
+        # Reload the relation_ids list
+        if self._model is not None:
+            self._model.relations._invalidate(relation_name)
+        if self._charm is None or not self._hooks_enabled:
+            return rel_id
+        relation = self._model.get_relation(relation_name, rel_id)
+        app = self._model.get_app(remote_app)
+        self._charm.on[relation_name].relation_created.emit(
+            relation, app)
+        return rel_id
+
+    def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
+        """Add a new unit to a relation.
+
+        Example::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.add_relation_unit(rel_id, 'postgresql/0')
+
+        This will trigger a `relation_joined` event for the remote unit.
+
+        Args:
+            relation_id: The integer relation identifier (as returned by add_relation).
+            remote_unit_name: A string representing the remote unit that is being added.
+        Return:
+            None
+        """
+        self._backend._relation_list_map[relation_id].append(remote_unit_name)
+        self._backend._relation_data[relation_id][remote_unit_name] = {}
+        relation_name = self._backend._relation_names[relation_id]
+        # Make sure that the Model reloads the relation_list for this relation_id, as well as
+        # reloading the relation data for this unit.
+        if self._model is not None:
+            self._model.relations._invalidate(relation_name)
+            remote_unit = self._model.get_unit(remote_unit_name)
+            relation = self._model.get_relation(relation_name, relation_id)
+            relation.data[remote_unit]._invalidate()
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on[relation_name].relation_joined.emit(
+            relation, remote_unit.app, remote_unit)
+
+    def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
+        """Get the relation data bucket for a single app or unit in a given relation.
+
+        This ignores all of the safety checks of who can and can't see data in relations (eg,
+        non-leaders can't read their own application's relation data because there are no events
+        that keep that data up-to-date for the unit).
+
+        Args:
+            relation_id: The relation whose content we want to look at.
+            app_or_unit: The name of the application or unit whose data we want to read
+        Return:
+            a dict containing the relation data for `app_or_unit` or None.
+        Raises:
+            KeyError: if relation_id doesn't exist
+        """
+        return self._backend._relation_data[relation_id].get(app_or_unit, None)
+
+    def get_workload_version(self) -> str:
+        """Read the workload version that was set by the unit."""
+        return self._backend._workload_version
+
+    def update_relation_data(
+            self,
+            relation_id: int,
+            app_or_unit: str,
+            key_values: typing.Mapping,
+    ) -> None:
+        """Update the relation data for a given unit or application in a given relation.
+
+        This also triggers the `relation_changed` event for this relation_id.
+
+        Args:
+            relation_id: The integer relation_id representing this relation.
+            app_or_unit: The unit or application name that is being updated.
+                This can be the local or remote application.
+            key_values: Each key/value will be updated in the relation data.
+        """
+        relation_name = self._backend._relation_names[relation_id]
+        relation = self._model.get_relation(relation_name, relation_id)
+        if '/' in app_or_unit:
+            entity = self._model.get_unit(app_or_unit)
+        else:
+            entity = self._model.get_app(app_or_unit)
+        rel_data = relation.data.get(entity, None)
+        if rel_data is not None:
+            # rel_data may have cached now-stale data, so _invalidate() it.
+            # Note, this won't cause the data to be loaded if it wasn't already.
+            rel_data._invalidate()
+
+        new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
+        for k, v in key_values.items():
+            if v == '':
+                new_values.pop(k, None)
+            else:
+                new_values[k] = v
+        self._backend._relation_data[relation_id][app_or_unit] = new_values
+
+        if app_or_unit == self._model.unit.name:
+            # No events for our own unit
+            return
+        if app_or_unit == self._model.app.name:
+            # updating our own app only generates an event if it is a peer relation and we
+            # aren't the leader
+            is_peer = self._meta.relations[relation_name].role == 'peers'
+            if not is_peer:
+                return
+            if self._model.unit.is_leader():
+                return
+        self._emit_relation_changed(relation_id, app_or_unit)
+
+    def _emit_relation_changed(self, relation_id, app_or_unit):
+        if self._charm is None or not self._hooks_enabled:
+            return
+        rel_name = self._backend._relation_names[relation_id]
+        relation = self.model.get_relation(rel_name, relation_id)
+        if '/' in app_or_unit:
+            app_name = app_or_unit.split('/')[0]
+            unit_name = app_or_unit
+            app = self.model.get_app(app_name)
+            unit = self.model.get_unit(unit_name)
+            args = (relation, app, unit)
+        else:
+            app_name = app_or_unit
+            app = self.model.get_app(app_name)
+            args = (relation, app)
+        self._charm.on[rel_name].relation_changed.emit(*args)
+
+    def update_config(
+            self,
+            key_values: typing.Mapping[str, str] = None,
+            unset: typing.Iterable[str] = (),
+    ) -> None:
+        """Update the config as seen by the charm.
+
+        This will trigger a `config_changed` event.
+
+        Args:
+            key_values: A Mapping of key:value pairs to update in config.
+            unset: An iterable of keys to remove from Config. (Note that this does
+                not currently reset the config values to the default defined in config.yaml.)
+        """
+        config = self._backend._config
+        if key_values is not None:
+            for key, value in key_values.items():
+                config[key] = value
+        for key in unset:
+            config.pop(key, None)
+        # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
+        # is a LazyMapping, but its _load returns a dict and this method mutates
+        # the dict that Config is caching. Arguably we should be doing some sort
+        # of charm.framework.model.config._invalidate()
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on.config_changed.emit()
+
+    def set_leader(self, is_leader: bool = True) -> None:
+        """Set whether this unit is the leader or not.
+
+        If this charm becomes a leader then `leader_elected` will be triggered.
+
+        Args:
+            is_leader: True/False as to whether this unit is the leader.
+        """
+        was_leader = self._backend._is_leader
+        self._backend._is_leader = is_leader
+        # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
+        # the Model objects, so this automatically gets noticed.
+        if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+            self._charm.on.leader_elected.emit()
+
+
+class _TestingModelBackend:
+    """This conforms to the interface for ModelBackend but provides canned data.
+
+    DO NOT use this class directly; it is used by `Harness`_ to drive the model.
+    `Harness`_ is responsible for maintaining the internal consistency of the values here,
+    as the only public methods of this type are for implementing ModelBackend.
+    """
+
+    def __init__(self, unit_name, meta):
+        self.unit_name = unit_name
+        self.app_name = self.unit_name.split('/')[0]
+        self._calls = []
+        self._meta = meta
+        self._relation_ids_map = {}  # relation name to [relation_ids,...]
+        self._relation_names = {}  # reverse map from relation_id to relation_name
+        self._relation_list_map = {}  # relation_id: [unit_name,...]
+        self._relation_data = {}  # {relation_id: {name: data}}
+        self._config = {}
+        self._is_leader = False
+        self._resources_map = {}
+        self._pod_spec = None
+        self._app_status = None
+        self._unit_status = None
+        self._workload_version = None
+
+    def relation_ids(self, relation_name):
+        try:
+            return self._relation_ids_map[relation_name]
+        except KeyError as e:
+            if relation_name not in self._meta.relations:
+                raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
+            return []
+
+    def relation_list(self, relation_id):
+        try:
+            return self._relation_list_map[relation_id]
+        except KeyError as e:
+            raise model.RelationNotFoundError from e
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if is_app and '/' in member_name:
+            member_name = member_name.split('/')[0]
+        if relation_id not in self._relation_data:
+            raise model.RelationNotFoundError()
+        return self._relation_data[relation_id][member_name].copy()
+
+    def relation_set(self, relation_id, key, value, is_app):
+        relation = self._relation_data[relation_id]
+        if is_app:
+            bucket_key = self.app_name
+        else:
+            bucket_key = self.unit_name
+        if bucket_key not in relation:
+            relation[bucket_key] = {}
+        bucket = relation[bucket_key]
+        if value == '':
+            bucket.pop(key, None)
+        else:
+            bucket[key] = value
+
+    def config_get(self):
+        return self._config
+
+    def is_leader(self):
+        return self._is_leader
+
+    def application_version_set(self, version):
+        self._workload_version = version
+
+    def resource_get(self, resource_name):
+        return self._resources_map[resource_name]
+
+    def pod_spec_set(self, spec, k8s_resources):
+        self._pod_spec = (spec, k8s_resources)
+
+    def status_get(self, *, is_app=False):
+        if is_app:
+            return self._app_status
+        else:
+            return self._unit_status
+
+    def status_set(self, status, message='', *, is_app=False):
+        if is_app:
+            self._app_status = (status, message)
+        else:
+            self._unit_status = (status, message)
+
+    def storage_list(self, name):
+        raise NotImplementedError(self.storage_list)
+
+    def storage_get(self, storage_name_id, attribute):
+        raise NotImplementedError(self.storage_get)
+
+    def storage_add(self, name, count=1):
+        raise NotImplementedError(self.storage_add)
+
+    def action_get(self):
+        raise NotImplementedError(self.action_get)
+
+    def action_set(self, results):
+        raise NotImplementedError(self.action_set)
+
+    def action_log(self, message):
+        raise NotImplementedError(self.action_log)
+
+    def action_fail(self, message=''):
+        raise NotImplementedError(self.action_fail)
+
+    def network_get(self, endpoint_name, relation_id=None):
+        raise NotImplementedError(self.network_get)
diff --git a/magma/hackfest_squid_cnf/charms/squid/metadata.yaml b/magma/hackfest_squid_cnf/charms/squid/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30b0d0148b792b693f167c319a303cbe24e23136
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/metadata.yaml
@@ -0,0 +1,26 @@
+name: squid
+summary: Squid caching proxy charm for Kubernetes
+maintainers:
+    - Dominik Fleischmann <dominik.fleischmann@canonical.com>
+description: |
+    Deploys Squid Cache as a Kubernetes workload and provides actions to
+    manage the proxy, such as allowing URLs and starting, stopping, or
+    restarting the squid service.
+tags:
+    - misc
+series:
+    - kubernetes
+storage:
+    docker:
+        type: filesystem
+        location: /srv/docker/squid
+    spool:
+        type: filesystem
+        location: /var/spool/squid
+# -- example relations, delete if unneeded
+#requires:
+#    db:
+#        interface: mysql
+#provides:
+#    website:
+#        interface: http
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/.empty b/magma/hackfest_squid_cnf/charms/squid/mod/.empty
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/.flake8 b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..61d908155588a7968dd25c90cff349377305a789
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.flake8
@@ -0,0 +1,2 @@
+[flake8]
+max-line-length = 99
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/.gitignore b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..5596c502618d6428de0199c4cec069c151c0edd3
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.gitignore
@@ -0,0 +1,4 @@
+__pycache__
+/sandbox
+.idea
+docs/_build
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/.readthedocs.yaml b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.readthedocs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ee5cf263e50a8aa864554613c9aa6214ad043c0c
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.readthedocs.yaml
@@ -0,0 +1,7 @@
+version: 2                      # required
+formats: []                     # i.e. no extra formats (for now)
+python:
+  version: "3.5"
+  install:
+    - requirements: docs/requirements.txt
+    - requirements: requirements.txt
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/.travis.yml b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.travis.yml
new file mode 100644
index 0000000000000000000000000000000000000000..f222ba8b2fa4603f457c6e2f2dee82c0cbbe8f11
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/.travis.yml
@@ -0,0 +1,24 @@
+dist: bionic
+
+language: python
+
+arch:
+    - amd64
+    - arm64
+
+python:
+    - "3.5"
+    - "3.6"
+    - "3.7"
+    - "3.8"
+
+matrix:
+    include:
+        - os: osx
+          language: generic
+
+install:
+    - pip3 install pyyaml autopep8 flake8
+
+script:
+    - ./run_tests
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/LICENSE.txt b/magma/hackfest_squid_cnf/charms/squid/mod/operator/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/README.md b/magma/hackfest_squid_cnf/charms/squid/mod/operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..347bfc8effe4b26adff4268688ade2f28d502d49
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/README.md
@@ -0,0 +1,137 @@
+# The Operator Framework
+
+The Operator Framework provides a simple, lightweight, and powerful way of encapsulating operational experience in code.
+
+The framework will help you to:
+
+* model the integration of your services
+* manage the lifecycle of your application
+* create reusable and scalable components
+* keep your code simple and readable
+
+## Getting Started
+
+The following overall structure for your charm directory is recommended:
+
+```
+.
+├── config.yaml
+├── metadata.yaml
+├── mod/
+├── lib/
+│   └── ops -> ../mod/operator/ops
+├── src/
+│   └── charm.py
+└── hooks/
+    ├── install -> ../src/charm.py
+    └── start -> ../src/charm.py  # for k8s charms per below
+```
+
+The `mod/` directory should contain the operator framework dependency as a git
+submodule:
+
+```
+git submodule add https://github.com/canonical/operator mod/operator
+```
+
+Then symlink from the git submodule for the operator framework into the `lib/`
+directory of your charm so it can be imported at run time:
+
+```
+ln -s ../mod/operator/ops lib/ops
+```
+
+Other dependencies included as git submodules can be added in the `mod/`
+directory and symlinked into `lib/` as well.
+
+You can sync subsequent changes from the framework and other submodule
+dependencies by running:
+
+```
+git submodule update
+```
+
+Those cloning and checking out the source for your charm for the first time
+will need to run:
+
+```
+git submodule update --init
+```
+
+Your `src/charm.py` is the entry point for your charm logic. It should be set
+to executable and use Python 3.6 or greater. At a minimum, it needs to define
+a subclass of `CharmBase` and pass that into the framework's `main` function:
+
+```python
+import sys
+sys.path.append('lib')  # noqa: E402
+
+from ops.charm import CharmBase
+from ops.main import main
+
+
+class MyCharm(CharmBase):
+    pass
+
+
+if __name__ == "__main__":
+    main(MyCharm)
+```
+
+This charm does nothing, because the `MyCharm` class passed to the operator
+framework's `main` function is empty. Functionality can be added to the charm
+by instructing it to observe particular Juju events when the `MyCharm` object
+is initialized. For example,
+
+```python
+class MyCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.start, self.on_start)
+
+    def on_start(self, event):
+        # Handle the start event here.
+        pass
+```
+
+Every standard event in Juju may be observed that way, and you can also define
+your own events in your custom types (see the sketch after the note below).
+
+> The second argument to `observe` can be either the handler as a bound
+> method, or the observer itself if the handler is a method of the observer
+> that follows the conventional naming pattern. That is, in this case, we
+> could have called just `self.framework.observe(self.on.start, self)`.
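+
+As a minimal sketch (the event name, handler names, and `flush_cache` helper
+here are illustrative, not part of the framework), a custom event can be
+declared with `EventSource` on a `CharmEvents` subclass and emitted from your
+own charm code:
+
+```python
+from ops.charm import CharmBase, CharmEvents
+from ops.framework import EventBase, EventSource
+
+
+class CacheFlushedEvent(EventBase):
+    """Hypothetical event signalling that a cache flush has completed."""
+
+
+class MyCharmEvents(CharmEvents):
+    cache_flushed = EventSource(CacheFlushedEvent)
+
+
+class MyCharm(CharmBase):
+    on = MyCharmEvents()
+
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.cache_flushed, self.on_cache_flushed)
+
+    def flush_cache(self):
+        # ... do the actual work, then notify observers ...
+        self.on.cache_flushed.emit()
+
+    def on_cache_flushed(self, event):
+        # React to the custom event here.
+        pass
+```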
+
+The `hooks/` directory must contain a symlink to your `src/charm.py` entry
+point so that Juju can call it. You only need to set up the `hooks/install` link
+(`hooks/start` for K8s charms, until [lp#1854635](https://bugs.launchpad.net/juju/+bug/1854635)
+is resolved), and the framework will create all others at runtime.
+
+Once your charm is ready, upload it to the charm store and deploy it as
+normal with:
+
+```
+# Replace ${CHARM} with the name of the charm.
+charm push . cs:~${USER}/${CHARM}
+# Replace ${VERSION} with the version created by `charm push`.
+charm release cs:~${USER}/${CHARM}-${VERSION}
+charm grant cs:~${USER}/${CHARM}-${VERSION} everyone
+# And now deploy your charm.
+juju deploy cs:~${USER}/$CHARM
+```
+
+Alternatively, to deploy directly from local disk, run:
+
+```
+juju deploy .
+```
+
+## Operator Framework development
+
+If you want to work on the framework *itself*, you will need the following dependencies installed on your system:
+
+- Python >= 3.5
+- PyYAML
+- autopep8
+- flake8
+
+Then you can try `./run_tests`; it should all go green.
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/build_docs b/magma/hackfest_squid_cnf/charms/squid/mod/operator/build_docs
new file mode 100755
index 0000000000000000000000000000000000000000..af8b892f7568bdda5ac892beaeafad5696e607d3
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/build_docs
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+flavour=html
+
+if [ "$1" ]; then
+    if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+        flavour=help
+    else
+        flavour="$1"
+    fi
+    shift
+fi
+
+cd docs
+
+sphinx-build -M "$flavour" . _build "$@"
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/conf.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd78ed2a505f7fe66a0c4e86c0e0708beba4bff9
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/conf.py
@@ -0,0 +1,89 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# For a full list of options see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+
+# -- Path setup --------------------------------------------------------------
+
+from pathlib import Path
+import sys
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'The Operator Framework'
+copyright = '2019-2020, Canonical Ltd.'
+author = 'Canonical Ltd'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.napoleon',
+    'sphinx.ext.todo',
+    'sphinx.ext.viewcode',
+]
+
+# The document name of the “master” document, that is, the document
+# that contains the root toctree directive.
+master_doc = 'index'
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+# html_theme = 'nature' # 'alabaster'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+
+# -- Options for sphinx.ext.todo ---------------------------------------------
+
+#  If this is True, todo and todolist produce output, else they
+#  produce nothing. The default is False.
+todo_include_todos = False
+
+
+# -- Options for sphinx.ext.autodoc ------------------------------------------
+
+# This value controls how to represent typehints. The setting takes the
+# following values:
+#     'signature' – Show typehints as its signature (default)
+#     'description' – Show typehints as content of function or method
+#     'none' – Do not show typehints
+autodoc_typehints = 'description'
+
+# This value selects what content will be inserted into the main body of an
+# autoclass directive. The possible values are:
+#     'class' - Only the class’ docstring is inserted. This is the
+#               default. You can still document __init__ as a separate method
+#               using automethod or the members option to autoclass.
+#     'both' - Both the class’ and the __init__ method’s docstring are
+#              concatenated and inserted.
+#     'init' - Only the __init__ method’s docstring is inserted.
+autoclass_content = 'both'
+
+autodoc_default_options = {
+    'members': None,            # None here means "yes"
+    'undoc-members': None,
+    'show-inheritance': None,
+}
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/index.rst b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..424d78d423e1e7dd5d4049c9f88ffaec994c1714
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/index.rst
@@ -0,0 +1,58 @@
+
+Welcome to The Operator Framework's documentation!
+==================================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+ops package
+===========
+
+.. automodule:: ops
+
+Submodules
+----------
+
+ops.charm module
+----------------
+
+.. automodule:: ops.charm
+
+ops.framework module
+--------------------
+
+.. automodule:: ops.framework
+
+ops.jujuversion module
+----------------------
+
+.. automodule:: ops.jujuversion
+
+ops.log module
+--------------
+
+.. automodule:: ops.log
+
+ops.main module
+---------------
+
+.. automodule:: ops.main
+
+ops.model module
+----------------
+
+.. automodule:: ops.model
+
+ops.testing module
+------------------
+
+.. automodule:: ops.testing
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/requirements.txt b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..37b11036df8f354e8510b77a87bba554cfafe887
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/docs/requirements.txt
@@ -0,0 +1 @@
+sphinx<2
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/__init__.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2089e3803186b5a0b12e9423b1c452596adf3c6
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework."""
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm  # NOQA
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/charm.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..4682c20e9ff9c41db6ef748a2fd38fecdd331148
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/charm.py
@@ -0,0 +1,562 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pathlib
+import typing
+
+import yaml
+
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+from ops import model
+
+
+class HookEvent(EventBase):
+    """A base class for events that trigger because of a Juju hook firing."""
+
+
+class ActionEvent(EventBase):
+    """A base class for events that trigger when a user asks for an Action to be run.
+
+    To read the parameters for the action, see the instance variable `params`.
+    To respond with the result of the action, call `set_results`. To add progress
+    messages that are visible as the action is progressing use `log`.
+
+    :ivar params: The parameters passed to the action (read by action-get)
+    """
+
+    def defer(self):
+        """Action events are not deferable like other events.
+
+        This is because an action runs synchronously and the user is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by Charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Represents the `install` hook from Juju."""
+
+
+class StartEvent(HookEvent):
+    """Represents the `start` hook from Juju."""
+
+
+class StopEvent(HookEvent):
+    """Represents the `stop` hook from Juju."""
+
+
+class RemoveEvent(HookEvent):
+    """Represents the `remove` hook from Juju. """
+
+
+class ConfigChangedEvent(HookEvent):
+    """Represents the `config-changed` hook from Juju."""
+
+
+class UpdateStatusEvent(HookEvent):
+    """Represents the `update-status` hook from Juju."""
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Represents the `upgrade-charm` hook from Juju.
+
+    This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
+    has unpacked the upgraded charm code, and so this event will be handled with new code.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Represents the `pre-series-upgrade` hook from Juju.
+
+    This happens when a user has run `juju upgrade-series MACHINE prepare` and
+    will fire for each unit that is running on the machine, telling them that
+    the user is preparing to upgrade the machine's series (e.g. trusty -> bionic).
+    The charm should take actions to prepare for the upgrade (a database charm
+    would want to write out a version-independent dump of the database, so that
+    when a new version of the database is available in a new series, it can be
+    used).
+    Once all units on a machine have run `pre-series-upgrade`, the user will
+    initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
+    When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Represents the `post-series-upgrade` hook from Juju.
+
+    This is run after the user has done a distribution upgrade (or rolled back
+    and kept the same series). It is called in response to
+    `juju upgrade-series MACHINE complete`. Charms are expected to do whatever
+    steps are necessary to reconfigure their applications for the new series.
+    """
+
+
+class LeaderElectedEvent(HookEvent):
+    """Represents the `leader-elected` hook from Juju.
+
+    Juju will trigger this when a new lead unit is chosen for a given application.
+    The elected unit coordinates charm-level information (it is not necessarily the
+    primary of the running application). The main utility is that charm authors can
+    rely on only one unit being the leader at any given time, so they can perform
+    configuration and similar tasks that would otherwise require coordination
+    between units (e.g. selecting a password for a new relation).
+    """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+    """Represents the `leader-settings-changed` hook from Juju.
+
+    Deprecated. This represents when a lead unit would call `leader-set` to inform
+    the other units of an application that they have new information to handle.
+    This has been deprecated in favor of using a Peer relation, and having the
+    leader set a value in the Application data bag for that peer relation.
+    (see :class:`RelationChangedEvent`).
+    """
+
+
+class CollectMetricsEvent(HookEvent):
+    """Represents the `collect-metrics` hook from Juju.
+
+    Note that events firing during a CollectMetricsEvent are currently
+    sandboxed in how they can interact with Juju. To report metrics
+    use :meth:`.add_metrics`.
+    """
+
+    def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None:
+        """Record metrics that have been gathered by the charm for this unit.
+
+        Args:
+            metrics: A collection of {key: float} pairs that contains the
+              metrics that have been gathered
+            labels: {key:value} strings that can be applied to the
+                metrics that are being gathered
+        """
+        self.framework.model._backend.add_metrics(metrics, labels)
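+
+# Example (illustrative sketch only, not part of the upstream module): an observer of
+# collect_metrics could report a gauge with a label, assuming `users` and `channel`
+# are values already computed by the charm:
+#
+#     def _on_collect_metrics(self, event):
+#         event.add_metrics({'active-users': users}, {'channel': channel})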
+
+
+class RelationEvent(HookEvent):
+    """A base class representing the various relation lifecycle events.
+
+    Charmers should not be creating RelationEvents directly. The events will be
+    generated by the framework from Juju related events. Users can observe them
+    from the various `CharmBase.on[relation_name].relation_*` events.
+
+    Attributes:
+        relation: The Relation involved in this event
+        app: The remote application that has triggered this event
+        unit: The remote unit that has triggered this event. This may be None
+              if the relation event was triggered as an Application level event
+    """
+
+    def __init__(self, handle, relation, app=None, unit=None):
+        super().__init__(handle)
+
+        if unit is not None and unit.app != app:
+            raise RuntimeError(
+                'cannot create RelationEvent with application {} and unit {}'.format(app, unit))
+
+        self.relation = relation
+        self.app = app
+        self.unit = unit
+
+    def snapshot(self) -> dict:
+        """Used by the framework to serialize the event to disk.
+
+        Not meant to be called by Charm code.
+        """
+        snapshot = {
+            'relation_name': self.relation.name,
+            'relation_id': self.relation.id,
+        }
+        if self.app:
+            snapshot['app_name'] = self.app.name
+        if self.unit:
+            snapshot['unit_name'] = self.unit.name
+        return snapshot
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the framework to deserialize the event from disk.
+
+        Not meant to be called by Charm code.
+        """
+        self.relation = self.framework.model.get_relation(
+            snapshot['relation_name'], snapshot['relation_id'])
+
+        app_name = snapshot.get('app_name')
+        if app_name:
+            self.app = self.framework.model.get_app(app_name)
+        else:
+            self.app = None
+
+        unit_name = snapshot.get('unit_name')
+        if unit_name:
+            self.unit = self.framework.model.get_unit(unit_name)
+        else:
+            self.unit = None
+
+
+class RelationCreatedEvent(RelationEvent):
+    """Represents the `relation-created` hook from Juju.
+
+    This is triggered when a new relation to another app is added in Juju. This
+    can occur before units for those applications have started. All existing
+    relations should be established before start.
+    """
+
+
+class RelationJoinedEvent(RelationEvent):
+    """Represents the `relation-joined` hook from Juju.
+
+    This is triggered whenever a new unit of a related application joins the relation.
+    (eg, a unit was added to an existing related app, or a new relation was established
+    with an application that already had units.)
+    """
+
+
+class RelationChangedEvent(RelationEvent):
+    """Represents the `relation-changed` hook from Juju.
+
+    This is triggered whenever there is a change to the data bucket for a related
+    application or unit. Look at `event.relation.data[event.unit/app]` to see the
+    new information.
+    """
+
+
+class RelationDepartedEvent(RelationEvent):
+    """Represents the `relation-departed` hook from Juju.
+
+    This is the inverse of the RelationJoinedEvent, representing when a unit
+    is leaving the relation (the unit is being removed, the app is being removed,
+    the relation is being removed). It is fired once for each unit that is
+    going away.
+    """
+
+
+class RelationBrokenEvent(RelationEvent):
+    """Represents the `relation-broken` hook from Juju.
+
+    If a relation is being removed (`juju remove-relation` or `juju remove-application`),
+    once all the units have been removed, RelationBrokenEvent will fire to signal
+    that the relationship has been fully terminated.
+    """
+
+
+class StorageEvent(HookEvent):
+    """Base class representing Storage related events."""
+
+
+class StorageAttachedEvent(StorageEvent):
+    """Represents the `storage-attached` hook from Juju.
+
+    Called when new storage is available for the charm to use.
+    """
+
+
+class StorageDetachingEvent(StorageEvent):
+    """Represents the `storage-detaching` hook from Juju.
+
+    Called when storage a charm has been using is going away.
+    """
+
+
+class CharmEvents(ObjectEvents):
+    """The events that are generated by Juju in response to the lifecycle of an application."""
+
+    install = EventSource(InstallEvent)
+    start = EventSource(StartEvent)
+    stop = EventSource(StopEvent)
+    remove = EventSource(RemoveEvent)
+    update_status = EventSource(UpdateStatusEvent)
+    config_changed = EventSource(ConfigChangedEvent)
+    upgrade_charm = EventSource(UpgradeCharmEvent)
+    pre_series_upgrade = EventSource(PreSeriesUpgradeEvent)
+    post_series_upgrade = EventSource(PostSeriesUpgradeEvent)
+    leader_elected = EventSource(LeaderElectedEvent)
+    leader_settings_changed = EventSource(LeaderSettingsChangedEvent)
+    collect_metrics = EventSource(CollectMetricsEvent)
+
+
+class CharmBase(Object):
+    """Base class that represents the Charm overall.
+
+    Usually this initialization is done by ops.main.main() rather than Charm authors
+    directly instantiating a Charm.
+
+    Args:
+        framework: The framework responsible for managing the Model and events for this
+            Charm.
+        key: Arbitrary key to distinguish this instance of CharmBase from another.
+            This is generally None when the charm is initialized by the framework; for
+            charms instantiated by main.main(), it is currently None.
+    Attributes:
+        on: Defines all events that the Charm will fire.
+    """
+
+    on = CharmEvents()
+
+    def __init__(self, framework: Framework, key: typing.Optional[str]):
+        """Initialize the Charm with its framework and application name.
+
+        """
+        super().__init__(framework, key)
+
+        for relation_name in self.framework.meta.relations:
+            relation_name = relation_name.replace('-', '_')
+            self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+            self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+            self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+            self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+            self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+        for storage_name in self.framework.meta.storages:
+            storage_name = storage_name.replace('-', '_')
+            self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+            self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+        for action_name in self.framework.meta.actions:
+            action_name = action_name.replace('-', '_')
+            self.on.define_event(action_name + '_action', ActionEvent)
+
+    @property
+    def app(self) -> model.Application:
+        """Application that this unit is part of."""
+        return self.framework.model.app
+
+    @property
+    def unit(self) -> model.Unit:
+        """Unit that this execution is responsible for."""
+        return self.framework.model.unit
+
+    @property
+    def meta(self) -> 'CharmMeta':
+        """CharmMeta of this charm.
+        """
+        return self.framework.meta
+
+    @property
+    def charm_dir(self) -> pathlib.Path:
+        """Root directory of the Charm as it is running.
+        """
+        return self.framework.charm_dir
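+
+# Example (illustrative sketch only; the class and handler names are made up): a
+# minimal charm built on CharmBase observes the events generated above in its
+# constructor:
+#
+#     class SquidCharm(CharmBase):
+#         def __init__(self, framework, key):
+#             super().__init__(framework, key)
+#             self.framework.observe(self.on.start, self.on_start)
+#
+#         def on_start(self, event):
+#             self.unit.status = model.ActiveStatus()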
+
+
+class CharmMeta:
+    """Object containing the metadata for the charm.
+
+    This is read from metadata.yaml and/or actions.yaml. Generally charms will
+    define this information, rather than reading it at runtime. This class is
+    mostly for the framework to understand what the charm has defined.
+
+    The maintainers, tags, terms, and series attributes are all lists of
+    strings. The requires, provides, peers, relations, storages,
+    resources, and payloads attributes are all mappings of names to instances
+    of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta.
+
+    The relations attribute is a convenience accessor which includes all of the
+    requires, provides, and peers RelationMeta items.  If needed, the role of
+    the relation definition can be obtained from its role attribute.
+
+    Attributes:
+        name: The name of this charm
+        summary: Short description of what this charm does
+        description: Long description for this charm
+        maintainers: A list of strings of the email addresses of the maintainers
+                     of this charm.
+        tags: Charm store tag metadata for categories associated with this charm.
+        terms: Charm store terms that should be agreed to before this charm can
+               be deployed. (Used for things like licensing issues.)
+        series: The list of supported OS series that this charm can support.
+                The first entry in the list is the default series that will be
+                used by deploy if no other series is requested by the user.
+        subordinate: Whether this charm is intended to be used as a
+                     subordinate charm.
+        min_juju_version: If supplied, indicates this charm needs features that
+                          are not available in older versions of Juju.
+        requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+        provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+        peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+        relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+                   sections)
+        storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+        resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+        payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+        extra_bindings: A dict of additional named bindings that a charm can use
+                        for network configuration.
+        actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
+    Args:
+        raw: a mapping containing the contents of metadata.yaml
+        actions_raw: a mapping containing the contents of actions.yaml
+    """
+
+    def __init__(self, raw: dict = {}, actions_raw: dict = {}):
+        self.name = raw.get('name', '')
+        self.summary = raw.get('summary', '')
+        self.description = raw.get('description', '')
+        self.maintainers = []
+        if 'maintainer' in raw:
+            self.maintainers.append(raw['maintainer'])
+        if 'maintainers' in raw:
+            self.maintainers.extend(raw['maintainers'])
+        self.tags = raw.get('tags', [])
+        self.terms = raw.get('terms', [])
+        self.series = raw.get('series', [])
+        self.subordinate = raw.get('subordinate', False)
+        self.min_juju_version = raw.get('min-juju-version')
+        self.requires = {name: RelationMeta('requires', name, rel)
+                         for name, rel in raw.get('requires', {}).items()}
+        self.provides = {name: RelationMeta('provides', name, rel)
+                         for name, rel in raw.get('provides', {}).items()}
+        # TODO: (jam 2020-05-11) The *role* should be 'peer' even though it comes from the
+        #  'peers' section.
+        self.peers = {name: RelationMeta('peers', name, rel)
+                      for name, rel in raw.get('peers', {}).items()}
+        self.relations = {}
+        self.relations.update(self.requires)
+        self.relations.update(self.provides)
+        self.relations.update(self.peers)
+        self.storages = {name: StorageMeta(name, storage)
+                         for name, storage in raw.get('storage', {}).items()}
+        self.resources = {name: ResourceMeta(name, res)
+                          for name, res in raw.get('resources', {}).items()}
+        self.payloads = {name: PayloadMeta(name, payload)
+                         for name, payload in raw.get('payloads', {}).items()}
+        self.extra_bindings = raw.get('extra-bindings', {})
+        self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()}
+
+    @classmethod
+    def from_yaml(
+            cls, metadata: typing.Union[str, typing.TextIO],
+            actions: typing.Optional[typing.Union[str, typing.TextIO]] = None):
+        """Instantiate a CharmMeta from a YAML description of metadata.yaml.
+
+        Args:
+            metadata: A YAML description of charm metadata (name, relations, etc.)
+                This can be a simple string, or a file-like object. (passed to `yaml.safe_load`).
+            actions: YAML description of Actions for this charm (eg actions.yaml)
+        """
+        meta = yaml.safe_load(metadata)
+        raw_actions = {}
+        if actions is not None:
+            raw_actions = yaml.safe_load(actions)
+        return cls(meta, raw_actions)
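+
+# Example (illustrative sketch only): metadata.yaml and actions.yaml content can be
+# passed as plain strings or file-like objects:
+#
+#     meta = CharmMeta.from_yaml('name: squid\nsummary: a proxy\ndescription: squid cache',
+#                                'addurl: {description: Add squid config}')
+#     assert meta.name == 'squid' and 'addurl' in meta.actions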
+
+
+class RelationMeta:
+    """Object containing metadata about a relation definition.
+
+    Should not be constructed directly by Charm code; it is obtained from one of
+    :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`,
+    :attr:`CharmMeta.relations`.
+
+    Attributes:
+        role: This is one of requires/provides/peers
+        relation_name: Name of this relation from metadata.yaml
+        interface_name: Optional definition of the interface protocol.
+        scope: "global" or "container" scope based on how the relation should be used.
+    """
+
+    def __init__(self, role, relation_name, raw):
+        self.role = role
+        self.relation_name = relation_name
+        self.interface_name = raw['interface']
+        self.scope = raw.get('scope')
+
+
+class StorageMeta:
+    """Object containing metadata about a storage definition."""
+
+    def __init__(self, name, raw):
+        self.storage_name = name
+        self.type = raw['type']
+        self.description = raw.get('description', '')
+        self.shared = raw.get('shared', False)
+        self.read_only = raw.get('read-only', False)
+        self.minimum_size = raw.get('minimum-size')
+        self.location = raw.get('location')
+        self.multiple_range = None
+        if 'multiple' in raw:
+            range = raw['multiple']['range']
+            if '-' not in range:
+                self.multiple_range = (int(range), int(range))
+            else:
+                range = range.split('-')
+                self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None)
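+
+# Example (illustrative sketch only): a storage entry with `multiple: {range: 2-}`
+# parses to multiple_range == (2, None), while `multiple: {range: 3-5}` yields (3, 5).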
+
+
+class ResourceMeta:
+    """Object containing metadata about a resource definition."""
+
+    def __init__(self, name, raw):
+        self.resource_name = name
+        self.type = raw['type']
+        self.filename = raw.get('filename', None)
+        self.description = raw.get('description', '')
+
+
+class PayloadMeta:
+    """Object containing metadata about a payload definition."""
+
+    def __init__(self, name, raw):
+        self.payload_name = name
+        self.type = raw['type']
+
+
+class ActionMeta:
+    """Object containing metadata about an action's definition."""
+
+    def __init__(self, name, raw=None):
+        raw = raw or {}
+        self.name = name
+        self.title = raw.get('title', '')
+        self.description = raw.get('description', '')
+        self.parameters = raw.get('params', {})  # {<parameter name>: <JSON Schema definition>}
+        self.required = raw.get('required', [])  # [<parameter name>, ...]
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/framework.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/framework.py
new file mode 100755
index 0000000000000000000000000000000000000000..51d46ba16886bfffce0fe7b9ad91f3ac0b5902a4
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/framework.py
@@ -0,0 +1,1134 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import collections.abc
+import inspect
+import keyword
+import marshal
+import os
+import pdb
+import pickle
+import re
+import sqlite3
+import sys
+import types
+import weakref
+from datetime import timedelta
+
+from ops import charm
+
+
+class Handle:
+    """Handle defines a name for an object in the form of a hierarchical path.
+
+    The provided parent is the object (or that object's handle) that this handle
+    sits under, or None if the object identified by this handle stands by itself
+    as the root of its own hierarchy.
+
+    The handle kind is a string that defines a namespace so objects with the
+    same parent and kind will have unique keys.
+
+    The handle key is a string uniquely identifying the object. No other objects
+    under the same parent and kind may have the same key.
+    """
+
+    def __init__(self, parent, kind, key):
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        self._parent = parent
+        self._kind = kind
+        self._key = key
+        if parent:
+            if key:
+                self._path = "{}/{}[{}]".format(parent, kind, key)
+            else:
+                self._path = "{}/{}".format(parent, kind)
+        else:
+            if key:
+                self._path = "{}[{}]".format(kind, key)
+            else:
+                self._path = "{}".format(kind)
+
+    def nest(self, kind, key):
+        return Handle(self, kind, key)
+
+    def __hash__(self):
+        return hash((self.parent, self.kind, self.key))
+
+    def __eq__(self, other):
+        return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key)
+
+    def __str__(self):
+        return self.path
+
+    @property
+    def parent(self):
+        return self._parent
+
+    @property
+    def kind(self):
+        return self._kind
+
+    @property
+    def key(self):
+        return self._key
+
+    @property
+    def path(self):
+        return self._path
+
+    @classmethod
+    def from_path(cls, path):
+        handle = None
+        for pair in path.split("/"):
+            pair = pair.split("[")
+            good = False
+            if len(pair) == 1:
+                kind, key = pair[0], None
+                good = True
+            elif len(pair) == 2:
+                kind, key = pair
+                if key and key[-1] == ']':
+                    key = key[:-1]
+                    good = True
+            if not good:
+                raise RuntimeError("attempted to restore invalid handle path {}".format(path))
+            handle = Handle(handle, kind, key)
+        return handle
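+
+# Example (illustrative sketch only; the kinds and key are made up): handle paths
+# nest parent/kind[key] segments and round-trip through from_path:
+#
+#     h = Handle(None, 'SquidCharm', None).nest('on', None).nest('start', '1')
+#     assert h.path == 'SquidCharm/on/start[1]'
+#     assert Handle.from_path(h.path) == h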
+
+
+class EventBase:
+
+    def __init__(self, handle):
+        self.handle = handle
+        self.deferred = False
+
+    def defer(self):
+        self.deferred = True
+
+    def snapshot(self):
+        """Return the snapshot data that should be persisted.
+
+        Subclasses must override to save any custom state.
+        """
+        return None
+
+    def restore(self, snapshot):
+        """Restore the value state from the given snapshot.
+
+        Subclasses must override to restore their custom state.
+        """
+        self.deferred = False
+
+
+class EventSource:
+    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+    It is generally used as:
+
+        class SomethingHappened(EventBase):
+            pass
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    With that, instances of that type will offer the someobj.something_happened
+    attribute which is a BoundEvent and may be used to emit and observe the event.
+    """
+
+    def __init__(self, event_type):
+        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+            raise RuntimeError(
+                'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
+        self.event_type = event_type
+        self.event_kind = None
+        self.emitter_type = None
+
+    def _set_name(self, emitter_type, event_kind):
+        if self.event_kind is not None:
+            raise RuntimeError(
+                'EventSource({}) reused as {}.{} and {}.{}'.format(
+                    self.event_type.__name__,
+                    self.emitter_type.__name__,
+                    self.event_kind,
+                    emitter_type.__name__,
+                    event_kind,
+                ))
+        self.event_kind = event_kind
+        self.emitter_type = emitter_type
+
+    def __get__(self, emitter, emitter_type=None):
+        if emitter is None:
+            return self
+        # Framework might not be available if accessed as CharmClass.on.event
+        # rather than charm_instance.on.event, but in that case it couldn't be
+        # emitted anyway, so there's no point in registering it.
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
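+
+# Example (illustrative sketch only, reusing the SomethingHappened/SomeObject sketch
+# from the EventSource docstring; `parent` and `observer` are assumed to exist):
+#
+#     obj = SomeObject(parent, 'key')
+#     parent.framework.observe(obj.something_happened, observer.on_something_happened)
+#     obj.something_happened.emit()   # notifies all registered observers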
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+          EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
+
+    @property
+    def model(self):
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data. Doing this unusual construct
+            # means people can subclass just this one class to have their own 'on'.
+            instance = self._cache[emitter] = type(self)(emitter)
+        return instance
+
+    @classmethod
+    def define_event(cls, event_kind, event_type):
+        """Define an event on this type at runtime.
+
+        cls: a type to define an event on.
+
+        event_kind: an attribute name that will be used to access the
+                    event. Must be a valid python identifier, not be a keyword
+                    or an existing attribute.
+
+        event_type: a type of the event to define.
+
+        """
+        prefix = 'unable to define an event with event_kind that '
+        if not event_kind.isidentifier():
+            raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
+        elif keyword.iskeyword(event_kind):
+            raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
+        try:
+            getattr(cls, event_kind)
+            raise RuntimeError(
+                prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
+        except AttributeError:
+            pass
+
+        event_descriptor = EventSource(event_type)
+        event_descriptor._set_name(cls, event_kind)
+        setattr(cls, event_kind, event_descriptor)
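+
+    # Example (illustrative sketch only): this is how CharmBase exposes dynamically
+    # named events such as per-relation hooks at runtime:
+    #
+    #     CharmEvents.define_event('db_relation_changed', RelationChangedEvent)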
+
+    def events(self):
+        """Return a mapping of event_kinds to bound_events for all available events.
+        """
+        events_map = {}
+        # We have to iterate over the class rather than instance to allow for properties which
+        # might call this method (e.g., event views), leading to infinite recursion.
+        for attr_name, attr_value in inspect.getmembers(type(self)):
+            if isinstance(attr_value, EventSource):
+                # We actually care about the bound_event, however, since it
+                # provides the most info for users of this method.
+                event_kind = attr_name
+                bound_event = getattr(self, event_kind)
+                events_map[event_kind] = bound_event
+        return events_map
+
+    def __getitem__(self, key):
+        return PrefixedEvents(self, key)
+
+
+class PrefixedEvents:
+
+    def __init__(self, emitter, key):
+        self._emitter = emitter
+        self._prefix = key.replace("-", "_") + '_'
+
+    def __getattr__(self, name):
+        return getattr(self._emitter, self._prefix + name)
+
+
+class PreCommitEvent(EventBase):
+    pass
+
+
+class CommitEvent(EventBase):
+    pass
+
+
+class FrameworkEvents(ObjectEvents):
+    pre_commit = EventSource(PreCommitEvent)
+    commit = EventSource(CommitEvent)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
+
+
+class NoTypeError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return "cannot restore {} since no class was registered for it".format(self.handle_path)
+
+
+class SQLiteStorage:
+
+    DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+    def __init__(self, filename):
+        # The isolation_level argument is set to None such that the implicit
+        # transaction management behavior of the sqlite3 module is disabled.
+        self._db = sqlite3.connect(str(filename),
+                                   isolation_level=None,
+                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+        self._setup()
+
+    def _setup(self):
+        # Make sure that the database is locked until the connection is closed,
+        # not until the transaction ends.
+        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+        c = self._db.execute("BEGIN")
+        c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+        if c.fetchone()[0] == 0:
+            # Keep in mind what might happen if the process dies somewhere below.
+            # The system must not be rendered permanently broken by that.
+            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+            self._db.execute('''
+                CREATE TABLE notice (
+                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+                  event_path TEXT,
+                  observer_path TEXT,
+                  method_name TEXT)
+                ''')
+            self._db.commit()
+
+    def close(self):
+        self._db.close()
+
+    def commit(self):
+        self._db.commit()
+
+    # There's commit but no rollback. For abort to be supported, we'll need logic that
+    # can roll back decisions made by third-party code in terms of the internal state
+    # of objects that have been snapshotted, and hooks to let them know about it and
+    # take the needed actions to undo their logic until the last snapshot.
+    # This is doable but would significantly increase the chances of mistakes.
+
+    def save_snapshot(self, handle_path, snapshot_data):
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, snapshot_data))
+
+    def load_snapshot(self, handle_path):
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return row[0]
+        return None
+
+    def drop_snapshot(self, handle_path):
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def save_notice(self, event_path, observer_path, method_name):
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path, observer_path, method_name):
+        self._db.execute('''
+            DELETE FROM notice
+             WHERE event_path=?
+               AND observer_path=?
+               AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path):
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 WHERE event_path=?
+                 ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                  FROM notice
+                 ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+class Framework(Object):
+
+    on = FrameworkEvents()
+
+    # Override properties from Object so that we can set them in __init__.
+    model = None
+    meta = None
+    charm_dir = None
+
+    def __init__(self, data_path, charm_dir, meta, model):
+
+        super().__init__(self, None)
+
+        self._data_path = data_path
+        self.charm_dir = charm_dir
+        self.meta = meta
+        self.model = model
+        self._observers = []      # [(observer_path, method_name, parent_path, event_key)]
+        self._observer = weakref.WeakValueDictionary()       # {observer_path: observer}
+        self._objects = weakref.WeakValueDictionary()
+        self._type_registry = {}  # {(parent_path, kind): cls}
+        self._type_known = set()  # {cls}
+
+        self._storage = SQLiteStorage(data_path)
+
+        # We can't use the higher-level StoredState because it relies on events.
+        self.register_type(StoredStateData, None, StoredStateData.handle_kind)
+        stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
+        try:
+            self._stored = self.load_snapshot(stored_handle)
+        except NoSnapshotError:
+            self._stored = StoredStateData(self, '_stored')
+            self._stored['event_count'] = 0
+
+        # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
+        # breakpoint(); if Python < 3.7, this doesn't affect anything
+        sys.breakpointhook = self.breakpoint
+
+        # Flag to indicate that we already presented the welcome message in a debugger breakpoint
+        self._breakpoint_welcomed = False
+
+        # Parse the env var once; it may be used multiple times later
+        debug_at = os.environ.get('JUJU_DEBUG_AT')
+        self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
+    def close(self):
+        self._storage.close()
+
+    def _track(self, obj):
+        """Track object and ensure it is the only object created using its handle path."""
+        if obj is self:
+            # Framework objects don't track themselves
+            return
+        if obj.handle.path in self.framework._objects:
+            raise RuntimeError(
+                'two objects claiming to be {} have been created'.format(obj.handle.path))
+        self._objects[obj.handle.path] = obj
+
+    def _forget(self, obj):
+        """Stop tracking the given object. See also _track."""
+        self._objects.pop(obj.handle.path, None)
+
+    def commit(self):
+        # Give a chance for objects to persist data they want to before a commit is made.
+        self.on.pre_commit.emit()
+        # Make sure snapshots are saved by instances of StoredStateData. Any possible state
+        # modifications in on_commit handlers of instances of other classes will not be persisted.
+        self.on.commit.emit()
+        # Save our event count after all events have been emitted.
+        self.save_snapshot(self._stored)
+        self._storage.commit()
+
+    def register_type(self, cls, parent, kind=None):
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        if parent:
+            parent_path = parent.path
+        else:
+            parent_path = None
+        if not kind:
+            kind = cls.handle_kind
+        self._type_registry[(parent_path, kind)] = cls
+        self._type_known.add(cls)
+
+    def save_snapshot(self, value):
+        """Save a persistent snapshot of the provided value.
+
+        The provided value must implement the following interface:
+
+        value.handle = Handle(...)
+        value.snapshot() => {...}  # Simple builtin types only.
+        value.restore(snapshot)    # Restore custom state from prior snapshot.
+        """
+        if type(value) not in self._type_known:
+            raise RuntimeError(
+                'cannot save {} values before registering that type'.format(type(value).__name__))
+        data = value.snapshot()
+
+        # Use marshal as a validator, enforcing the use of simple types, since the
+        # information is later pickled, which would otherwise be too error prone for
+        # future evolution of the stored data (e.g. if the developer stores a custom
+        # object and later changes its class name; when unpickling, the original class
+        # will not be there and event data loading will fail).
+        try:
+            marshal.dumps(data)
+        except ValueError:
+            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+            raise ValueError(msg.format(value.__class__.__name__, data))
+
+        # Use pickle for serialization, so the value remains portable.
+        raw_data = pickle.dumps(data)
+        self._storage.save_snapshot(value.handle.path, raw_data)
+
+    def load_snapshot(self, handle):
+        parent_path = None
+        if handle.parent:
+            parent_path = handle.parent.path
+        cls = self._type_registry.get((parent_path, handle.kind))
+        if not cls:
+            raise NoTypeError(handle.path)
+        raw_data = self._storage.load_snapshot(handle.path)
+        if not raw_data:
+            raise NoSnapshotError(handle.path)
+        data = pickle.loads(raw_data)
+        obj = cls.__new__(cls)
+        obj.framework = self
+        obj.handle = handle
+        obj.restore(data)
+        self._track(obj)
+        return obj
+
+    def drop_snapshot(self, handle):
+        self._storage.drop_snapshot(handle.path)
+
+    def observe(self, bound_event, observer):
+        """Register observer to be called when bound_event is emitted.
+
+        The bound_event is generally provided as an attribute of the object that emits
+        the event, and is created in this style:
+
+            class SomeObject:
+                something_happened = Event(SomethingHappened)
+
+        That event may be observed as:
+
+            framework.observe(someobj.something_happened, self.on_something_happened)
+
+        If the method to be called follows the name convention "on_<event name>", it
+        may be omitted from the observe call. That means the above is equivalent to:
+
+            framework.observe(someobj.something_happened, self)
+
+        """
+        if not isinstance(bound_event, BoundEvent):
+            raise RuntimeError(
+                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+                    bound_event))
+
+        event_type = bound_event.event_type
+        event_kind = bound_event.event_kind
+        emitter = bound_event.emitter
+
+        self.register_type(event_type, emitter, event_kind)
+
+        if hasattr(emitter, "handle"):
+            emitter_path = emitter.handle.path
+        else:
+            raise RuntimeError(
+                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+        method_name = None
+        if isinstance(observer, types.MethodType):
+            method_name = observer.__name__
+            observer = observer.__self__
+        else:
+            method_name = "on_" + event_kind
+            if not hasattr(observer, method_name):
+                raise RuntimeError(
+                    'Observer method not provided explicitly'
+                    ' and {} type has no "{}" method'.format(type(observer).__name__,
+                                                             method_name))
+
+        # Validate that the method has an acceptable call signature.
+        sig = inspect.signature(getattr(observer, method_name))
+        # Self isn't included in the params list, so the first arg will be the event.
+        extra_params = list(sig.parameters.values())[1:]
+        if not sig.parameters:
+            raise TypeError(
+                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+        elif any(param.default is inspect.Parameter.empty for param in extra_params):
+            # Allow for additional optional params, since there's no reason to exclude them, but
+            # required params will break.
+            raise TypeError(
+                '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))
+
+        # TODO Prevent the exact same parameters from being registered more than once.
+
+        self._observer[observer.handle.path] = observer
+        self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
+    def _next_event_key(self):
+        """Return the next event key that should be used, incrementing the internal counter."""
+        # Increment the count first; this means the keys will start at 1, and 0
+        # means no events have been emitted.
+        self._stored['event_count'] += 1
+        return str(self._stored['event_count'])
+
+    def _emit(self, event):
+        """See BoundEvent.emit for the public way to call this."""
+
+        # Save the event for all known observers before the first notification
+        # takes place, so that either everyone interested sees it, or nobody does.
+        self.save_snapshot(event)
+        event_path = event.handle.path
+        event_kind = event.handle.kind
+        parent_path = event.handle.parent.path
+        # TODO Track observers by (parent_path, event_kind) rather than as a list of
+        # all observers, to avoid a linear search through all observers for every event.
+        for observer_path, method_name, _parent_path, _event_kind in self._observers:
+            if _parent_path != parent_path:
+                continue
+            if _event_kind and _event_kind != event_kind:
+                continue
+            # Again, only commit this after all notices are saved.
+            self._storage.save_notice(event_path, observer_path, method_name)
+        self._reemit(event_path)
+
+    def reemit(self):
+        """Reemit previously deferred events to the observers that deferred them.
+
+        Only the specific observers that have previously deferred the event will be
+        notified again. Observers that asked to be notified about events after it's
+        been first emitted won't be notified, as that would mean potentially observing
+        events out of order.
+        """
+        self._reemit()
+
+    def _reemit(self, single_event_path=None):
+        last_event_path = None
+        deferred = True
+        for event_path, observer_path, method_name in self._storage.notices(single_event_path):
+            event_handle = Handle.from_path(event_path)
+
+            if last_event_path != event_path:
+                if not deferred:
+                    self._storage.drop_snapshot(last_event_path)
+                last_event_path = event_path
+                deferred = False
+
+            try:
+                event = self.load_snapshot(event_handle)
+            except NoTypeError:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+                continue
+
+            event.deferred = False
+            observer = self._observer.get(observer_path)
+            if observer:
+                custom_handler = getattr(observer, method_name, None)
+                if custom_handler:
+                    event_is_from_juju = isinstance(event, charm.HookEvent)
+                    event_is_action = isinstance(event, charm.ActionEvent)
+                    if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at:
+                        # Present the welcome message and run under PDB.
+                        self._show_debug_code_message()
+                        pdb.runcall(custom_handler, event)
+                    else:
+                        # Regular call to the registered method.
+                        custom_handler(event)
+
+            if event.deferred:
+                deferred = True
+            else:
+                self._storage.drop_notice(event_path, observer_path, method_name)
+            # We intentionally consider this event to be dead and reload it from
+            # scratch in the next pass.
+            self.framework._forget(event)
+
+        if not deferred:
+            self._storage.drop_snapshot(last_event_path)
+
+    def _show_debug_code_message(self):
+        """Present the welcome message (only once!) when using debugger functionality."""
+        if not self._breakpoint_welcomed:
+            self._breakpoint_welcomed = True
+            print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='')
+
+    def breakpoint(self, name=None):
+        """Add breakpoint, optionally named, at the place where this method is called.
+
+        For the breakpoint to be activated the JUJU_DEBUG_AT environment variable
+        must be set to "all" or to the specific name parameter provided, if any. In every
+        other situation calling this method does nothing.
+
+        The framework also provides a standard breakpoint named "hook", that will
+        stop execution when a hook event is about to be handled.
+
+        For those reasons, the "all" and "hook" breakpoint names are reserved.
+        """
+        # If given, validate that the name complies with all the rules
+        if name is not None:
+            if not isinstance(name, str):
+                raise TypeError('breakpoint names must be strings')
+            if name in ('hook', 'all'):
+                raise ValueError('breakpoint names "all" and "hook" are reserved')
+            if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name):
+                raise ValueError('breakpoint names must look like "foo" or "foo-bar"')
+
+        indicated_breakpoints = self._juju_debug_at
+        if 'all' in indicated_breakpoints or name in indicated_breakpoints:
+            self._show_debug_code_message()
+
+            # If we called set_trace() directly it would open the debugger *here*, so
+            # instruct it to use our caller's frame instead.
+            code_frame = inspect.currentframe().f_back
+            pdb.Pdb().set_trace(code_frame)
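+
+# Example (illustrative sketch only; the breakpoint name is made up): a charm can mark
+# a named debugging point that only activates when JUJU_DEBUG_AT contains that name
+# or "all":
+#
+#     self.framework.breakpoint('before-config')  # no-op unless activated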
+
+
+class StoredStateData(Object):
+
+    def __init__(self, parent, attr_name):
+        super().__init__(parent, attr_name)
+        self._cache = {}
+        self.dirty = False
+
+    def __getitem__(self, key):
+        return self._cache.get(key)
+
+    def __setitem__(self, key, value):
+        self._cache[key] = value
+        self.dirty = True
+
+    def __contains__(self, key):
+        return key in self._cache
+
+    def snapshot(self):
+        return self._cache
+
+    def restore(self, snapshot):
+        self._cache = snapshot
+        self.dirty = False
+
+    def on_commit(self, event):
+        if self.dirty:
+            self.framework.save_snapshot(self)
+            self.dirty = False
+
+
+class BoundStoredState:
+
+    def __init__(self, parent, attr_name):
+        parent.framework.register_type(StoredStateData, parent)
+
+        handle = Handle(parent, StoredStateData.handle_kind, attr_name)
+        try:
+            data = parent.framework.load_snapshot(handle)
+        except NoSnapshotError:
+            data = StoredStateData(parent, attr_name)
+
+        # __dict__ is used to avoid infinite recursion.
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = _unwrap_stored(self._data, value)
+
+    def set_default(self, **kwargs):
+        """"Set the value of any given key if it has not already been set"""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+                if bound is not None:
+                    # the StoredState instance is being stored in two different
+                    # attributes -> unclear what is expected of us -> bail out
+                    raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format(
+                        cls.__name__, self.attr_name, attr_name))
+                # we've found ourselves for the first time; save where, and bind the object
+                self.attr_name = attr_name
+                self.parent_type = cls
+                bound = BoundStoredState(parent, attr_name)
+
+        if bound is not None:
+            # cache the bound object to avoid the expensive lookup the next time
+            # (don't use setattr, to keep things symmetric with the fast-path lookup above)
+            parent.__dict__[self.attr_name] = bound
+            return bound
+
+        raise AttributeError(
+            'cannot find {} attribute in type {}'.format(
+                self.__class__.__name__, parent_type.__name__))
+
+
+def _wrap_stored(parent_data, value):
+    t = type(value)
+    if t is dict:
+        return StoredDict(parent_data, value)
+    if t is list:
+        return StoredList(parent_data, value)
+    if t is set:
+        return StoredSet(parent_data, value)
+    return value
+
+
+def _unwrap_stored(parent_data, value):
+    t = type(value)
+    if t is StoredDict or t is StoredList or t is StoredSet:
+        return value._under
+    return value
+
+
+class StoredDict(collections.abc.MutableMapping):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, key):
+        return _wrap_stored(self._stored_data, self._under[key])
+
+    def __setitem__(self, key, value):
+        self._under[key] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, key):
+        del self._under[key]
+        self._stored_data.dirty = True
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    def __eq__(self, other):
+        if isinstance(other, StoredDict):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Mapping):
+            return self._under == other
+        else:
+            return NotImplemented
+
+
+class StoredList(collections.abc.MutableSequence):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def __getitem__(self, index):
+        return _wrap_stored(self._stored_data, self._under[index])
+
+    def __setitem__(self, index, value):
+        self._under[index] = _unwrap_stored(self._stored_data, value)
+        self._stored_data.dirty = True
+
+    def __delitem__(self, index):
+        del self._under[index]
+        self._stored_data.dirty = True
+
+    def __len__(self):
+        return len(self._under)
+
+    def insert(self, index, value):
+        self._under.insert(index, value)
+        self._stored_data.dirty = True
+
+    def append(self, value):
+        self._under.append(value)
+        self._stored_data.dirty = True
+
+    def __eq__(self, other):
+        if isinstance(other, StoredList):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under == other
+        else:
+            return NotImplemented
+
+    def __lt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under < other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under < other
+        else:
+            return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, StoredList):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, StoredList):
+            return self._under > other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under > other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredList):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Sequence):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+
+class StoredSet(collections.abc.MutableSet):
+
+    def __init__(self, stored_data, under):
+        self._stored_data = stored_data
+        self._under = under
+
+    def add(self, key):
+        self._under.add(key)
+        self._stored_data.dirty = True
+
+    def discard(self, key):
+        self._under.discard(key)
+        self._stored_data.dirty = True
+
+    def __contains__(self, key):
+        return key in self._under
+
+    def __iter__(self):
+        return self._under.__iter__()
+
+    def __len__(self):
+        return len(self._under)
+
+    @classmethod
+    def _from_iterable(cls, it):
+        """Construct an instance of the class from any iterable input.
+
+        Per https://docs.python.org/3/library/collections.abc.html
+        if the Set mixin is being used in a class with a different constructor signature,
+        you will need to override _from_iterable() with a classmethod that can construct
+        new instances from an iterable argument.
+        """
+        return set(it)
+
+    def __le__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under <= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under <= other
+        else:
+            return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under >= other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under >= other
+        else:
+            return NotImplemented
+
+    def __eq__(self, other):
+        if isinstance(other, StoredSet):
+            return self._under == other._under
+        elif isinstance(other, collections.abc.Set):
+            return self._under == other
+        else:
+            return NotImplemented
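+
+
+# Example (illustrative): containers held on a StoredState come back wrapped by
+# the classes above so that in-place mutation is tracked, e.g. in charm code:
+#
+#     self._stored.set_default(urls=[])
+#     self._stored.urls.append('google.com')   # StoredList marks the data dirty
+#     self._stored.urls[0]                     # -> 'google.com'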
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/jujuversion.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..4517886218c143f8c0249ac7285dc594976f9b01
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/jujuversion.py
@@ -0,0 +1,85 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+    '''
+
+    def __init__(self, version):
+        m = re.match(self.PATTERN, version, re.VERBOSE)
+        if not m:
+            raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+        d = m.groupdict()
+        self.major = int(m.group('major'))
+        self.minor = int(m.group('minor'))
+        self.tag = d['tag'] or ''
+        self.patch = int(d['patch'] or 0)
+        self.build = int(d['build'] or 0)
+
+    def __repr__(self):
+        if self.tag:
+            s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+        else:
+            s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+        if self.build > 0:
+            s += '.{}'.format(self.build)
+        return s
+
+    def __eq__(self, other):
+        if self is other:
+            return True
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+        return (
+            self.major == other.major
+            and self.minor == other.minor
+            and self.tag == other.tag
+            and self.build == other.build
+            and self.patch == other.patch)
+
+    def __lt__(self, other):
+        if self is other:
+            return False
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+        if self.major != other.major:
+            return self.major < other.major
+        elif self.minor != other.minor:
+            return self.minor < other.minor
+        elif self.tag != other.tag:
+            if not self.tag:
+                return False
+            elif not other.tag:
+                return True
+            return self.tag < other.tag
+        elif self.patch != other.patch:
+            return self.patch < other.patch
+        elif self.build != other.build:
+            return self.build < other.build
+        return False
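+
+
+# Example (illustrative): versions compare component-wise and pre-release tags
+# sort before the corresponding release; plain strings are parsed on the fly:
+#
+#     JujuVersion('2.7.0') < JujuVersion('2.8-beta1')   # True
+#     JujuVersion('2.8-beta1') < '2.8.0'                # True
+#     JujuVersion('2.8.0.1') == '2.8.0.1'               # True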
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/log.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/log.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3f76a375a98e23c718e47bcde5c33b49f4031c7
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/log.py
@@ -0,0 +1,47 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+    """A handler for sending logs to Juju via juju-log."""
+
+    def __init__(self, model_backend, level=logging.DEBUG):
+        super().__init__(level)
+        self.model_backend = model_backend
+
+    def emit(self, record):
+        self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+    """Setup python logging to forward messages to juju-log.
+
+    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+    Charmers can also set their own default log level with::
+
+      logging.getLogger().setLevel(logging.INFO)
+
+    model_backend -- a ModelBackend to use for juju-log
+    debug -- if True, write logs to stderr as well as to juju-log.
+    """
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(JujuLogHandler(model_backend))
+    if debug:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
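+
+
+# Example (illustrative; mirrors how ops.main.main wires this up):
+#
+#     model_backend = ops.model.ModelBackend()
+#     setup_root_logging(model_backend, debug='JUJU_DEBUG' in os.environ)
+#     logging.getLogger(__name__).info('charm starting')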
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/main.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/main.py
new file mode 100755
index 0000000000000000000000000000000000000000..0f5391d76e45ba32dc652adfc99b2c7716d8af36
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/main.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import logging
+
+from ops.log import setup_root_logging
+
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _get_charm_dir():
+    charm_dir = os.environ.get("JUJU_CHARM_DIR")
+    if charm_dir is None:
+        # Assume $JUJU_CHARM_DIR/lib/op/main.py structure.
+        charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+    else:
+        charm_dir = Path(charm_dir).resolve()
+    return charm_dir
+
+
+def _load_metadata(charm_dir):
+    metadata = yaml.safe_load((charm_dir / 'metadata.yaml').read_text())
+
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = yaml.safe_load(actions_meta.read_text())
+    else:
+        actions_metadata = {}
+    return metadata, actions_metadata
+
+
+def _create_event_link(charm, bound_event):
+    """Create a symlink for a particular event.
+
+    charm -- A charm object.
+    bound_event -- An event for which to create a symlink.
+    """
+    if issubclass(bound_event.event_type, ops.charm.HookEvent):
+        event_dir = charm.framework.charm_dir / 'hooks'
+        event_path = event_dir / bound_event.event_kind.replace('_', '-')
+    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+        if not bound_event.event_kind.endswith("_action"):
+            raise RuntimeError(
+                'action event name {} needs _action suffix'.format(bound_event.event_kind))
+        event_dir = charm.framework.charm_dir / 'actions'
+        # The event_kind is suffixed with "_action" while the executable is not.
+        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+    else:
+        raise RuntimeError(
+            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+    event_dir.mkdir(exist_ok=True)
+    if not event_path.exists():
+        # CPython has different implementations for populating sys.argv[0] for Linux and Windows.
+        # For Windows it is always an absolute path (any symlinks are resolved)
+        # while for Linux it can be a relative path.
+        target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir))
+
+        # Ignore the non-symlink files or directories
+        # assuming the charm author knows what they are doing.
+        logger.debug(
+            'Creating a new relative symlink at %s pointing to %s',
+            event_path, target_path)
+        event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+    """Set up links for supported events that originate from Juju.
+
+    Whether a charm can handle an event or not can be determined by
+    introspecting which events are defined on it.
+
+    Hooks or actions are created as symlinks to the charm code file
+    which is determined by inspecting symlinks provided by the charm
+    author at hooks/install or hooks/start.
+
+    charm_dir -- A root directory of the charm.
+    charm -- An instance of the Charm class.
+
+    """
+    for bound_event in charm.on.events().values():
+        # Only events that originate from Juju need symlinks.
+        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+            _create_event_link(charm, bound_event)
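+
+# Example (illustrative): for a charm whose entry point is src/charm.py, the
+# links created above typically end up as relative symlinks such as
+#
+#     hooks/install        -> ../src/charm.py
+#     hooks/config-changed -> ../src/charm.py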
+
+
+def _emit_charm_event(charm, event_name):
+    """Emits a charm event based on a Juju event name.
+
+    charm -- A charm instance to emit an event from.
+    event_name -- A Juju event name to emit on a charm.
+    """
+    event_to_emit = None
+    try:
+        event_to_emit = getattr(charm.on, event_name)
+    except AttributeError:
+        logger.debug("event %s not defined for %s", event_name, charm)
+
+    # If the event is not supported by the charm implementation, do
+    # not error out or try to emit it. This is to support rollbacks.
+    if event_to_emit is not None:
+        args, kwargs = _get_event_args(charm, event_to_emit)
+        logger.debug('Emitting Juju event %s', event_name)
+        event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+    event_type = bound_event.event_type
+    model = charm.framework.model
+
+    if issubclass(event_type, ops.charm.RelationEvent):
+        relation_name = os.environ['JUJU_RELATION']
+        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+        relation = model.get_relation(relation_name, relation_id)
+    else:
+        relation = None
+
+    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+    if remote_app_name or remote_unit_name:
+        if not remote_app_name:
+            if '/' not in remote_unit_name:
+                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+            remote_app_name = remote_unit_name.split('/')[0]
+        args = [relation, model.get_app(remote_app_name)]
+        if remote_unit_name:
+            args.append(model.get_unit(remote_unit_name))
+        return args, {}
+    elif relation:
+        return [relation], {}
+    return [], {}
+
+
+def main(charm_class):
+    """Setup the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model.ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+
+    # Process the Juju event relevant to the current hook execution
+    # JUJU_HOOK_NAME, JUJU_FUNCTION_NAME, and JUJU_ACTION_NAME are not used
+    # in order to support simulation of events from debugging sessions.
+    #
+    # TODO: For Windows, when symlinks are used, this is not a valid
+    #       method of getting an event name (see LP: #1854505).
+    juju_exec_path = Path(sys.argv[0])
+    has_dispatch = juju_exec_path.name == 'dispatch'
+    if has_dispatch:
+        # The executable was 'dispatch', which means the actual hook we want to
+        # run needs to be looked up in the JUJU_DISPATCH_PATH env var, where it
+        # should be a path relative to the charm directory (the directory that
+        # holds `dispatch`). If that path actually exists, we want to run that
+        # before continuing.
+        dispatch_path = juju_exec_path.parent / Path(os.environ['JUJU_DISPATCH_PATH'])
+        if dispatch_path.exists() and dispatch_path.resolve() != juju_exec_path.resolve():
+            argv = sys.argv.copy()
+            argv[0] = str(dispatch_path)
+            try:
+                subprocess.run(argv, check=True)
+            except subprocess.CalledProcessError as e:
+                logger.warning("hook %s exited with status %d", dispatch_path, e.returncode)
+                sys.exit(e.returncode)
+        juju_exec_path = dispatch_path
+    juju_event_name = juju_exec_path.name.replace('-', '_')
+    if juju_exec_path.parent.name == 'actions':
+        juju_event_name = '{}_action'.format(juju_event_name)
+
+    metadata, actions_metadata = _load_metadata(charm_dir)
+    meta = ops.charm.CharmMeta(metadata, actions_metadata)
+    unit_name = os.environ['JUJU_UNIT_NAME']
+    model = ops.model.Model(unit_name, meta, model_backend)
+
+    # TODO: If Juju unit agent crashes after exit(0) from the charm code
+    # the framework will commit the snapshot but Juju will not commit its
+    # operation.
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+    framework = ops.framework.Framework(charm_state_path, charm_dir, meta, model)
+    try:
+        charm = charm_class(framework, None)
+
+        if not has_dispatch:
+            # When a charm is force-upgraded and a unit is in an error state Juju
+            # does not run upgrade-charm and instead runs the failed hook followed
+            # by config-changed. Given the nature of force-upgrading the hook setup
+            # code is not triggered on config-changed.
+            #
+            # 'start' event is included as Juju does not fire the install event for
+            # K8s charms (see LP: #1854635).
+            if (juju_event_name in ('install', 'start', 'upgrade_charm')
+                    or juju_event_name.endswith('_storage_attached')):
+                _setup_event_links(charm_dir, charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        #       Juju changes are made.
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if juju_event_name != 'collect_metrics':
+            framework.reemit()
+
+        _emit_charm_event(charm, juju_event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
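+
+
+# Example (illustrative; the charm class name is hypothetical): a charm's entry
+# point script typically just hands its CharmBase subclass to main():
+#
+#     from ops.charm import CharmBase
+#     from ops.main import main
+#
+#     class SquidCharm(CharmBase):
+#         ...
+#
+#     if __name__ == '__main__':
+#         main(SquidCharm)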
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/model.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d19ba8bff166aa0e3c0acc7dd4fe4b42690918c
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/model.py
@@ -0,0 +1,915 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import weakref
+import os
+import shutil
+import tempfile
+import time
+import datetime
+import re
+import ipaddress
+import decimal
+
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, MutableMapping
+from pathlib import Path
+from subprocess import run, PIPE, CalledProcessError
+
+
+class Model:
+
+    def __init__(self, unit_name, meta, backend):
+        self._cache = _ModelCache(backend)
+        self._backend = backend
+        self.unit = self.get_unit(unit_name)
+        self.app = self.unit.app
+        self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache)
+        self.config = ConfigData(self._backend)
+        self.resources = Resources(list(meta.resources), self._backend)
+        self.pod = Pod(self._backend)
+        self.storages = StorageMapping(list(meta.storages), self._backend)
+        self._bindings = BindingMapping(self._backend)
+
+    def get_unit(self, unit_name):
+        return self._cache.get(Unit, unit_name)
+
+    def get_app(self, app_name):
+        return self._cache.get(Application, app_name)
+
+    def get_relation(self, relation_name, relation_id=None):
+        """Get a specific Relation instance.
+
+        If relation_id is given, this will return that Relation instance.
+
+        If relation_id is not given, this will return the Relation instance if the
+        relation is established only once or None if it is not established. If this
+        same relation is established multiple times the error TooManyRelatedAppsError is raised.
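+
+        Example (from charm code; ``'db'`` is a hypothetical relation name)::
+
+            rel = self.model.get_relation('db')      # the single 'db' relation, or None
+            rel0 = self.model.get_relation('db', 0)  # the 'db' relation with id 0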
+        """
+        return self.relations._get_unique(relation_name, relation_id)
+
+    def get_binding(self, binding_key):
+        """Get a network space binding.
+
+        binding_key -- The relation name or instance to obtain bindings for.
+
+        If binding_key is a relation name, the method returns the default binding for that
+        relation. If a relation instance is provided, the method first looks up a more specific
+        binding for that specific relation ID, and if none is found falls back to the default
+        binding for the relation name.
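+
+        Example (from charm code; ``'db'`` is a hypothetical relation name)::
+
+            binding = self.model.get_binding('db')
+            address = binding.network.bind_address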
+        """
+        return self._bindings.get(binding_key)
+
+
+class _ModelCache:
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._weakrefs = weakref.WeakValueDictionary()
+
+    def get(self, entity_type, *args):
+        key = (entity_type,) + args
+        entity = self._weakrefs.get(key)
+        if entity is None:
+            entity = entity_type(*args, backend=self._backend, cache=self)
+            self._weakrefs[key] = entity
+        return entity
+
+
+class Application:
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+        self._backend = backend
+        self._cache = cache
+        self._is_our_app = self.name == self._backend.app_name
+        self._status = None
+
+    @property
+    def status(self):
+        if not self._is_our_app:
+            return UnknownStatus()
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot get application status as a non-leader unit')
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=True)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for application {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_app:
+            raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot set application status as a non-leader unit')
+
+        self._backend.status_set(value.name, value.message, is_app=True)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        self._status = None
+
+    @property
+    def status(self):
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self):
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+            # of a hook execution.
+            return self._backend.is_leader()
+        else:
+            raise RuntimeError(
+                'cannot determine leadership status for remote applications: {}'.format(self)
+            )
+
+    def set_workload_version(self, version):
+        """Record the version of the software running as the workload.
+
+        This shouldn't be confused with the revision of the charm. This is informative only;
+        shown in the output of 'juju status'.
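+
+        Example (the version string is illustrative)::
+
+            self.unit.set_workload_version('4.10')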
+        """
+        if not isinstance(version, str):
+            raise TypeError("workload version must be a str, not {}: {!r}".format(
+                type(version).__name__, version))
+        self._backend.application_version_set(version)
+
+
+class LazyMapping(Mapping, ABC):
+
+    _lazy_data = None
+
+    @abstractmethod
+    def _load(self):
+        raise NotImplementedError()
+
+    @property
+    def _data(self):
+        data = self._lazy_data
+        if data is None:
+            data = self._lazy_data = self._load()
+        return data
+
+    def _invalidate(self):
+        self._lazy_data = None
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+
+class RelationMapping(Mapping):
+    """Map of relation names to lists of Relation instances."""
+
+    def __init__(self, relations_meta, our_unit, backend, cache):
+        self._peers = set()
+        for name, relation_meta in relations_meta.items():
+            if relation_meta.role == 'peers':
+                self._peers.add(name)
+        self._our_unit = our_unit
+        self._backend = backend
+        self._cache = cache
+        self._data = {relation_name: None for relation_name in relations_meta}
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, relation_name):
+        is_peer = relation_name in self._peers
+        relation_list = self._data[relation_name]
+        if relation_list is None:
+            relation_list = self._data[relation_name] = []
+            for rid in self._backend.relation_ids(relation_name):
+                relation = Relation(relation_name, rid, is_peer,
+                                    self._our_unit, self._backend, self._cache)
+                relation_list.append(relation)
+        return relation_list
+
+    def _invalidate(self, relation_name):
+        self._data[relation_name] = None
+
+    def _get_unique(self, relation_name, relation_id=None):
+        if relation_id is not None:
+            if not isinstance(relation_id, int):
+                raise ModelError('relation id {} must be int or None, not {}'.format(
+                    relation_id,
+                    type(relation_id).__name__))
+            for relation in self[relation_name]:
+                if relation.id == relation_id:
+                    return relation
+            else:
+                # The relation may be dead, but it is not forgotten.
+                is_peer = relation_name in self._peers
+                return Relation(relation_name, relation_id, is_peer,
+                                self._our_unit, self._backend, self._cache)
+        num_related = len(self[relation_name])
+        if num_related == 0:
+            return None
+        elif num_related == 1:
+            return self[relation_name][0]
+        else:
+            # TODO: We need something in the framework to catch and gracefully handle
+            # errors, ideally integrating the error catching with Juju's mechanisms.
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key):
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space."""
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self):
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details."""
+
+    def __init__(self, network_info):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
+        for interface_info in network_info['bind-addresses']:
+            interface_name = interface_info['interface-name']
+            for address_info in interface_info['addresses']:
+                self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info['ingress-addresses']:
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info['egress-subnets']:
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        return self.interfaces[0].address
+
+    @property
+    def ingress_address(self):
+        return self.ingress_addresses[0]
+
+
+class NetworkInterface:
+
+    def __init__(self, name, address_info):
+        self.name = name
+        # TODO: expose a hardware address here, see LP: #1864070.
+        self.address = ipaddress.ip_address(address_info['value'])
+        cidr = address_info['cidr']
+        if not cidr:
+            # The cidr field may be empty, see LP: #1864102.
+            # In this case, make it a /32 or /128 IP network.
+            self.subnet = ipaddress.ip_network(address_info['value'])
+        else:
+            self.subnet = ipaddress.ip_network(cidr)
+        # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+    def __init__(self, relation_name, relation_id, is_peer, our_unit, backend, cache):
+        self.name = relation_name
+        self.id = relation_id
+        self.app = None
+        self.units = set()
+
+        # For peer relations, both the remote and the local app are the same.
+        if is_peer:
+            self.app = our_unit.app
+        try:
+            for unit_name in backend.relation_list(self.id):
+                unit = cache.get(Unit, unit_name)
+                self.units.add(unit)
+                if self.app is None:
+                    self.app = unit.app
+        except RelationNotFoundError:
+            # If the relation is dead, just treat it as if it has no remote units.
+            pass
+        self.data = RelationData(self, our_unit, backend)
+
+    def __repr__(self):
+        return '<{}.{} {}:{}>'.format(type(self).__module__,
+                                      type(self).__name__,
+                                      self.name,
+                                      self.id)
+
+
+class RelationData(Mapping):
+    def __init__(self, relation, our_unit, backend):
+        self.relation = weakref.proxy(relation)
+        self._data = {
+            our_unit: RelationDataContent(self.relation, our_unit, backend),
+            our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+        }
+        self._data.update({
+            unit: RelationDataContent(self.relation, unit, backend)
+            for unit in self.relation.units})
+        # The relation might be dead so avoid a None key here.
+        if self.relation.app is not None:
+            self._data.update({
+                self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+            })
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether it's actually
+# mutable or not is controlled by the flag.
+class RelationDataContent(LazyMapping, MutableMapping):
+
+    def __init__(self, relation, entity, backend):
+        self.relation = relation
+        self._entity = entity
+        self._backend = backend
+        self._is_app = isinstance(entity, Application)
+
+    def _load(self):
+        try:
+            return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+        except RelationNotFoundError:
+            # Dead relations tell no tales (and have no data).
+            return {}
+
+    def _is_mutable(self):
+        if self._is_app:
+            is_our_app = self._backend.app_name == self._entity.name
+            if not is_our_app:
+                return False
+            # Whether the application data bag is mutable or not depends on
+            # whether this unit is a leader or not, but this is not guaranteed
+            # to be always true during the same hook execution.
+            return self._backend.is_leader()
+        else:
+            is_our_unit = self._backend.unit_name == self._entity.name
+            if is_our_unit:
+                return True
+        return False
+
+    def __setitem__(self, key, value):
+        if not self._is_mutable():
+            raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+        if not isinstance(value, str):
+            raise RelationDataError('relation data values must be strings')
+
+        self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+        # Don't load data unnecessarily if we're only updating.
+        if self._lazy_data is not None:
+            if value == '':
+                # Match the behavior of Juju, which is that setting the value to an
+                # empty string will remove the key entirely from the relation data.
+                del self._data[key]
+            else:
+                self._data[key] = value
+
+    def __delitem__(self, key):
+        # Match the behavior of Juju, which is that setting the value to an empty
+        # string will remove the key entirely from the relation data.
+        self.__setitem__(key, '')
+
+
+class ConfigData(LazyMapping):
+
+    def __init__(self, backend):
+        self._backend = backend
+
+    def _load(self):
+        return self._backend.config_get()
+
+
+class StatusBase:
+    """Status values specific to applications and units."""
+
+    _statuses = {}
+
+    def __init__(self, message):
+        self.message = message
+
+    def __new__(cls, *args, **kwargs):
+        if cls is StatusBase:
+            raise TypeError("cannot instantiate a base class")
+        cls._statuses[cls.name] = cls
+        return super().__new__(cls)
+
+    @classmethod
+    def from_name(cls, name, message):
+        return cls._statuses[name](message)
+
+
+class ActiveStatus(StatusBase):
+    """The unit is ready.
+
+    The unit believes it is correctly offering all the services it has been asked to offer.
+    """
+    name = 'active'
+
+    def __init__(self, message=None):
+        super().__init__(message or '')
+
+
+class BlockedStatus(StatusBase):
+    """The unit requires manual intervention.
+
+    An operator has to manually intervene to unblock the unit and let it proceed.
+    """
+    name = 'blocked'
+
+
+class MaintenanceStatus(StatusBase):
+    """The unit is performing maintenance tasks.
+
+    The unit is not yet providing services, but is actively doing work in preparation
+    for providing those services.  This is a "spinning" state, not an error state. It
+    reflects activity on the unit itself, not on peers or related units.
+
+    """
+    name = 'maintenance'
+
+
+class UnknownStatus(StatusBase):
+    """The unit status is unknown.
+
+    A unit-agent has finished calling install, config-changed and start, but the
+    charm has not called status-set yet.
+
+    """
+    name = 'unknown'
+
+    def __init__(self):
+        # Unknown status cannot be set and does not have a message associated with it.
+        super().__init__('')
+
+
+class WaitingStatus(StatusBase):
+    """A unit is unable to progress.
+
+    The unit is unable to progress to an active state because an application to which
+    it is related is not running.
+
+    """
+    name = 'waiting'
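+
+# Example (illustrative, from charm code): a status is set by assigning an
+# instance of one of the classes above, e.g.
+#
+#     self.unit.status = MaintenanceStatus('configuring squid')
+#     self.unit.status = ActiveStatus()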
+
+
+class Resources:
+    """Object representing resources for the charm.
+    """
+
+    def __init__(self, names, backend):
+        self._backend = backend
+        self._paths = {name: None for name in names}
+
+    def fetch(self, name):
+        """Fetch the resource from the controller or store.
+
+        If successfully fetched, this returns a Path object to where the resource is stored
+        on disk, otherwise it raises a ModelError.
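+
+        Example (from charm code; ``'squid-image'`` is a hypothetical resource name)::
+
+            path = self.model.resources.fetch('squid-image')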
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
+
+
+class Pod:
+    def __init__(self, backend):
+        self._backend = backend
+
+    def set_spec(self, spec, k8s_resources=None):
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
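+
+    # Example (illustrative, from a Kubernetes charm's event handler; ``spec``
+    # is a dict in Juju's pod-spec format built by the charm):
+    #
+    #     self.model.pod.set_spec(spec)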
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names, backend):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name):
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name, count=1):
+        """Requests new storage instances of a given name.
+
+        Uses storage-add tool to request additional storage. Juju will notify the unit
+        via <storage-name>-storage-attached events when it becomes available.
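+
+        Example (from charm code; ``'cache'`` is a hypothetical storage name)::
+
+            self.model.storages.request('cache', count=2)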
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    pass
+
+
+class RelationNotFoundError(ModelError):
+    pass
+
+
+class InvalidStatusError(ModelError):
+    pass
+
+
+class ModelBackend:
+
+    LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30)
+
+    def __init__(self):
+        self.unit_name = os.environ['JUJU_UNIT_NAME']
+        self.app_name = self.unit_name.split('/')[0]
+
+        self._is_leader = None
+        self._leader_check_time = None
+
+    def _run(self, *args, return_output=False, use_json=False):
+        kwargs = dict(stdout=PIPE, stderr=PIPE)
+        if use_json:
+            args += ('--format=json',)
+        try:
+            result = run(args, check=True, **kwargs)
+        except CalledProcessError as e:
+            raise ModelError(e.stderr)
+        if return_output:
+            if result.stdout is None:
+                return ''
+            else:
+                text = result.stdout.decode('utf8')
+                if use_json:
+                    return json.loads(text)
+                else:
+                    return text
+
+    def relation_ids(self, relation_name):
+        relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True)
+        return [int(relation_id.split(':')[-1]) for relation_id in relation_ids]
+
+    def relation_list(self, relation_id):
+        try:
+            return self._run('relation-list', '-r', str(relation_id),
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_get must be a boolean')
+
+        try:
+            return self._run('relation-get', '-r', str(relation_id),
+                             '-', member_name, '--app={}'.format(is_app),
+                             return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def relation_set(self, relation_id, key, value, is_app):
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter to relation_set must be a boolean')
+
+        try:
+            return self._run('relation-set', '-r', str(relation_id),
+                             '{}={}'.format(key, value), '--app={}'.format(is_app))
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def config_get(self):
+        return self._run('config-get', return_output=True, use_json=True)
+
+    def is_leader(self):
+        """Obtain the current leadership status for the unit the charm code is executing on.
+
+        The value is cached for the duration of a lease which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get a status of a unit or an application.
+
+        is_app -- A boolean indicating whether the status should be retrieved for
+                  an application rather than a unit.
+        """
+        return self._run('status-get', '--include-data', '--application={}'.format(is_app))
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set a status of a unit or an application.
+
+        is_app -- A boolean indicating whether the status should be set for an
+                  application rather than a unit.
+        """
+        if not isinstance(is_app, bool):
+            raise TypeError('is_app parameter must be boolean')
+        return self._run('status-set', '--application={}'.format(is_app), status, message)
+
+    def storage_list(self, name):
+        return [int(s.split('/')[1]) for s in self._run('storage-list', name,
+                                                        return_output=True, use_json=True)]
+
+    def storage_get(self, storage_name_id, attribute):
+        return self._run('storage-get', '-s', storage_name_id, attribute,
+                         return_output=True, use_json=True)
+
+    def storage_add(self, name, count=1):
+        if not isinstance(count, int) or isinstance(count, bool):
+            raise TypeError('storage count must be integer, got: {} ({})'.format(count,
+                                                                                 type(count)))
+        self._run('storage-add', '{}={}'.format(name, count))
+
+    def action_get(self):
+        return self._run('action-get', return_output=True, use_json=True)
+
+    def action_set(self, results):
+        self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()])
+
+    def action_log(self, message):
+        self._run('action-log', message)
+
+    def action_fail(self, message=''):
+        self._run('action-fail', message)
+
+    def application_version_set(self, version):
+        self._run('application-version-set', '--', version)
+
+    def juju_log(self, level, message):
+        self._run('juju-log', '--log-level', level, message)
+
+    def network_get(self, binding_name, relation_id=None):
+        """Return network info provided by network-get for a given binding.
+
+        binding_name -- A name of a binding (relation name or extra-binding name).
+        relation_id -- An optional relation id to get network info for.
+        """
+        cmd = ['network-get', binding_name]
+        if relation_id is not None:
+            cmd.extend(['-r', str(relation_id)])
+        try:
+            return self._run(*cmd, return_output=True, use_json=True)
+        except ModelError as e:
+            if 'relation not found' in str(e):
+                raise RelationNotFoundError() from e
+            raise
+
+    def add_metrics(self, metrics, labels=None):
+        cmd = ['add-metric']
+
+        if labels:
+            label_args = []
+            for k, v in labels.items():
+                _ModelBackendValidator.validate_metric_label(k)
+                _ModelBackendValidator.validate_label_value(k, v)
+                label_args.append('{}={}'.format(k, v))
+            cmd.extend(['--labels', ','.join(label_args)])
+
+        metric_args = []
+        for k, v in metrics.items():
+            _ModelBackendValidator.validate_metric_key(k)
+            metric_value = _ModelBackendValidator.format_metric_value(v)
+            metric_args.append('{}={}'.format(k, metric_value))
+        cmd.extend(metric_args)
+        self._run(*cmd)
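+
+    # Example (illustrative): for metrics declared in metrics.yaml, a call such as
+    #
+    #     backend.add_metrics({'users': 42}, labels={'proxy': 'squid'})
+    #
+    # runs ``add-metric --labels proxy=squid users=42``.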
+
+
+class _ModelBackendValidator:
+    """Provides facilities for validating inputs and formatting them for model backends."""
+
+    METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$')
+
+    @classmethod
+    def validate_metric_key(cls, key):
+        if cls.METRIC_KEY_REGEX.match(key) is None:
+            raise ModelError(
+                'invalid metric key {!r}: must match {}'.format(
+                    key, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def validate_metric_label(cls, label_name):
+        if cls.METRIC_KEY_REGEX.match(label_name) is None:
+            raise ModelError(
+                'invalid metric label name {!r}: must match {}'.format(
+                    label_name, cls.METRIC_KEY_REGEX.pattern))
+
+    @classmethod
+    def format_metric_value(cls, value):
+        try:
+            decimal_value = decimal.Decimal.from_float(value)
+        except TypeError as e:
+            e2 = ModelError('invalid metric value {!r} provided:'
+                            ' must be a positive finite float'.format(value))
+            raise e2 from e
+        if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0:
+            raise ModelError('invalid metric value {!r} provided:'
+                             ' must be a positive finite float'.format(value))
+        return str(decimal_value)
+
+    @classmethod
+    def validate_label_value(cls, label, value):
+        # Label values cannot be empty, contain commas or equal signs as those are
+        # used by add-metric as separators.
+        if not value:
+            raise ModelError(
+                'metric label {} has an empty value, which is not allowed'.format(label))
+        v = str(value)
+        if re.search('[,=]', v) is not None:
+            raise ModelError(
+                'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/testing.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..72d840c4b10a2fbbdd53594c0752c63da3da14dd
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/ops/testing.py
@@ -0,0 +1,477 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import typing
+
+from ops import charm, framework, model
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+            parent directory of the Charm, and if not found fall back to a trivial
+            'name: test-charm' metadata.
+        actions: A string or file-like object containing the contents of
+            actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+            parent directory of the Charm.
+    """
+
+    def __init__(
+            self,
+            charm_cls: typing.Type[charm.CharmBase],
+            *,
+            meta: OptionalYAML = None,
+            actions: OptionalYAML = None):
+        # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since
+        #       it would define the default values of config that the charm would see.
+        self._charm_cls = charm_cls
+        self._charm = None
+        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
+        self._meta = self._create_meta(meta, actions)
+        self._unit_name = self._meta.name + '/0'
+        self._framework = None
+        self._hooks_enabled = True
+        self._relation_id_counter = 0
+        self._backend = _TestingModelBackend(self._unit_name, self._meta)
+        self._model = model.Model(self._unit_name, self._meta, self._backend)
+        self._framework = framework.Framework(":memory:", self._charm_dir, self._meta, self._model)
+
+    @property
+    def charm(self) -> charm.CharmBase:
+        """Return the instance of the charm class that was passed to __init__.
+
+        Note that the Charm is not instantiated until you have called
+        :meth:`.begin()`.
+        """
+        return self._charm
+
+    @property
+    def model(self) -> model.Model:
+        """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+        return self._model
+
+    @property
+    def framework(self) -> framework.Framework:
+        """Return the Framework that is being driven by this Harness."""
+        return self._framework
+
+    def begin(self) -> None:
+        """Instantiate the Charm and start handling events.
+
+        Before calling begin(), there is no Charm instance, so changes to the Model won't emit
+        events. You must call begin before :attr:`.charm` is valid.
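+
+        Example (a minimal sketch, reusing the hypothetical MyCharm from the class docstring)::
+
+          harness = Harness(MyCharm)
+          # harness.charm is still None at this point
+          harness.begin()
+          # harness.charm is now an instance of MyCharm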
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot call the begin method on the harness more than once')
+
+        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+        # the original class against multiple Frameworks. So create a locally defined class
+        # and register it.
+        # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+        #       class attributes, which should clean up this ugliness. The API can stay the same.
+        class TestEvents(self._charm_cls.on.__class__):
+            pass
+
+        TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+        class TestCharm(self._charm_cls):
+            on = TestEvents()
+
+        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+        # rather than TestCharm has no attribute foo.
+        TestCharm.__name__ = self._charm_cls.__name__
+        self._charm = TestCharm(self._framework, self._framework.meta.name)
+
+    def _create_meta(self, charm_metadata, action_metadata):
+        """Create a CharmMeta object.
+
+        Handle the cases where a user doesn't supply explicit metadata snippets.
+        """
+        filename = inspect.getfile(self._charm_cls)
+        charm_dir = pathlib.Path(filename).parents[1]
+
+        if charm_metadata is None:
+            metadata_path = charm_dir / 'metadata.yaml'
+            if metadata_path.is_file():
+                charm_metadata = metadata_path.read_text()
+                self._charm_dir = charm_dir
+            else:
+                # The simplest of metadata that the framework can support
+                charm_metadata = 'name: test-charm'
+        elif isinstance(charm_metadata, str):
+            charm_metadata = dedent(charm_metadata)
+
+        if action_metadata is None:
+            actions_path = charm_dir / 'actions.yaml'
+            if actions_path.is_file():
+                action_metadata = actions_path.read_text()
+                self._charm_dir = charm_dir
+        elif isinstance(action_metadata, str):
+            action_metadata = dedent(action_metadata)
+
+        return charm.CharmMeta.from_yaml(charm_metadata, action_metadata)
+
+    def disable_hooks(self) -> None:
+        """Stop emitting hook events when the model changes.
+
+        This can be used by developers to stop changes to the model from emitting events that
+        the charm will react to. Call :meth:`.enable_hooks`
+        to re-enable them.
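+
+        Example (a minimal sketch; the 'db' relation and the remote names are hypothetical
+        and assume the charm declares a 'db' relation in its metadata.yaml)::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.add_relation_unit(rel_id, 'postgresql/0')
+          harness.disable_hooks()
+          # No relation_changed event is emitted for this update.
+          harness.update_relation_data(rel_id, 'postgresql/0', {'seed': 'initial'})
+          harness.enable_hooks()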
+        """
+        self._hooks_enabled = False
+
+    def enable_hooks(self) -> None:
+        """Re-enable hook events from charm.on when the model is changed.
+
+        By default hook events are enabled once you call :meth:`.begin`,
+        but if you have used :meth:`.disable_hooks`, this can be used to
+        enable them again.
+        """
+        self._hooks_enabled = True
+
+    def _next_relation_id(self):
+        rel_id = self._relation_id_counter
+        self._relation_id_counter += 1
+        return rel_id
+
+    def add_relation(self, relation_name: str, remote_app: str) -> int:
+        """Declare that there is a new relation between this app and `remote_app`.
+
+        Args:
+            relation_name: The name of the relation on this charm that is being related over
+            remote_app: The name of the application that is being related to
+
+        Return:
+            The relation_id created by this add_relation.
+        """
+        rel_id = self._next_relation_id()
+        self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id)
+        self._backend._relation_names[rel_id] = relation_name
+        self._backend._relation_list_map[rel_id] = []
+        self._backend._relation_data[rel_id] = {
+            remote_app: {},
+            self._backend.unit_name: {},
+            self._backend.app_name: {},
+        }
+        # Reload the relation_ids list
+        if self._model is not None:
+            self._model.relations._invalidate(relation_name)
+        if self._charm is None or not self._hooks_enabled:
+            return rel_id
+        relation = self._model.get_relation(relation_name, rel_id)
+        app = self._model.get_app(remote_app)
+        self._charm.on[relation_name].relation_created.emit(
+            relation, app)
+        return rel_id
+
+    def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None:
+        """Add a new unit to a relation.
+
+        Example::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.add_relation_unit(rel_id, 'postgresql/0')
+
+        This will trigger a `relation_joined` event for the remote unit.
+
+        Args:
+            relation_id: The integer relation identifier (as returned by add_relation).
+            remote_unit_name: A string representing the remote unit that is being added.
+        Return:
+            None
+        """
+        self._backend._relation_list_map[relation_id].append(remote_unit_name)
+        self._backend._relation_data[relation_id][remote_unit_name] = {}
+        relation_name = self._backend._relation_names[relation_id]
+        # Make sure that the Model reloads the relation_list for this relation_id, as well as
+        # reloading the relation data for this unit.
+        if self._model is not None:
+            self._model.relations._invalidate(relation_name)
+            remote_unit = self._model.get_unit(remote_unit_name)
+            relation = self._model.get_relation(relation_name, relation_id)
+            relation.data[remote_unit]._invalidate()
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on[relation_name].relation_joined.emit(
+            relation, remote_unit.app, remote_unit)
+
+    def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping:
+        """Get the relation data bucket for a single app or unit in a given relation.
+
+        This ignores all of the safety checks of who can and can't see data in relations (eg,
+        non-leaders can't read their own application's relation data because there are no events
+        that keep that data up-to-date for the unit).
+
+        Args:
+            relation_id: The relation whose content we want to look at.
+            app_or_unit: The name of the application or unit whose data we want to read
+        Return:
+            a dict containing the relation data for `app_or_unit` or None.
+        Raises:
+            KeyError: if relation_id doesn't exist
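+
+        Example (a minimal sketch; names are hypothetical and assume a 'db' relation is
+        declared in metadata.yaml)::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.update_relation_data(rel_id, 'postgresql', {'version': '12'})
+          assert harness.get_relation_data(rel_id, 'postgresql') == {'version': '12'}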
+        """
+        return self._backend._relation_data[relation_id].get(app_or_unit, None)
+
+    def get_workload_version(self) -> str:
+        """Read the workload version that was set by the unit."""
+        return self._backend._workload_version
+
+    def update_relation_data(
+            self,
+            relation_id: int,
+            app_or_unit: str,
+            key_values: typing.Mapping,
+    ) -> None:
+        """Update the relation data for a given unit or application in a given relation.
+
+        This also triggers the `relation_changed` event for this relation_id.
+
+        Args:
+            relation_id: The integer relation_id representing this relation.
+            app_or_unit: The unit or application name that is being updated.
+                This can be on either the local or the remote side of the relation.
+            key_values: Each key/value will be updated in the relation data.
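+
+        Example (a minimal sketch; relation and unit names are hypothetical)::
+
+          rel_id = harness.add_relation('db', 'postgresql')
+          harness.add_relation_unit(rel_id, 'postgresql/0')
+          harness.update_relation_data(rel_id, 'postgresql/0', {'hostname': '10.0.0.1'})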
+        """
+        relation_name = self._backend._relation_names[relation_id]
+        relation = self._model.get_relation(relation_name, relation_id)
+        if '/' in app_or_unit:
+            entity = self._model.get_unit(app_or_unit)
+        else:
+            entity = self._model.get_app(app_or_unit)
+        rel_data = relation.data.get(entity, None)
+        if rel_data is not None:
+            # rel_data may have cached now-stale data, so _invalidate() it.
+            # Note, this won't cause the data to be loaded if it wasn't already.
+            rel_data._invalidate()
+
+        new_values = self._backend._relation_data[relation_id][app_or_unit].copy()
+        for k, v in key_values.items():
+            if v == '':
+                new_values.pop(k, None)
+            else:
+                new_values[k] = v
+        self._backend._relation_data[relation_id][app_or_unit] = new_values
+
+        if app_or_unit == self._model.unit.name:
+            # No events for our own unit
+            return
+        if app_or_unit == self._model.app.name:
+            # updating our own app only generates an event if it is a peer relation and we
+            # aren't the leader
+            is_peer = self._meta.relations[relation_name].role == 'peers'
+            if not is_peer:
+                return
+            if self._model.unit.is_leader():
+                return
+        self._emit_relation_changed(relation_id, app_or_unit)
+
+    def _emit_relation_changed(self, relation_id, app_or_unit):
+        if self._charm is None or not self._hooks_enabled:
+            return
+        rel_name = self._backend._relation_names[relation_id]
+        relation = self.model.get_relation(rel_name, relation_id)
+        if '/' in app_or_unit:
+            app_name = app_or_unit.split('/')[0]
+            unit_name = app_or_unit
+            app = self.model.get_app(app_name)
+            unit = self.model.get_unit(unit_name)
+            args = (relation, app, unit)
+        else:
+            app_name = app_or_unit
+            app = self.model.get_app(app_name)
+            args = (relation, app)
+        self._charm.on[rel_name].relation_changed.emit(*args)
+
+    def update_config(
+            self,
+            key_values: typing.Mapping[str, str] = None,
+            unset: typing.Iterable[str] = (),
+    ) -> None:
+        """Update the config as seen by the charm.
+
+        This will trigger a `config_changed` event.
+
+        Args:
+            key_values: A Mapping of key:value pairs to update in config.
+            unset: An iterable of keys to remove from Config. (Note that this does
+                not currently reset the config values to the default defined in config.yaml.)
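+
+        Example (a minimal sketch; the option names are hypothetical)::
+
+          harness.update_config({'log-level': 'debug'}, unset=['obsolete-option'])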
+        """
+        config = self._backend._config
+        if key_values is not None:
+            for key, value in key_values.items():
+                config[key] = value
+        for key in unset:
+            config.pop(key, None)
+        # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config
+        # is a LazyMapping, but its _load returns a dict and this method mutates
+        # the dict that Config is caching. Arguably we should be doing some sort
+        # of charm.framework.model.config._invalidate()
+        if self._charm is None or not self._hooks_enabled:
+            return
+        self._charm.on.config_changed.emit()
+
+    def set_leader(self, is_leader: bool = True) -> None:
+        """Set whether this unit is the leader or not.
+
+        If this charm becomes a leader then `leader_elected` will be triggered.
+
+        Args:
+            is_leader: True/False as to whether this unit is the leader.
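+
+        Example (a minimal sketch)::
+
+          harness.set_leader(True)  # emits leader_elected once begin() has been called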
+        """
+        was_leader = self._backend._is_leader
+        self._backend._is_leader = is_leader
+        # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in
+        # the Model objects, so this automatically gets noticed.
+        if is_leader and not was_leader and self._charm is not None and self._hooks_enabled:
+            self._charm.on.leader_elected.emit()
+
+
+class _TestingModelBackend:
+    """This conforms to the interface for ModelBackend but provides canned data.
+
+    DO NOT use this class directly; it is used by `Harness`_ to drive the model.
+    `Harness`_ is responsible for maintaining the internal consistency of the values here,
+    as the only public methods of this type are for implementing ModelBackend.
+    """
+
+    def __init__(self, unit_name, meta):
+        self.unit_name = unit_name
+        self.app_name = self.unit_name.split('/')[0]
+        self._calls = []
+        self._meta = meta
+        self._relation_ids_map = {}  # relation name to [relation_ids,...]
+        self._relation_names = {}  # reverse map from relation_id to relation_name
+        self._relation_list_map = {}  # relation_id: [unit_name,...]
+        self._relation_data = {}  # {relation_id: {name: data}}
+        self._config = {}
+        self._is_leader = False
+        self._resources_map = {}
+        self._pod_spec = None
+        self._app_status = None
+        self._unit_status = None
+        self._workload_version = None
+
+    def relation_ids(self, relation_name):
+        try:
+            return self._relation_ids_map[relation_name]
+        except KeyError as e:
+            if relation_name not in self._meta.relations:
+                raise model.ModelError('{} is not a known relation'.format(relation_name)) from e
+            return []
+
+    def relation_list(self, relation_id):
+        try:
+            return self._relation_list_map[relation_id]
+        except KeyError as e:
+            raise model.RelationNotFoundError from e
+
+    def relation_get(self, relation_id, member_name, is_app):
+        if is_app and '/' in member_name:
+            member_name = member_name.split('/')[0]
+        if relation_id not in self._relation_data:
+            raise model.RelationNotFoundError()
+        return self._relation_data[relation_id][member_name].copy()
+
+    def relation_set(self, relation_id, key, value, is_app):
+        relation = self._relation_data[relation_id]
+        if is_app:
+            bucket_key = self.app_name
+        else:
+            bucket_key = self.unit_name
+        if bucket_key not in relation:
+            relation[bucket_key] = {}
+        bucket = relation[bucket_key]
+        if value == '':
+            bucket.pop(key, None)
+        else:
+            bucket[key] = value
+
+    def config_get(self):
+        return self._config
+
+    def is_leader(self):
+        return self._is_leader
+
+    def application_version_set(self, version):
+        self._workload_version = version
+
+    def resource_get(self, resource_name):
+        return self._resources_map[resource_name]
+
+    def pod_spec_set(self, spec, k8s_resources):
+        self._pod_spec = (spec, k8s_resources)
+
+    def status_get(self, *, is_app=False):
+        if is_app:
+            return self._app_status
+        else:
+            return self._unit_status
+
+    def status_set(self, status, message='', *, is_app=False):
+        if is_app:
+            self._app_status = (status, message)
+        else:
+            self._unit_status = (status, message)
+
+    def storage_list(self, name):
+        raise NotImplementedError(self.storage_list)
+
+    def storage_get(self, storage_name_id, attribute):
+        raise NotImplementedError(self.storage_get)
+
+    def storage_add(self, name, count=1):
+        raise NotImplementedError(self.storage_add)
+
+    def action_get(self):
+        raise NotImplementedError(self.action_get)
+
+    def action_set(self, results):
+        raise NotImplementedError(self.action_set)
+
+    def action_log(self, message):
+        raise NotImplementedError(self.action_log)
+
+    def action_fail(self, message=''):
+        raise NotImplementedError(self.action_fail)
+
+    def network_get(self, endpoint_name, relation_id=None):
+        raise NotImplementedError(self.network_get)
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/requirements.txt b/magma/hackfest_squid_cnf/charms/squid/mod/operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5500f007d0bf6c6098afc0f2c6d00915e345a569
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/requirements.txt
@@ -0,0 +1 @@
+PyYAML
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/run_tests b/magma/hackfest_squid_cnf/charms/squid/mod/operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..56411030fdcd8629ffbfbbebb8a8a0650203a934
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/run_tests
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+python3 -m unittest "$@"
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/setup.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc017478d4c6922e4aaf8dc7715152e144cfab7e
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/setup.py
@@ -0,0 +1,39 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from setuptools import setup
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setup(
+    name="ops",
+    version="0.0.1",
+    description="The Python library behind great charms",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    license="Apache-2.0",
+    url="https://github.com/canonical/operator",
+    packages=["ops"],
+    classifiers=[
+        "Development Status :: 4 - Beta",
+
+        "Programming Language :: Python :: 3",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+
+        "License :: OSI Approved :: Apache Software License",
+    ],
+)
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/__init__.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-ids b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-ids
new file mode 100755
index 0000000000000000000000000000000000000000..a7e0ead2d3182713bd826696fc403b5a8c54faa6
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-ids
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+case $1 in
+    db) echo '["db:1"]' ;;
+    mon) echo '["mon:2"]' ;;
+    ha) echo '[]' ;;
+    db0) echo '[]' ;;
+    db1) echo '["db1:4"]' ;;
+    db2) echo '["db2:5", "db2:6"]' ;;
+    *) echo '[]' ;;
+esac
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-list b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-list
new file mode 100755
index 0000000000000000000000000000000000000000..88490159775624108766a17a35a77599ddea8f03
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/bin/relation-list
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+fail_not_found() {
+    1>&2 echo "ERROR invalid value \"$1\" for option -r: relation not found"
+    exit 2
+}
+
+case $2 in
+    1) echo '["remote/0"]' ;;
+    2) echo '["remote/0"]' ;;
+    3) fail_not_found $2 ;;
+    4) echo '["remoteapp1/0"]' ;;
+    5) echo '["remoteapp1/0"]' ;;
+    6) echo '["remoteapp2/0"]' ;;
+    *) fail_not_found $2 ;;
+esac
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/config.yaml b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ffc0186002391ca52273d39bebcc9c4261c47535
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/config.yaml
@@ -0,0 +1 @@
+"options": {}
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/lib/__init__.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/lib/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/lib/ops b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/lib/ops
new file mode 120000
index 0000000000000000000000000000000000000000..1356790e5ca930db72fe58ee452914193ba47f20
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/lib/ops
@@ -0,0 +1 @@
+../../../../ops
\ No newline at end of file
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/metadata.yaml b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b3aed87e96121224c63916b04009daf40fcab35
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/metadata.yaml
@@ -0,0 +1,26 @@
+name: main
+summary: A charm used for testing the basic operation of the entrypoint code.
+maintainer: Dmitrii Shcherbakov <dmitrii.shcherbakov@canonical.com>
+description: A charm used for testing the basic operation of the entrypoint code.
+tags:
+    - misc
+series:
+    - bionic
+    - cosmic
+    - disco
+min-juju-version: 2.7.1
+provides:
+    db:
+        interface: db
+requires:
+    mon:
+        interface: monitoring
+peers:
+    ha:
+        interface: cluster
+subordinate: false
+storage:
+    disks:
+        type: block
+        multiple:
+            range: 0-
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/src/charm.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..0e066c97bc7223e2731fb51fb0beb62e3d94de3e
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/charms/test_main/src/charm.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import base64
+import pickle
+import sys
+import logging
+
+sys.path.append('lib')
+
+from ops.charm import CharmBase  # noqa: E402 (module-level import after non-import code)
+from ops.main import main        # noqa: E402 (ditto)
+
+logger = logging.getLogger()
+
+
+class Charm(CharmBase):
+
+    def __init__(self, *args):
+        super().__init__(*args)
+
+        # This environment variable controls the test charm behavior.
+        charm_config = os.environ.get('CHARM_CONFIG')
+        if charm_config is not None:
+            self._charm_config = pickle.loads(base64.b64decode(charm_config))
+        else:
+            self._charm_config = {}
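+        # A parent test process is expected to prepare the value roughly as
+        # base64.b64encode(pickle.dumps({'STATE_FILE': path, ...})), i.e. the
+        # inverse of the decoding above (a sketch of the expected encoding).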
+
+        # TODO: refactor to use StoredState
+        # (this implies refactoring most of test_main.py)
+        self._state_file = self._charm_config.get('STATE_FILE')
+        try:
+            with open(str(self._state_file), 'rb') as f:
+                self._state = pickle.load(f)
+        except (FileNotFoundError, EOFError):
+            self._state = {
+                'on_install': [],
+                'on_start': [],
+                'on_config_changed': [],
+                'on_update_status': [],
+                'on_leader_settings_changed': [],
+                'on_db_relation_joined': [],
+                'on_mon_relation_changed': [],
+                'on_mon_relation_departed': [],
+                'on_ha_relation_broken': [],
+                'on_foo_bar_action': [],
+                'on_start_action': [],
+                'on_collect_metrics': [],
+
+                'on_log_critical_action': [],
+                'on_log_error_action': [],
+                'on_log_warning_action': [],
+                'on_log_info_action': [],
+                'on_log_debug_action': [],
+
+                # Observed event types per invocation. A list is used to preserve the
+                # order in which charm handlers have observed the events.
+                'observed_event_types': [],
+            }
+
+        self.framework.observe(self.on.install, self)
+        self.framework.observe(self.on.start, self)
+        self.framework.observe(self.on.config_changed, self)
+        self.framework.observe(self.on.update_status, self)
+        self.framework.observe(self.on.leader_settings_changed, self)
+        # Test relation events with endpoints from different
+        # sections (provides, requires, peers) as well.
+        self.framework.observe(self.on.db_relation_joined, self)
+        self.framework.observe(self.on.mon_relation_changed, self)
+        self.framework.observe(self.on.mon_relation_departed, self)
+        self.framework.observe(self.on.ha_relation_broken, self)
+
+        if self._charm_config.get('USE_ACTIONS'):
+            self.framework.observe(self.on.start_action, self)
+            self.framework.observe(self.on.foo_bar_action, self)
+
+        self.framework.observe(self.on.collect_metrics, self)
+
+        if self._charm_config.get('USE_LOG_ACTIONS'):
+            self.framework.observe(self.on.log_critical_action, self)
+            self.framework.observe(self.on.log_error_action, self)
+            self.framework.observe(self.on.log_warning_action, self)
+            self.framework.observe(self.on.log_info_action, self)
+            self.framework.observe(self.on.log_debug_action, self)
+
+    def _write_state(self):
+        """Write state variables so that the parent process can read them.
+
+        Each invocation will override the previous state, which is intentional.
+        """
+        if self._state_file is not None:
+            with self._state_file.open('wb') as f:
+                pickle.dump(self._state, f)
+
+    def on_install(self, event):
+        self._state['on_install'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_start(self, event):
+        self._state['on_start'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_config_changed(self, event):
+        self._state['on_config_changed'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        event.defer()
+        self._write_state()
+
+    def on_update_status(self, event):
+        self._state['on_update_status'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_leader_settings_changed(self, event):
+        self._state['on_leader_settings_changed'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_db_relation_joined(self, event):
+        assert event.app is not None, 'application name cannot be None for a relation-joined event'
+        self._state['on_db_relation_joined'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._state['db_relation_joined_data'] = event.snapshot()
+        self._write_state()
+
+    def on_mon_relation_changed(self, event):
+        assert event.app is not None, (
+            'application name cannot be None for a relation-changed event')
+        if os.environ.get('JUJU_REMOTE_UNIT'):
+            assert event.unit is not None, (
+                'a unit name cannot be None for a relation-changed event'
+                ' associated with a remote unit')
+        self._state['on_mon_relation_changed'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._state['mon_relation_changed_data'] = event.snapshot()
+        self._write_state()
+
+    def on_mon_relation_departed(self, event):
+        assert event.app is not None, (
+            'application name cannot be None for a relation-departed event')
+        self._state['on_mon_relation_departed'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._state['mon_relation_departed_data'] = event.snapshot()
+        self._write_state()
+
+    def on_ha_relation_broken(self, event):
+        assert event.app is None, (
+            'relation-broken events cannot have a reference to a remote application')
+        assert event.unit is None, (
+            'relation broken events cannot have a reference to a remote unit')
+        self._state['on_ha_relation_broken'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._state['ha_relation_broken_data'] = event.snapshot()
+        self._write_state()
+
+    def on_start_action(self, event):
+        assert event.handle.kind == 'start_action', (
+            'event action name cannot be different from the one being handled')
+        self._state['on_start_action'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_foo_bar_action(self, event):
+        assert event.handle.kind == 'foo_bar_action', (
+            'event action name cannot be different from the one being handled')
+        self._state['on_foo_bar_action'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        self._write_state()
+
+    def on_collect_metrics(self, event):
+        self._state['on_collect_metrics'].append(type(event))
+        self._state['observed_event_types'].append(type(event))
+        event.add_metrics({'foo': 42}, {'bar': 4.2})
+        self._write_state()
+
+    def on_log_critical_action(self, event):
+        logger.critical('super critical')
+
+    def on_log_error_action(self, event):
+        logger.error('grave error')
+
+    def on_log_warning_action(self, event):
+        logger.warning('wise warning')
+
+    def on_log_info_action(self, event):
+        logger.info('useful info')
+
+    def on_log_debug_action(self, event):
+        logger.debug('insightful debug')
+
+
+if __name__ == '__main__':
+    main(Charm)
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_charm.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..c8d84475cbc7ec832db108bdfcebea2d16c457b3
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_charm.py
@@ -0,0 +1,323 @@
+#!/usr/bin/python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+import tempfile
+import shutil
+
+from pathlib import Path
+
+from ops.charm import (
+    CharmBase,
+    CharmMeta,
+    CharmEvents,
+)
+from ops.framework import Framework, EventSource, EventBase
+from ops.model import Model, ModelBackend
+
+from .test_helpers import fake_script, fake_script_calls
+
+
+class TestCharm(unittest.TestCase):
+
+    def setUp(self):
+        def restore_env(env):
+            os.environ.clear()
+            os.environ.update(env)
+        self.addCleanup(restore_env, os.environ.copy())
+
+        os.environ['PATH'] = "{}:{}".format(Path(__file__).parent / 'bin', os.environ['PATH'])
+        os.environ['JUJU_UNIT_NAME'] = 'local/0'
+
+        self.tmpdir = Path(tempfile.mkdtemp())
+        self.addCleanup(shutil.rmtree, str(self.tmpdir))
+        self.meta = CharmMeta()
+
+        class CustomEvent(EventBase):
+            pass
+
+        class TestCharmEvents(CharmEvents):
+            custom = EventSource(CustomEvent)
+
+        # Relation events are defined dynamically and modify the class attributes.
+        # We use a subclass temporarily to prevent these side effects from leaking.
+        CharmBase.on = TestCharmEvents()
+
+        def cleanup():
+            CharmBase.on = CharmEvents()
+        self.addCleanup(cleanup)
+
+    def create_framework(self):
+        model = Model('local/0', self.meta, ModelBackend())
+        framework = Framework(self.tmpdir / "framework.data", self.tmpdir, self.meta, model)
+        self.addCleanup(framework.close)
+        return framework
+
+    def test_basic(self):
+
+        class MyCharm(CharmBase):
+
+            def __init__(self, *args):
+                super().__init__(*args)
+
+                self.started = False
+                framework.observe(self.on.start, self)
+
+            def on_start(self, event):
+                self.started = True
+
+        events = list(MyCharm.on.events())
+        self.assertIn('install', events)
+        self.assertIn('custom', events)
+
+        framework = self.create_framework()
+        charm = MyCharm(framework, None)
+        charm.on.start.emit()
+
+        self.assertEqual(charm.started, True)
+
+    def test_helper_properties(self):
+        framework = self.create_framework()
+
+        class MyCharm(CharmBase):
+            pass
+
+        charm = MyCharm(framework, None)
+        self.assertEqual(charm.app, framework.model.app)
+        self.assertEqual(charm.unit, framework.model.unit)
+        self.assertEqual(charm.meta, framework.meta)
+        self.assertEqual(charm.charm_dir, framework.charm_dir)
+
+    def test_relation_events(self):
+
+        class MyCharm(CharmBase):
+            def __init__(self, *args):
+                super().__init__(*args)
+                self.seen = []
+                for rel in ('req1', 'req-2', 'pro1', 'pro-2', 'peer1', 'peer-2'):
+                    # Hook up relation events to generic handler.
+                    self.framework.observe(self.on[rel].relation_joined, self.on_any_relation)
+                    self.framework.observe(self.on[rel].relation_changed, self.on_any_relation)
+                    self.framework.observe(self.on[rel].relation_departed, self.on_any_relation)
+                    self.framework.observe(self.on[rel].relation_broken, self.on_any_relation)
+
+            def on_any_relation(self, event):
+                assert event.relation.name == 'req1'
+                assert event.relation.app.name == 'remote'
+                self.seen.append(type(event).__name__)
+
+        # language=YAML
+        self.meta = CharmMeta.from_yaml(metadata='''
+name: my-charm
+requires:
+ req1:
+   interface: req1
+ req-2:
+   interface: req2
+provides:
+ pro1:
+   interface: pro1
+ pro-2:
+   interface: pro2
+peers:
+ peer1:
+   interface: peer1
+ peer-2:
+   interface: peer2
+''')
+
+        charm = MyCharm(self.create_framework(), None)
+
+        rel = charm.framework.model.get_relation('req1', 1)
+        unit = charm.framework.model.get_unit('remote/0')
+        charm.on['req1'].relation_joined.emit(rel, unit)
+        charm.on['req1'].relation_changed.emit(rel, unit)
+        charm.on['req-2'].relation_changed.emit(rel, unit)
+        charm.on['pro1'].relation_departed.emit(rel, unit)
+        charm.on['pro-2'].relation_departed.emit(rel, unit)
+        charm.on['peer1'].relation_broken.emit(rel)
+        charm.on['peer-2'].relation_broken.emit(rel)
+
+        self.assertEqual(charm.seen, [
+            'RelationJoinedEvent',
+            'RelationChangedEvent',
+            'RelationChangedEvent',
+            'RelationDepartedEvent',
+            'RelationDepartedEvent',
+            'RelationBrokenEvent',
+            'RelationBrokenEvent',
+        ])
+
+    def test_storage_events(self):
+
+        class MyCharm(CharmBase):
+            def __init__(self, *args):
+                super().__init__(*args)
+                self.seen = []
+                self.framework.observe(self.on['stor1'].storage_attached, self)
+                self.framework.observe(self.on['stor2'].storage_detaching, self)
+                self.framework.observe(self.on['stor3'].storage_attached, self)
+                self.framework.observe(self.on['stor-4'].storage_attached, self)
+
+            def on_stor1_storage_attached(self, event):
+                self.seen.append(type(event).__name__)
+
+            def on_stor2_storage_detaching(self, event):
+                self.seen.append(type(event).__name__)
+
+            def on_stor3_storage_attached(self, event):
+                self.seen.append(type(event).__name__)
+
+            def on_stor_4_storage_attached(self, event):
+                self.seen.append(type(event).__name__)
+
+        # language=YAML
+        self.meta = CharmMeta.from_yaml('''
+name: my-charm
+storage:
+  stor-4:
+    multiple:
+      range: 2-4
+    type: filesystem
+  stor1:
+    type: filesystem
+  stor2:
+    multiple:
+      range: "2"
+    type: filesystem
+  stor3:
+    multiple:
+      range: 2-
+    type: filesystem
+''')
+
+        self.assertIsNone(self.meta.storages['stor1'].multiple_range)
+        self.assertEqual(self.meta.storages['stor2'].multiple_range, (2, 2))
+        self.assertEqual(self.meta.storages['stor3'].multiple_range, (2, None))
+        self.assertEqual(self.meta.storages['stor-4'].multiple_range, (2, 4))
+
+        charm = MyCharm(self.create_framework(), None)
+
+        charm.on['stor1'].storage_attached.emit()
+        charm.on['stor2'].storage_detaching.emit()
+        charm.on['stor3'].storage_attached.emit()
+        charm.on['stor-4'].storage_attached.emit()
+
+        self.assertEqual(charm.seen, [
+            'StorageAttachedEvent',
+            'StorageDetachingEvent',
+            'StorageAttachedEvent',
+            'StorageAttachedEvent',
+        ])
+
+    @classmethod
+    def _get_action_test_meta(cls):
+        # language=YAML
+        return CharmMeta.from_yaml(metadata='''
+name: my-charm
+''', actions='''
+foo-bar:
+  description: "Foos the bar."
+  params:
+    foo-name:
+      description: "A foo name to bar"
+      type: string
+    silent:
+      default: false
+      description: ""
+      type: boolean
+  required: foo-bar
+  title: foo-bar
+start:
+  description: "Start the unit."
+''')
+
+    def _test_action_events(self, cmd_type):
+
+        class MyCharm(CharmBase):
+
+            def __init__(self, *args):
+                super().__init__(*args)
+                framework.observe(self.on.foo_bar_action, self)
+                framework.observe(self.on.start_action, self)
+
+            def on_foo_bar_action(self, event):
+                self.seen_action_params = event.params
+                event.log('test-log')
+                event.set_results({'res': 'val with spaces'})
+                event.fail('test-fail')
+
+            def on_start_action(self, event):
+                pass
+
+        fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""")
+        fake_script(self, cmd_type + '-set', "")
+        fake_script(self, cmd_type + '-log', "")
+        fake_script(self, cmd_type + '-fail', "")
+        self.meta = self._get_action_test_meta()
+
+        os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'foo-bar'
+        framework = self.create_framework()
+        charm = MyCharm(framework, None)
+
+        events = list(MyCharm.on.events())
+        self.assertIn('foo_bar_action', events)
+        self.assertIn('start_action', events)
+
+        charm.on.foo_bar_action.emit()
+        self.assertEqual(charm.seen_action_params, {"foo-name": "name", "silent": True})
+        self.assertEqual(fake_script_calls(self), [
+            [cmd_type + '-get', '--format=json'],
+            [cmd_type + '-log', "test-log"],
+            [cmd_type + '-set', "res=val with spaces"],
+            [cmd_type + '-fail', "test-fail"],
+        ])
+
+        # Make sure that action events that do not match the current context are
+        # not possible to emit by hand.
+        with self.assertRaises(RuntimeError):
+            charm.on.start_action.emit()
+
+    def test_action_events(self):
+        self._test_action_events('action')
+
+    def _test_action_event_defer_fails(self, cmd_type):
+
+        class MyCharm(CharmBase):
+
+            def __init__(self, *args):
+                super().__init__(*args)
+                framework.observe(self.on.start_action, self)
+
+            def on_start_action(self, event):
+                event.defer()
+
+        fake_script(self, cmd_type + '-get', """echo '{"foo-name": "name", "silent": true}'""")
+        self.meta = self._get_action_test_meta()
+
+        os.environ['JUJU_{}_NAME'.format(cmd_type.upper())] = 'start'
+        framework = self.create_framework()
+        charm = MyCharm(framework, None)
+
+        with self.assertRaises(RuntimeError):
+            charm.on.start_action.emit()
+
+    def test_action_event_defer_fails(self):
+        self._test_action_event_defer_fails('action')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_framework.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_framework.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ec4c4ed56d9b18aef419107a97054c557cc4218
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_framework.py
@@ -0,0 +1,1727 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import gc
+import inspect
+import io
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+from unittest.mock import patch
+from pathlib import Path
+
+from ops import charm, model
+from ops.framework import (
+    _BREAKPOINT_WELCOME_MESSAGE,
+    BoundStoredState,
+    CommitEvent,
+    EventBase,
+    ObjectEvents,
+    EventSource,
+    Framework,
+    Handle,
+    NoSnapshotError,
+    Object,
+    PreCommitEvent,
+    SQLiteStorage,
+    StoredList,
+    StoredState,
+    StoredStateData,
+)
+from test.test_helpers import fake_script
+
+
+class TestFramework(unittest.TestCase):
+
+    def setUp(self):
+        self.tmpdir = Path(tempfile.mkdtemp())
+        self.addCleanup(shutil.rmtree, str(self.tmpdir))
+        default_timeout = SQLiteStorage.DB_LOCK_TIMEOUT
+
+        def timeout_cleanup():
+            SQLiteStorage.DB_LOCK_TIMEOUT = default_timeout
+        SQLiteStorage.DB_LOCK_TIMEOUT = datetime.timedelta(0)
+        self.addCleanup(timeout_cleanup)
+
+    def create_framework(self):
+        framework = Framework(self.tmpdir / "framework.data", self.tmpdir, None, None)
+        self.addCleanup(framework.close)
+        return framework
+
+    def test_handle_path(self):
+        cases = [
+            (Handle(None, "root", None), "root"),
+            (Handle(None, "root", "1"), "root[1]"),
+            (Handle(Handle(None, "root", None), "child", None), "root/child"),
+            (Handle(Handle(None, "root", "1"), "child", "2"), "root[1]/child[2]"),
+        ]
+        for handle, path in cases:
+            self.assertEqual(str(handle), path)
+            self.assertEqual(Handle.from_path(path), handle)
+
+    def test_handle_attrs_readonly(self):
+        handle = Handle(None, 'kind', 'key')
+        with self.assertRaises(AttributeError):
+            handle.parent = 'foo'
+        with self.assertRaises(AttributeError):
+            handle.kind = 'foo'
+        with self.assertRaises(AttributeError):
+            handle.key = 'foo'
+        with self.assertRaises(AttributeError):
+            handle.path = 'foo'
+
+    def test_restore_unknown(self):
+        framework = self.create_framework()
+
+        class Foo(Object):
+            pass
+
+        handle = Handle(None, "a_foo", "some_key")
+
+        framework.register_type(Foo, None, handle.kind)
+
+        try:
+            framework.load_snapshot(handle)
+        except NoSnapshotError as e:
+            self.assertEqual(e.handle_path, str(handle))
+            self.assertEqual(str(e), "no snapshot data found for a_foo[some_key] object")
+        else:
+            self.fail("exception NoSnapshotError not raised")
+
+    def test_snapshot_roundtrip(self):
+        class Foo:
+            def __init__(self, handle, n):
+                self.handle = handle
+                self.my_n = n
+
+            def snapshot(self):
+                return {"My N!": self.my_n}
+
+            def restore(self, snapshot):
+                self.my_n = snapshot["My N!"] + 1
+
+        handle = Handle(None, "a_foo", "some_key")
+        event = Foo(handle, 1)
+
+        framework1 = self.create_framework()
+        framework1.register_type(Foo, None, handle.kind)
+        framework1.save_snapshot(event)
+        framework1.commit()
+        framework1.close()
+
+        framework2 = self.create_framework()
+        framework2.register_type(Foo, None, handle.kind)
+        event2 = framework2.load_snapshot(handle)
+        self.assertEqual(event2.my_n, 2)
+
+        framework2.save_snapshot(event2)
+        del event2
+        gc.collect()
+        event3 = framework2.load_snapshot(handle)
+        self.assertEqual(event3.my_n, 3)
+
+        framework2.drop_snapshot(event.handle)
+        framework2.commit()
+        framework2.close()
+
+        framework3 = self.create_framework()
+        framework3.register_type(Foo, None, handle.kind)
+
+        self.assertRaises(NoSnapshotError, framework3.load_snapshot, handle)
+
+    def test_simple_event_observer(self):
+        framework = self.create_framework()
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyNotifier(Object):
+            foo = EventSource(MyEvent)
+            bar = EventSource(MyEvent)
+            baz = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_any(self, event):
+                self.seen.append("on_any:" + event.handle.kind)
+
+            def on_foo(self, event):
+                self.seen.append("on_foo:" + event.handle.kind)
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
+        framework.observe(pub.foo, obs.on_any)
+        framework.observe(pub.bar, obs.on_any)
+        framework.observe(pub.foo, obs)  # Method name defaults to on_<event kind>.
+
+        try:
+            framework.observe(pub.baz, obs)
+        except RuntimeError as e:
+            self.assertEqual(
+                str(e),
+                'Observer method not provided explicitly'
+                ' and MyObserver type has no "on_baz" method')
+        else:
+            self.fail("RuntimeError not raised")
+
+        pub.foo.emit()
+        pub.bar.emit()
+
+        self.assertEqual(obs.seen, ["on_any:foo", "on_foo:foo", "on_any:bar"])
+
+    def test_bad_sig_observer(self):
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyNotifier(Object):
+            foo = EventSource(MyEvent)
+            bar = EventSource(MyEvent)
+            baz = EventSource(MyEvent)
+            qux = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            def on_foo(self):
+                assert False, 'should not be reached'
+
+            def on_bar(self, event, extra):
+                assert False, 'should not be reached'
+
+            def on_baz(self, event, extra=None, *, k):
+                assert False, 'should not be reached'
+
+            def on_qux(self, event, extra=None):
+                assert False, 'should not be reached'
+
+        framework = self.create_framework()
+        pub = MyNotifier(framework, "pub")
+        obs = MyObserver(framework, "obs")
+
+        with self.assertRaises(TypeError):
+            framework.observe(pub.foo, obs)
+        with self.assertRaises(TypeError):
+            framework.observe(pub.bar, obs)
+        with self.assertRaises(TypeError):
+            framework.observe(pub.baz, obs)
+        framework.observe(pub.qux, obs)
+
+    def test_on_pre_commit_emitted(self):
+        framework = self.create_framework()
+
+        class PreCommitObserver(Object):
+
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+                self._stored.myinitdata = 40
+
+            def on_pre_commit(self, event):
+                self._stored.myinitdata = 41
+                self._stored.mydata = 42
+                self.seen.append(type(event))
+
+            def on_commit(self, event):
+                # Modifications made here will not be persisted.
+                self._stored.myinitdata = 42
+                self._stored.mydata = 43
+                self._stored.myotherdata = 43
+                self.seen.append(type(event))
+
+        obs = PreCommitObserver(framework, None)
+
+        framework.observe(framework.on.pre_commit, obs.on_pre_commit)
+
+        framework.commit()
+
+        self.assertEqual(obs._stored.myinitdata, 41)
+        self.assertEqual(obs._stored.mydata, 42)
+        self.assertTrue(obs.seen, [PreCommitEvent, CommitEvent])
+        framework.close()
+
+        other_framework = self.create_framework()
+
+        new_obs = PreCommitObserver(other_framework, None)
+
+        self.assertEqual(obs._stored.myinitdata, 41)
+        self.assertEqual(new_obs._stored.mydata, 42)
+
+        with self.assertRaises(AttributeError):
+            new_obs._stored.myotherdata
+
+    def test_defer_and_reemit(self):
+        framework = self.create_framework()
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyNotifier1(Object):
+            a = EventSource(MyEvent)
+            b = EventSource(MyEvent)
+
+        class MyNotifier2(Object):
+            c = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+                self.done = {}
+
+            def on_any(self, event):
+                self.seen.append(event.handle.kind)
+                if not self.done.get(event.handle.kind):
+                    event.defer()
+
+        pub1 = MyNotifier1(framework, "1")
+        pub2 = MyNotifier2(framework, "1")
+        obs1 = MyObserver(framework, "1")
+        obs2 = MyObserver(framework, "2")
+
+        framework.observe(pub1.a, obs1.on_any)
+        framework.observe(pub1.b, obs1.on_any)
+        framework.observe(pub1.a, obs2.on_any)
+        framework.observe(pub1.b, obs2.on_any)
+        framework.observe(pub2.c, obs2.on_any)
+
+        pub1.a.emit()
+        pub1.b.emit()
+        pub2.c.emit()
+
+        # Events remain stored because they were deferred.
+        ev_a_handle = Handle(pub1, "a", "1")
+        framework.load_snapshot(ev_a_handle)
+        ev_b_handle = Handle(pub1, "b", "2")
+        framework.load_snapshot(ev_b_handle)
+        ev_c_handle = Handle(pub2, "c", "3")
+        framework.load_snapshot(ev_c_handle)
+        # make sure the objects are gone before we reemit them
+        gc.collect()
+
+        framework.reemit()
+        obs1.done["a"] = True
+        obs2.done["b"] = True
+        framework.reemit()
+        framework.reemit()
+        obs1.done["b"] = True
+        obs2.done["a"] = True
+        framework.reemit()
+        obs2.done["c"] = True
+        framework.reemit()
+        framework.reemit()
+        framework.reemit()
+
+        self.assertEqual(" ".join(obs1.seen), "a b a b a b b b")
+        self.assertEqual(" ".join(obs2.seen), "a b c a b c a b c a c a c c")
+
+        # Now the event objects must all be gone from storage.
+        self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_a_handle)
+        self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_b_handle)
+        self.assertRaises(NoSnapshotError, framework.load_snapshot, ev_c_handle)
+
+    def test_custom_event_data(self):
+        framework = self.create_framework()
+
+        class MyEvent(EventBase):
+            def __init__(self, handle, n):
+                super().__init__(handle)
+                self.my_n = n
+
+            def snapshot(self):
+                return {"My N!": self.my_n}
+
+            def restore(self, snapshot):
+                super().restore(snapshot)
+                self.my_n = snapshot["My N!"] + 1
+
+        class MyNotifier(Object):
+            foo = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append("on_foo:{}={}".format(event.handle.kind, event.my_n))
+                event.defer()
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
+        framework.observe(pub.foo, obs)
+
+        pub.foo.emit(1)
+
+        framework.reemit()
+
+        # Two things being checked here:
+        #
+        # 1. There's a restore roundtrip before the event is first observed.
+        #    That means the data is safe before it's ever seen, and the
+        #    roundtrip logic is tested under normal circumstances.
+        #
+        # 2. The renotification restores from the pristine event, not
+        #    from the one modified during the first restore (otherwise
+        #    we'd get a foo=3).
+        #
+        self.assertEqual(obs.seen, ["on_foo:foo=2", "on_foo:foo=2"])
+
+    def test_weak_observer(self):
+        framework = self.create_framework()
+
+        observed_events = []
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyEvents(ObjectEvents):
+            foo = EventSource(MyEvent)
+
+        class MyNotifier(Object):
+            on = MyEvents()
+
+        class MyObserver(Object):
+            def on_foo(self, event):
+                observed_events.append("foo")
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "2")
+
+        framework.observe(pub.on.foo, obs)
+        pub.on.foo.emit()
+        self.assertEqual(observed_events, ["foo"])
+        # Now delete the observer, and note that when we emit the event, it
+        # doesn't update the local slice again
+        del obs
+        gc.collect()
+        pub.on.foo.emit()
+        self.assertEqual(observed_events, ["foo"])
+
+    def test_forget_and_multiple_objects(self):
+        framework = self.create_framework()
+
+        class MyObject(Object):
+            pass
+
+        o1 = MyObject(framework, "path")
+        # Creating a second object at the same path should fail with RuntimeError
+        with self.assertRaises(RuntimeError):
+            o2 = MyObject(framework, "path")
+        # Unless we _forget the object first
+        framework._forget(o1)
+        o2 = MyObject(framework, "path")
+        self.assertEqual(o1.handle.path, o2.handle.path)
+        # Deleting the tracked object should also work
+        del o2
+        gc.collect()
+        o3 = MyObject(framework, "path")
+        self.assertEqual(o1.handle.path, o3.handle.path)
+        framework.close()
+        # Or using a second framework
+        framework_copy = self.create_framework()
+        o_copy = MyObject(framework_copy, "path")
+        self.assertEqual(o1.handle.path, o_copy.handle.path)
+
+    def test_forget_and_multiple_objects_with_load_snapshot(self):
+        framework = self.create_framework()
+
+        class MyObject(Object):
+            def __init__(self, parent, name):
+                super().__init__(parent, name)
+                self.value = name
+
+            def snapshot(self):
+                return self.value
+
+            def restore(self, value):
+                self.value = value
+
+        framework.register_type(MyObject, None, MyObject.handle_kind)
+        o1 = MyObject(framework, "path")
+        framework.save_snapshot(o1)
+        framework.commit()
+        o_handle = o1.handle
+        del o1
+        gc.collect()
+        o2 = framework.load_snapshot(o_handle)
+        # Trying to load_snapshot a second object at the same path should fail with RuntimeError
+        with self.assertRaises(RuntimeError):
+            framework.load_snapshot(o_handle)
+        # Unless we _forget the object first
+        framework._forget(o2)
+        o3 = framework.load_snapshot(o_handle)
+        self.assertEqual(o2.value, o3.value)
+        # A loaded object also prevents direct creation of an object
+        with self.assertRaises(RuntimeError):
+            MyObject(framework, "path")
+        framework.close()
+        # But we can create an object, or load a snapshot in a copy of the framework
+        framework_copy1 = self.create_framework()
+        o_copy1 = MyObject(framework_copy1, "path")
+        self.assertEqual(o_copy1.value, "path")
+        framework_copy1.close()
+        framework_copy2 = self.create_framework()
+        framework_copy2.register_type(MyObject, None, MyObject.handle_kind)
+        o_copy2 = framework_copy2.load_snapshot(o_handle)
+        self.assertEqual(o_copy2.value, "path")
+
+    def test_events_base(self):
+        framework = self.create_framework()
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyEvents(ObjectEvents):
+            foo = EventSource(MyEvent)
+            bar = EventSource(MyEvent)
+
+        class MyNotifier(Object):
+            on = MyEvents()
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append("on_foo:{}".format(event.handle.kind))
+                event.defer()
+
+            def on_bar(self, event):
+                self.seen.append("on_bar:{}".format(event.handle.kind))
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
+        # Confirm that temporary persistence of BoundEvents doesn't cause errors,
+        # and that events can be observed.
+        for bound_event in [pub.on.foo, pub.on.bar]:
+            framework.observe(bound_event, obs)
+
+        # Confirm that events can be emitted and seen.
+        pub.on.foo.emit()
+
+        self.assertEqual(obs.seen, ["on_foo:foo"])
+
+    def test_conflicting_event_attributes(self):
+        class MyEvent(EventBase):
+            pass
+
+        event = EventSource(MyEvent)
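+        # A single EventSource instance binds to the first class that uses it;
+        # reusing it on another class or attribute should raise RuntimeError.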
+
+        class MyEvents(ObjectEvents):
+            foo = event
+
+        with self.assertRaises(RuntimeError) as cm:
+            class OtherEvents(ObjectEvents):
+                foo = event
+        self.assertEqual(
+            str(cm.exception),
+            "EventSource(MyEvent) reused as MyEvents.foo and OtherEvents.foo")
+
+        with self.assertRaises(RuntimeError) as cm:
+            class MyNotifier(Object):
+                on = MyEvents()
+                bar = event
+        self.assertEqual(
+            str(cm.exception),
+            "EventSource(MyEvent) reused as MyEvents.foo and MyNotifier.bar")
+
+    def test_reemit_ignores_unknown_event_type(self):
+        # The event type may have been gone for good, and nobody cares,
+        # so this shouldn't be an error scenario.
+
+        framework = self.create_framework()
+
+        class MyEvent(EventBase):
+            pass
+
+        class MyNotifier(Object):
+            foo = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append(event.handle)
+                event.defer()
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
+        framework.observe(pub.foo, obs)
+        pub.foo.emit()
+
+        event_handle = obs.seen[0]
+        self.assertEqual(event_handle.kind, "foo")
+
+        framework.commit()
+        framework.close()
+
+        framework_copy = self.create_framework()
+
+        # No errors on missing event types here.
+        framework_copy.reemit()
+
+        # Register the type and check that the event is gone from storage.
+        framework_copy.register_type(MyEvent, event_handle.parent, event_handle.kind)
+        self.assertRaises(NoSnapshotError, framework_copy.load_snapshot, event_handle)
+
+    def test_auto_register_event_types(self):
+        framework = self.create_framework()
+
+        class MyFoo(EventBase):
+            pass
+
+        class MyBar(EventBase):
+            pass
+
+        class MyEvents(ObjectEvents):
+            foo = EventSource(MyFoo)
+
+        class MyNotifier(Object):
+            on = MyEvents()
+            bar = EventSource(MyBar)
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind))
+                event.defer()
+
+            def on_bar(self, event):
+                self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind))
+                event.defer()
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
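+        # These first emissions happen before anything observes the events; they
+        # exercise auto-registration of the event types but are not seen by the observer.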
+        pub.on.foo.emit()
+        pub.bar.emit()
+
+        framework.observe(pub.on.foo, obs)
+        framework.observe(pub.bar, obs)
+
+        pub.on.foo.emit()
+        pub.bar.emit()
+
+        self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"])
+
+    def test_dynamic_event_types(self):
+        framework = self.create_framework()
+
+        class MyEventsA(ObjectEvents):
+            handle_kind = 'on_a'
+
+        class MyEventsB(ObjectEvents):
+            handle_kind = 'on_b'
+
+        class MyNotifier(Object):
+            on_a = MyEventsA()
+            on_b = MyEventsB()
+
+        class MyObserver(Object):
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append("on_foo:{}:{}".format(type(event).__name__, event.handle.kind))
+                event.defer()
+
+            def on_bar(self, event):
+                self.seen.append("on_bar:{}:{}".format(type(event).__name__, event.handle.kind))
+                event.defer()
+
+        pub = MyNotifier(framework, "1")
+        obs = MyObserver(framework, "1")
+
+        class MyFoo(EventBase):
+            pass
+
+        class MyBar(EventBase):
+            pass
+
+        class DeadBeefEvent(EventBase):
+            pass
+
+        class NoneEvent(EventBase):
+            pass
+
+        pub.on_a.define_event("foo", MyFoo)
+        pub.on_b.define_event("bar", MyBar)
+
+        framework.observe(pub.on_a.foo, obs)
+        framework.observe(pub.on_b.bar, obs)
+
+        pub.on_a.foo.emit()
+        pub.on_b.bar.emit()
+
+        self.assertEqual(obs.seen, ["on_foo:MyFoo:foo", "on_bar:MyBar:bar"])
+
+        # Definitions remained local to the specific type.
+        self.assertRaises(AttributeError, lambda: pub.on_a.bar)
+        self.assertRaises(AttributeError, lambda: pub.on_b.foo)
+
+        # Try to use an event name which is not a valid python identifier.
+        with self.assertRaises(RuntimeError):
+            pub.on_a.define_event("dead-beef", DeadBeefEvent)
+
+        # Try to use a python keyword for an event name.
+        with self.assertRaises(RuntimeError):
+            pub.on_a.define_event("None", NoneEvent)
+
+        # Try to override an existing attribute.
+        with self.assertRaises(RuntimeError):
+            pub.on_a.define_event("foo", MyFoo)
+
+    def test_event_key_roundtrip(self):
+        class MyEvent(EventBase):
+            def __init__(self, handle, value):
+                super().__init__(handle)
+                self.value = value
+
+            def snapshot(self):
+                return self.value
+
+            def restore(self, value):
+                self.value = value
+
+        class MyNotifier(Object):
+            foo = EventSource(MyEvent)
+
+        class MyObserver(Object):
+            has_deferred = False
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self.seen = []
+
+            def on_foo(self, event):
+                self.seen.append((event.handle.key, event.value))
+                # Defer only the first event, and only once.
+                if not MyObserver.has_deferred:
+                    event.defer()
+                    MyObserver.has_deferred = True
+
+        framework1 = self.create_framework()
+        pub1 = MyNotifier(framework1, "pub")
+        obs1 = MyObserver(framework1, "obs")
+        framework1.observe(pub1.foo, obs1)
+        pub1.foo.emit('first')
+        self.assertEqual(obs1.seen, [('1', 'first')])
+
+        framework1.commit()
+        framework1.close()
+        del framework1
+
+        framework2 = self.create_framework()
+        pub2 = MyNotifier(framework2, "pub")
+        obs2 = MyObserver(framework2, "obs")
+        framework2.observe(pub2.foo, obs2)
+        pub2.foo.emit('second')
+        framework2.reemit()
+
+        # The first observer didn't get updated, since the framework it was bound to is gone.
+        self.assertEqual(obs1.seen, [('1', 'first')])
+        # Second observer saw the new event plus the reemit of the first event.
+        # (The event key goes up by 2 due to the pre-commit and commit events.)
+        self.assertEqual(obs2.seen, [('4', 'second'), ('1', 'first')])
+
+    def test_helper_properties(self):
+        framework = self.create_framework()
+        framework.model = 'test-model'
+        framework.meta = 'test-meta'
+
+        my_obj = Object(framework, 'my_obj')
+        self.assertEqual(my_obj.model, framework.model)
+
+    def test_ban_concurrent_frameworks(self):
+        f = self.create_framework()
+        with self.assertRaises(Exception) as cm:
+            self.create_framework()
+        self.assertIn('database is locked', str(cm.exception))
+        f.close()
+
+    def test_snapshot_saving_restricted_to_simple_types(self):
+        # This cannot be saved, as it contains non-simple types!
+        to_be_saved = {"bar": TestFramework}
+
+        class FooEvent(EventBase):
+            def snapshot(self):
+                return to_be_saved
+
+        handle = Handle(None, "a_foo", "some_key")
+        event = FooEvent(handle)
+
+        framework = self.create_framework()
+        framework.register_type(FooEvent, None, handle.kind)
+        with self.assertRaises(ValueError) as cm:
+            framework.save_snapshot(event)
+        expected = (
+            "unable to save the data for FooEvent, it must contain only simple types: "
+            "{'bar': <class 'test.test_framework.TestFramework'>}")
+        self.assertEqual(str(cm.exception), expected)
+
+
+class TestStoredState(unittest.TestCase):
+
+    def setUp(self):
+        self.tmpdir = Path(tempfile.mkdtemp())
+        self.addCleanup(shutil.rmtree, str(self.tmpdir))
+
+    def create_framework(self, cls=Framework):
+        framework = cls(self.tmpdir / "framework.data", self.tmpdir, None, None)
+        self.addCleanup(framework.close)
+        return framework
+
+    def test_basic_state_storage(self):
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        self._stored_state_tests(SomeObject)
+
+    def test_straight_subclass(self):
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        class Sub(SomeObject):
+            pass
+
+        self._stored_state_tests(Sub)
+
+    def test_straight_sub_subclass(self):
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        class Sub(SomeObject):
+            pass
+
+        class SubSub(SomeObject):
+            pass
+
+        self._stored_state_tests(SubSub)
+
+    def test_two_subclasses(self):
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        class SubA(SomeObject):
+            pass
+
+        class SubB(SomeObject):
+            pass
+
+        self._stored_state_tests(SubA)
+        self._stored_state_tests(SubB)
+
+    def test_the_crazy_thing(self):
+        class NoState(Object):
+            pass
+
+        class StatedObject(NoState):
+            _stored = StoredState()
+
+        class Sibling(NoState):
+            pass
+
+        class FinalChild(StatedObject, Sibling):
+            pass
+
+        self._stored_state_tests(FinalChild)
+
+    def _stored_state_tests(self, cls):
+        framework = self.create_framework()
+        obj = cls(framework, "1")
+
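+        # Reading an attribute that has never been set should raise AttributeError.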
+        try:
+            obj._stored.foo
+        except AttributeError as e:
+            self.assertEqual(str(e), "attribute 'foo' is not stored")
+        else:
+            self.fail("AttributeError not raised")
+
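+        # The attribute name 'on' is reserved, so setting it should also fail.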
+        try:
+            obj._stored.on = "nonono"
+        except AttributeError as e:
+            self.assertEqual(str(e), "attribute 'on' is reserved and cannot be set")
+        else:
+            self.fail("AttributeError not raised")
+
+        obj._stored.foo = 41
+        obj._stored.foo = 42
+        obj._stored.bar = "s"
+        obj._stored.baz = 4.2
+        obj._stored.bing = True
+
+        self.assertEqual(obj._stored.foo, 42)
+
+        framework.commit()
+
+        # This won't be committed, and should not be seen.
+        obj._stored.foo = 43
+
+        framework.close()
+
+        # Since this has the same absolute object handle, it will get its state back.
+        framework_copy = self.create_framework()
+        obj_copy = cls(framework_copy, "1")
+        self.assertEqual(obj_copy._stored.foo, 42)
+        self.assertEqual(obj_copy._stored.bar, "s")
+        self.assertEqual(obj_copy._stored.baz, 4.2)
+        self.assertEqual(obj_copy._stored.bing, True)
+
+        framework_copy.close()
+
+    def test_two_subclasses_no_conflicts(self):
+        class Base(Object):
+            _stored = StoredState()
+
+        class SubA(Base):
+            pass
+
+        class SubB(Base):
+            pass
+
+        framework = self.create_framework()
+        a = SubA(framework, None)
+        b = SubB(framework, None)
+        z = Base(framework, None)
+
+        a._stored.foo = 42
+        b._stored.foo = "hello"
+        z._stored.foo = {1}
+
+        framework.commit()
+        framework.close()
+
+        framework2 = self.create_framework()
+        a2 = SubA(framework2, None)
+        b2 = SubB(framework2, None)
+        z2 = Base(framework2, None)
+
+        self.assertEqual(a2._stored.foo, 42)
+        self.assertEqual(b2._stored.foo, "hello")
+        self.assertEqual(z2._stored.foo, {1})
+
+    def test_two_names_one_state(self):
+        class Mine(Object):
+            _stored = StoredState()
+            _stored2 = _stored
+
+        framework = self.create_framework()
+        obj = Mine(framework, None)
+
+        with self.assertRaises(RuntimeError):
+            obj._stored.foo = 42
+
+        with self.assertRaises(RuntimeError):
+            obj._stored2.foo = 42
+
+        framework.close()
+
+        # make sure we're not changing the object on failure
+        self.assertNotIn("_stored", obj.__dict__)
+        self.assertNotIn("_stored2", obj.__dict__)
+
+    def test_same_name_two_classes(self):
+        class Base(Object):
+            pass
+
+        class A(Base):
+            _stored = StoredState()
+
+        class B(Base):
+            _stored = A._stored
+
+        framework = self.create_framework()
+        a = A(framework, None)
+        b = B(framework, None)
+
+        # NOTE it's the second one that actually triggers the
+        # exception, but that's an implementation detail
+        a._stored.foo = 42
+
+        with self.assertRaises(RuntimeError):
+            b._stored.foo = "xyzzy"
+
+        framework.close()
+
+        # make sure we're not changing the object on failure
+        self.assertNotIn("_stored", b.__dict__)
+
+    def test_mutable_types_invalid(self):
+        framework = self.create_framework()
+
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        obj = SomeObject(framework, '1')
+        try:
+            class CustomObject:
+                pass
+            obj._stored.foo = CustomObject()
+        except AttributeError as e:
+            self.assertEqual(
+                str(e),
+                "attribute 'foo' cannot be a CustomObject: must be int/float/dict/list/etc")
+        else:
+            self.fail('AttributeError not raised')
+
+        framework.commit()
+
+    def test_mutable_types(self):
+        # Test and validation functions in a list of 2-tuples.
+        # Assignment and keywords like del are not supported in lambdas,
+        # so functions are used instead.
+        test_operations = [(
+            lambda: {},         # Operand A.
+            None,               # Operand B.
+            {},                 # Expected result.
+            lambda a, b: None,  # Operation to perform.
+            lambda res, expected_res: self.assertEqual(res, expected_res)  # Validation to perform.
+        ), (
+            lambda: {},
+            {'a': {}},
+            {'a': {}},
+            lambda a, b: a.update(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: {'a': {}},
+            {'b': 'c'},
+            {'a': {'b': 'c'}},
+            lambda a, b: a['a'].update(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: {'a': {'b': 'c'}},
+            {'d': 'e'},
+            {'a': {'b': 'c', 'd': 'e'}},
+            lambda a, b: a['a'].update(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: {'a': {'b': 'c', 'd': 'e'}},
+            'd',
+            {'a': {'b': 'c'}},
+            lambda a, b: a['a'].pop(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: {'s': set()},
+            'a',
+            {'s': {'a'}},
+            lambda a, b: a['s'].add(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: {'s': {'a'}},
+            'a',
+            {'s': set()},
+            lambda a, b: a['s'].discard(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: [],
+            None,
+            [],
+            lambda a, b: None,
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: [],
+            'a',
+            ['a'],
+            lambda a, b: a.append(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['a'],
+            ['c'],
+            ['a', ['c']],
+            lambda a, b: a.append(b),
+            lambda res, expected_res: (
+                self.assertEqual(res, expected_res),
+                self.assertIsInstance(res[1], StoredList),
+            )
+        ), (
+            lambda: ['a', ['c']],
+            'b',
+            ['b', 'a', ['c']],
+            lambda a, b: a.insert(0, b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['b', 'a', ['c']],
+            ['d'],
+            ['b', ['d'], 'a', ['c']],
+            lambda a, b: a.insert(1, b),
+            lambda res, expected_res: (
+                self.assertEqual(res, expected_res),
+                self.assertIsInstance(res[1], StoredList)
+            ),
+        ), (
+            lambda: ['b', 'a', ['c']],
+            ['d'],
+            ['b', ['d'], ['c']],
+            # a[1] = b
+            lambda a, b: a.__setitem__(1, b),
+            lambda res, expected_res: (
+                self.assertEqual(res, expected_res),
+                self.assertIsInstance(res[1], StoredList)
+            ),
+        ), (
+            lambda: ['b', ['d'], 'a', ['c']],
+            0,
+            [['d'], 'a', ['c']],
+            lambda a, b: a.pop(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: [['d'], 'a', ['c']],
+            ['d'],
+            ['a', ['c']],
+            lambda a, b: a.remove(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['a', ['c']],
+            'd',
+            ['a', ['c', 'd']],
+            lambda a, b: a[1].append(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['a', ['c', 'd']],
+            1,
+            ['a', ['c']],
+            lambda a, b: a[1].pop(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['a', ['c']],
+            'd',
+            ['a', ['c', 'd']],
+            lambda a, b: a[1].insert(1, b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: ['a', ['c', 'd']],
+            'd',
+            ['a', ['c']],
+            lambda a, b: a[1].remove(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: set(),
+            None,
+            set(),
+            lambda a, b: None,
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: set(),
+            'a',
+            set(['a']),
+            lambda a, b: a.add(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: set(['a']),
+            'a',
+            set(),
+            lambda a, b: a.discard(b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        ), (
+            lambda: set(),
+            {'a'},
+            set(),
+            # Nested sets are not allowed as sets themselves are not hashable.
+            lambda a, b: self.assertRaises(TypeError, a.add, b),
+            lambda res, expected_res: self.assertEqual(res, expected_res)
+        )]
+
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        class WrappedFramework(Framework):
+            def __init__(self, data_path, charm_dir, meta, model):
+                super().__init__(data_path, charm_dir, meta, model)
+                self.snapshots = []
+
+            def save_snapshot(self, value):
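+                # Record snapshots of our object's stored state so the test can assert
+                # exactly what gets persisted; everything else just passes through.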
+                if value.handle.path == 'SomeObject[1]/StoredStateData[_stored]':
+                    self.snapshots.append((type(value), value.snapshot()))
+                return super().save_snapshot(value)
+
+        # Validate correctness of modification operations.
+        for get_a, b, expected_res, op, validate_op in test_operations:
+            framework = self.create_framework(cls=WrappedFramework)
+            obj = SomeObject(framework, '1')
+
+            obj._stored.a = get_a()
+            self.assertTrue(isinstance(obj._stored, BoundStoredState))
+
+            op(obj._stored.a, b)
+            validate_op(obj._stored.a, expected_res)
+
+            obj._stored.a = get_a()
+            framework.commit()
+            # We should see an update for the initialization of 'a'.
+            self.assertEqual(framework.snapshots, [
+                (StoredStateData, {'a': get_a()}),
+            ])
+            del obj
+            gc.collect()
+            obj_copy1 = SomeObject(framework, '1')
+            self.assertEqual(obj_copy1._stored.a, get_a())
+
+            op(obj_copy1._stored.a, b)
+            validate_op(obj_copy1._stored.a, expected_res)
+            framework.commit()
+            framework.close()
+
+            framework_copy = self.create_framework(cls=WrappedFramework)
+
+            obj_copy2 = SomeObject(framework_copy, '1')
+
+            validate_op(obj_copy2._stored.a, expected_res)
+
+            # Commit saves the pre-commit and commit events, and the framework
+            # event counter, but shouldn't update the stored state of my object
+            framework.snapshots.clear()
+            framework_copy.commit()
+            self.assertEqual(framework_copy.snapshots, [])
+            framework_copy.close()
+
+    def test_comparison_operations(self):
+        test_operations = [(
+            {"1"},               # Operand A.
+            {"1", "2"},          # Operand B.
+            lambda a, b: a < b,  # Operation to test.
+            True,                # Result of op(A, B).
+            False,               # Result of op(B, A).
+        ), (
+            {"1"},
+            {"1", "2"},
+            lambda a, b: a > b,
+            False,
+            True
+        ), (
+            # Empty set comparison.
+            set(),
+            set(),
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            {"a", "c"},
+            {"c", "a"},
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            dict(),
+            dict(),
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            {"1": "2"},
+            {"1": "2"},
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            {"1": "2"},
+            {"1": "3"},
+            lambda a, b: a == b,
+            False,
+            False
+        ), (
+            [],
+            [],
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            [1, 2],
+            [1, 2],
+            lambda a, b: a == b,
+            True,
+            True
+        ), (
+            [1, 2, 5, 6],
+            [1, 2, 5, 8, 10],
+            lambda a, b: a <= b,
+            True,
+            False
+        ), (
+            [1, 2, 5, 6],
+            [1, 2, 5, 8, 10],
+            lambda a, b: a < b,
+            True,
+            False
+        ), (
+            [1, 2, 5, 8],
+            [1, 2, 5, 6, 10],
+            lambda a, b: a > b,
+            True,
+            False
+        ), (
+            [1, 2, 5, 8],
+            [1, 2, 5, 6, 10],
+            lambda a, b: a >= b,
+            True,
+            False
+        )]
+
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        framework = self.create_framework()
+
+        for i, (a, b, op, op_ab, op_ba) in enumerate(test_operations):
+            obj = SomeObject(framework, str(i))
+            obj._stored.a = a
+            self.assertEqual(op(obj._stored.a, b), op_ab)
+            self.assertEqual(op(b, obj._stored.a), op_ba)
+
+    def test_set_operations(self):
+        test_operations = [(
+            {"1"},  # A set to test an operation against (other_set).
+            lambda a, b: a | b,  # An operation to test.
+            {"1", "a", "b"},  # The expected result of operation(obj._stored.set, other_set).
+            {"1", "a", "b"}  # The expected result of operation(other_set, obj._stored.set).
+        ), (
+            {"a", "c"},
+            lambda a, b: a - b,
+            {"b"},
+            {"c"}
+        ), (
+            {"a", "c"},
+            lambda a, b: a & b,
+            {"a"},
+            {"a"}
+        ), (
+            {"a", "c", "d"},
+            lambda a, b: a ^ b,
+            {"b", "c", "d"},
+            {"b", "c", "d"}
+        ), (
+            set(),
+            lambda a, b: set(a),
+            {"a", "b"},
+            set()
+        )]
+
+        class SomeObject(Object):
+            _stored = StoredState()
+
+        framework = self.create_framework()
+
+        # Validate that operations between StoredSet and built-in sets
+        # only result in built-in sets being returned.
+        # Make sure that commutativity is preserved and that the
+        # original sets are not changed or used as a result.
+        for i, (variable_operand, operation, ab_res, ba_res) in enumerate(test_operations):
+            obj = SomeObject(framework, str(i))
+            obj._stored.set = {"a", "b"}
+
+            for a, b, expected in [
+                    (obj._stored.set, variable_operand, ab_res),
+                    (variable_operand, obj._stored.set, ba_res)]:
+                old_a = set(a)
+                old_b = set(b)
+
+                result = operation(a, b)
+                self.assertEqual(result, expected)
+
+                # Common sanity checks
+                self.assertIsNot(obj._stored.set._under, result)
+                self.assertIsNot(result, a)
+                self.assertIsNot(result, b)
+                self.assertEqual(a, old_a)
+                self.assertEqual(b, old_b)
+
+    def test_set_default(self):
+        framework = self.create_framework()
+
+        class StatefulObject(Object):
+            _stored = StoredState()
+        parent = StatefulObject(framework, 'key')
+        parent._stored.set_default(foo=1)
+        self.assertEqual(parent._stored.foo, 1)
+        parent._stored.set_default(foo=2)
+        # foo was already set, so it doesn't get replaced
+        self.assertEqual(parent._stored.foo, 1)
+        parent._stored.set_default(foo=3, bar=4)
+        self.assertEqual(parent._stored.foo, 1)
+        self.assertEqual(parent._stored.bar, 4)
+        # Reloading the state still leaves things at the earlier values;
+        # set_default doesn't override values that are already stored.
+        framework.commit()
+        del parent
+        parent = StatefulObject(framework, 'key')
+        parent._stored.set_default(foo=5, bar=6)
+        self.assertEqual(parent._stored.foo, 1)
+        self.assertEqual(parent._stored.bar, 4)
+        # TODO: jam 2020-01-30 is there a clean way to tell that
+        #       parent._stored._data.dirty is False?
+
+
+def create_model(testcase):
+    """Create a Model object."""
+    unit_name = 'myapp/0'
+    patcher = patch.dict(os.environ, {'JUJU_UNIT_NAME': unit_name})
+    patcher.start()
+    testcase.addCleanup(patcher.stop)
+
+    backend = model.ModelBackend()
+    meta = charm.CharmMeta()
+    test_model = model.Model('myapp/0', meta, backend)
+    return test_model
+
+
+def create_framework(testcase, model=None):
+    """Create a Framework object."""
+    framework = Framework(":memory:", charm_dir='non-existent', meta=None, model=model)
+    testcase.addCleanup(framework.close)
+    return framework
+
+
+class GenericObserver(Object):
+    """Generic observer for the tests."""
+
+    def __init__(self, parent, key):
+        super().__init__(parent, key)
+        self.called = False
+
+    def callback_method(self, event):
+        """Set the instance .called to True."""
+        self.called = True
+
+
+@patch('sys.stderr', new_callable=io.StringIO)
+class BreakpointTests(unittest.TestCase):
+
+    def test_ignored(self, fake_stderr):
+        # It doesn't really do anything unless the JUJU_DEBUG_AT environment variable is set.
+        with patch.dict(os.environ):
+            os.environ.pop('JUJU_DEBUG_AT', None)
+            framework = create_framework(self)
+
+        with patch('pdb.Pdb.set_trace') as mock:
+            framework.breakpoint()
+        self.assertEqual(mock.call_count, 0)
+        self.assertEqual(fake_stderr.getvalue(), "")
+
+    def test_pdb_properly_called(self, fake_stderr):
+        # The debugger needs to leave the user in the frame where the breakpoint is executed,
+        # which for this test is the frame from which we call it below.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = create_framework(self)
+
+        with patch('pdb.Pdb.set_trace') as mock:
+            this_frame = inspect.currentframe()
+            framework.breakpoint()
+
+        self.assertEqual(mock.call_count, 1)
+        self.assertEqual(mock.call_args, ((this_frame,), {}))
+
+    def test_welcome_message(self, fake_stderr):
+        # Check that an initial message is shown to the user when code is interrupted.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = create_framework(self)
+        with patch('pdb.Pdb.set_trace'):
+            framework.breakpoint()
+        self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+
+    def test_welcome_message_not_multiple(self, fake_stderr):
+        # Check that an initial message is NOT shown twice if the breakpoint is exercised
+        # twice in the same run.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            framework = create_framework(self)
+        with patch('pdb.Pdb.set_trace'):
+            framework.breakpoint()
+            self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+            framework.breakpoint()
+            self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+
+    def test_builtin_breakpoint_hooked(self, fake_stderr):
+        # Verify that the proper hook is set.
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'all'}):
+            create_framework(self)  # creating the framework sets up the hook
+        with patch('pdb.Pdb.set_trace') as mock:
+            # Calling through sys, not breakpoint() directly, so we can run the
+            # tests with Py < 3.7.
+            sys.breakpointhook()
+        self.assertEqual(mock.call_count, 1)
+
+    def test_breakpoint_names(self, fake_stderr):
+        framework = create_framework(self)
+
+        # Name rules:
+        # - must start and end with lowercase alphanumeric characters
+        # - only contain lowercase alphanumeric characters, or the hyphen "-"
+        good_names = [
+            'foobar',
+            'foo-bar-baz',
+            'foo-------bar',
+            'foo123',
+            '778',
+            '77-xx',
+            'a-b',
+            'ab',
+            'x',
+        ]
+        for name in good_names:
+            with self.subTest(name=name):
+                framework.breakpoint(name)
+
+        bad_names = [
+            '',
+            '.',
+            '-',
+            '...foo',
+            'foo.bar',
+            'bar--',
+            'FOO',
+            'FooBar',
+            'foo bar',
+            'foo_bar',
+            '/foobar',
+            'break-here-☚',
+        ]
+        msg = 'breakpoint names must look like "foo" or "foo-bar"'
+        for name in bad_names:
+            with self.subTest(name=name):
+                with self.assertRaises(ValueError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), msg)
+
+        reserved_names = [
+            'all',
+            'hook',
+        ]
+        msg = 'breakpoint names "all" and "hook" are reserved'
+        for name in reserved_names:
+            with self.subTest(name=name):
+                with self.assertRaises(ValueError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), msg)
+
+        not_really_names = [
+            123,
+            1.1,
+            False,
+        ]
+        for name in not_really_names:
+            with self.subTest(name=name):
+                with self.assertRaises(TypeError) as cm:
+                    framework.breakpoint(name)
+                self.assertEqual(str(cm.exception), 'breakpoint names must be strings')
+
+    def check_trace_set(self, envvar_value, breakpoint_name, call_count):
+        """Helper to check the diverse combinations of situations."""
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': envvar_value}):
+            framework = create_framework(self)
+        with patch('pdb.Pdb.set_trace') as mock:
+            framework.breakpoint(breakpoint_name)
+        self.assertEqual(mock.call_count, call_count)
+
+    def test_unnamed_indicated_all(self, fake_stderr):
+        # If 'all' is indicated, unnamed breakpoints will always activate.
+        self.check_trace_set('all', None, 1)
+
+    def test_unnamed_indicated_hook(self, fake_stderr):
+        # The special value 'hook' was indicated, so an unnamed breakpoint does nothing.
+        self.check_trace_set('hook', None, 0)
+
+    def test_named_indicated_specifically(self, fake_stderr):
+        # Some breakpoint was indicated, and the framework call used exactly that name.
+        self.check_trace_set('mybreak', 'mybreak', 1)
+
+    def test_named_indicated_somethingelse(self, fake_stderr):
+        # Some breakpoint was indicated, but the framework call did not use that name.
+        self.check_trace_set('some-breakpoint', None, 0)
+
+    def test_named_indicated_ingroup(self, fake_stderr):
+        # Multiple breakpoints were indicated, and the framework call used one of those names.
+        self.check_trace_set('some,mybreak,foobar', 'mybreak', 1)
+
+    def test_named_indicated_all(self, fake_stderr):
+        # The special value 'all' was indicated, which covers any named breakpoint as well.
+        self.check_trace_set('all', 'mybreak', 1)
+
+    def test_named_indicated_hook(self, fake_stderr):
+        # The special value 'hook' was indicated, so a named breakpoint call does nothing.
+        self.check_trace_set('hook', 'mybreak', 0)
+
+
+class DebugHookTests(unittest.TestCase):
+
+    def test_envvar_parsing_missing(self):
+        with patch.dict(os.environ):
+            os.environ.pop('JUJU_DEBUG_AT', None)
+            framework = create_framework(self)
+        self.assertEqual(framework._juju_debug_at, ())
+
+    def test_envvar_parsing_empty(self):
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': ''}):
+            framework = create_framework(self)
+        self.assertEqual(framework._juju_debug_at, ())
+
+    def test_envvar_parsing_simple(self):
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'hook'}):
+            framework = create_framework(self)
+        self.assertEqual(framework._juju_debug_at, ['hook'])
+
+    def test_envvar_parsing_multiple(self):
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'foo,bar,all'}):
+            framework = create_framework(self)
+        self.assertEqual(framework._juju_debug_at, ['foo', 'bar', 'all'])
+
+    def test_basic_interruption_enabled(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ['hook']
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.install, observer.callback_method)
+
+        with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr:
+            with patch('pdb.runcall') as mock:
+                publisher.install.emit()
+
+        # Check that the pdb module was used correctly and that the callback method was
+        # NOT called (we intercepted the normal pdb behaviour precisely to verify that
+        # the framework didn't invoke the callback directly).
+        self.assertEqual(mock.call_count, 1)
+        expected_callback, expected_event = mock.call_args[0]
+        self.assertEqual(expected_callback, observer.callback_method)
+        self.assertIsInstance(expected_event, EventBase)
+        self.assertFalse(observer.called)
+
+        # Verify proper message was given to the user.
+        self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+
+    def test_actions_are_interrupted(self):
+        test_model = create_model(self)
+        framework = create_framework(self, model=test_model)
+        framework._juju_debug_at = ['hook']
+
+        class CustomEvents(ObjectEvents):
+            foobar_action = EventSource(charm.ActionEvent)
+
+        publisher = CustomEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.foobar_action, observer.callback_method)
+        fake_script(self, 'action-get', "echo {}")
+
+        with patch('sys.stderr', new_callable=io.StringIO):
+            with patch('pdb.runcall') as mock:
+                with patch.dict(os.environ, {'JUJU_ACTION_NAME': 'foobar'}):
+                    publisher.foobar_action.emit()
+
+        self.assertEqual(mock.call_count, 1)
+        self.assertFalse(observer.called)
+
+    def test_internal_events_not_interrupted(self):
+        class MyNotifier(Object):
+            """Generic notifier for the tests."""
+            bar = EventSource(EventBase)
+
+        framework = create_framework(self)
+        framework._juju_debug_at = ['hook']
+
+        publisher = MyNotifier(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.bar, observer.callback_method)
+
+        with patch('pdb.runcall') as mock:
+            publisher.bar.emit()
+
+        self.assertEqual(mock.call_count, 0)
+        self.assertTrue(observer.called)
+
+    def test_envvar_mixed(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ['foo', 'hook', 'all', 'whatever']
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.install, observer.callback_method)
+
+        with patch('sys.stderr', new_callable=io.StringIO):
+            with patch('pdb.runcall') as mock:
+                publisher.install.emit()
+
+        self.assertEqual(mock.call_count, 1)
+        self.assertFalse(observer.called)
+
+    def test_no_registered_method(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ['hook']
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+
+        with patch('pdb.runcall') as mock:
+            publisher.install.emit()
+
+        self.assertEqual(mock.call_count, 0)
+        self.assertFalse(observer.called)
+
+    def test_envvar_nohook(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ['something-else']
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.install, observer.callback_method)
+
+        with patch.dict(os.environ, {'JUJU_DEBUG_AT': 'something-else'}):
+            with patch('pdb.runcall') as mock:
+                publisher.install.emit()
+
+        self.assertEqual(mock.call_count, 0)
+        self.assertTrue(observer.called)
+
+    def test_envvar_missing(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ()
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.install, observer.callback_method)
+
+        with patch('pdb.runcall') as mock:
+            publisher.install.emit()
+
+        self.assertEqual(mock.call_count, 0)
+        self.assertTrue(observer.called)
+
+    def test_welcome_message_not_multiple(self):
+        framework = create_framework(self)
+        framework._juju_debug_at = ['hook']
+
+        publisher = charm.CharmEvents(framework, "1")
+        observer = GenericObserver(framework, "1")
+        framework.observe(publisher.install, observer.callback_method)
+
+        with patch('sys.stderr', new_callable=io.StringIO) as fake_stderr:
+            with patch('pdb.runcall') as mock:
+                publisher.install.emit()
+                self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+                publisher.install.emit()
+                self.assertEqual(fake_stderr.getvalue(), _BREAKPOINT_WELCOME_MESSAGE)
+        self.assertEqual(mock.call_count, 2)
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_helpers.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..131681d0ee5f0f5529dde828c0739bd9b783d471
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_helpers.py
@@ -0,0 +1,81 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pathlib
+import subprocess
+import shutil
+import tempfile
+import unittest
+
+
+def fake_script(test_case, name, content):
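+    # Create an executable stub called `name` on PATH; it logs every invocation
+    # to calls.txt and then runs the provided `content`.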
+    if not hasattr(test_case, 'fake_script_path'):
+        fake_script_path = tempfile.mkdtemp('-fake_script')
+        os.environ['PATH'] = '{}:{}'.format(fake_script_path, os.environ["PATH"])
+
+        def cleanup():
+            shutil.rmtree(fake_script_path)
+            os.environ['PATH'] = os.environ['PATH'].replace(fake_script_path + ':', '')
+
+        test_case.addCleanup(cleanup)
+        test_case.fake_script_path = pathlib.Path(fake_script_path)
+
+    with (test_case.fake_script_path / name).open('wt') as f:
+        # Before executing the provided script, dump the provided arguments in calls.txt.
+        f.write('''#!/bin/bash
+{ echo -n $(basename $0); printf ";%s" "$@"; echo; } >> $(dirname $0)/calls.txt
+''' + content)
+    os.chmod(str(test_case.fake_script_path / name), 0o755)
+
+
+def fake_script_calls(test_case, clear=False):
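+    # Return the recorded invocations, one list of fields per call (fields are
+    # separated by ';' in calls.txt), optionally clearing the log afterwards.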
+    try:
+        with (test_case.fake_script_path / 'calls.txt').open('r+t') as f:
+            calls = [line.split(';') for line in f.read().splitlines()]
+            if clear:
+                f.truncate(0)
+            return calls
+    except FileNotFoundError:
+        return []
+
+
+class FakeScriptTest(unittest.TestCase):
+
+    def test_fake_script_works(self):
+        fake_script(self, 'foo', 'echo foo runs')
+        fake_script(self, 'bar', 'echo bar runs')
+        output = subprocess.getoutput('foo a "b c "; bar "d e" f')
+        self.assertEqual(output, 'foo runs\nbar runs')
+        self.assertEqual(fake_script_calls(self), [
+            ['foo', 'a', 'b c '],
+            ['bar', 'd e', 'f'],
+        ])
+
+    def test_fake_script_clear(self):
+        fake_script(self, 'foo', 'echo foo runs')
+
+        output = subprocess.getoutput('foo a "b c"')
+        self.assertEqual(output, 'foo runs')
+
+        self.assertEqual(fake_script_calls(self, clear=True), [['foo', 'a', 'b c']])
+
+        fake_script(self, 'bar', 'echo bar runs')
+
+        output = subprocess.getoutput('bar "d e" f')
+        self.assertEqual(output, 'bar runs')
+
+        self.assertEqual(fake_script_calls(self, clear=True), [['bar', 'd e', 'f']])
+
+        self.assertEqual(fake_script_calls(self, clear=True), [])
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_infra.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_infra.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d269547207924078ac83f90d853611a371dabf0
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_infra.py
@@ -0,0 +1,130 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import itertools
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import unittest
+from unittest.mock import patch
+
+import autopep8
+from flake8.api.legacy import get_style_guide
+
+
+def get_python_filepaths():
+    """Helper to retrieve paths of Python files."""
+    python_paths = ['setup.py']
+    for root in ['ops', 'test']:
+        for dirpath, dirnames, filenames in os.walk(root):
+            for filename in filenames:
+                if filename.endswith(".py"):
+                    python_paths.append(os.path.join(dirpath, filename))
+    return python_paths
+
+
+class InfrastructureTests(unittest.TestCase):
+
+    def test_pep8(self):
+        # verify all files are nicely styled
+        python_filepaths = get_python_filepaths()
+        style_guide = get_style_guide()
+        fake_stdout = io.StringIO()
+        with patch('sys.stdout', fake_stdout):
+            report = style_guide.check_files(python_filepaths)
+
+        # if flake8 didn't report anything, we're done
+        if report.total_errors == 0:
+            return
+
+        # grab on which files we have issues
+        flake8_issues = fake_stdout.getvalue().split('\n')
+        broken_filepaths = {item.split(':')[0] for item in flake8_issues if item}
+
+        # give hints to the developer on how files' style could be improved
+        options = autopep8.parse_args([''])
+        options.aggressive = 1
+        options.diff = True
+        options.max_line_length = 99
+
+        issues = []
+        for filepath in broken_filepaths:
+            diff = autopep8.fix_file(filepath, options=options)
+            if diff:
+                issues.append(diff)
+
+        report = ["Please fix files as suggested by autopep8:"] + issues
+        report += ["\n-- Original flake8 reports:"] + flake8_issues
+        self.fail("\n".join(report))
+
+    def test_quote_backslashes(self):
+        # ensure we're not using unneeded backslash to escape strings
+        issues = []
+        for filepath in get_python_filepaths():
+            with open(filepath, "rt", encoding="utf8") as fh:
+                for idx, line in enumerate(fh, 1):
+                    if (r'\"' in line or r"\'" in line) and "NOQA" not in line:
+                        issues.append((filepath, idx, line.rstrip()))
+        if issues:
+            msgs = ["{}:{:d}:{}".format(*issue) for issue in issues]
+            self.fail("Spurious backslashes found, please fix these quotings:\n" + "\n".join(msgs))
+
+    def test_ensure_copyright(self):
+        # all non-empty Python files must have a proper copyright somewhere in the first 5 lines
+        issues = []
+        regex = re.compile(r"# Copyright \d\d\d\d(-\d\d\d\d)? Canonical Ltd.\n")
+        for filepath in get_python_filepaths():
+            if os.stat(filepath).st_size == 0:
+                continue
+
+            with open(filepath, "rt", encoding="utf8") as fh:
+                for line in itertools.islice(fh, 5):
+                    if regex.match(line):
+                        break
+                else:
+                    issues.append(filepath)
+        if issues:
+            self.fail("Please add copyright headers to the following files:\n" + "\n".join(issues))
+
+
+class ImportersTestCase(unittest.TestCase):
+
+    template = "from ops import {module_name}"
+
+    def test_imports(self):
+        mod_names = [
+            'charm',
+            'framework',
+            'main',
+            'model',
+            'testing',
+        ]
+
+        for name in mod_names:
+            with self.subTest(name=name):
+                self.check(name)
+
+    def check(self, name):
+        """Helper function to run the test."""
+        _, testfile = tempfile.mkstemp()
+        self.addCleanup(os.unlink, testfile)
+
+        with open(testfile, 'wt', encoding='utf8') as fh:
+            fh.write(self.template.format(module_name=name))
+
+        proc = subprocess.run([sys.executable, testfile], env={'PYTHONPATH': os.getcwd()})
+        self.assertEqual(proc.returncode, 0)
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_jujuversion.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_jujuversion.py
new file mode 100755
index 0000000000000000000000000000000000000000..d19fd60045800c61378bcb0496fc79926bc71110
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_jujuversion.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+from ops.jujuversion import JujuVersion
+
+
+class TestJujuVersion(unittest.TestCase):
+
+    def test_parsing(self):
+        test_cases = [
+            ("0.0.0", 0, 0, '', 0, 0),
+            ("0.0.2", 0, 0, '', 2, 0),
+            ("0.1.0", 0, 1, '', 0, 0),
+            ("0.2.3", 0, 2, '', 3, 0),
+            ("10.234.3456", 10, 234, '', 3456, 0),
+            ("10.234.3456.1", 10, 234, '', 3456, 1),
+            ("1.21-alpha12", 1, 21, 'alpha', 12, 0),
+            ("1.21-alpha1.34", 1, 21, 'alpha', 1, 34),
+            ("2.7", 2, 7, '', 0, 0)
+        ]
+
+        for vs, major, minor, tag, patch, build in test_cases:
+            v = JujuVersion(vs)
+            self.assertEqual(v.major, major)
+            self.assertEqual(v.minor, minor)
+            self.assertEqual(v.tag, tag)
+            self.assertEqual(v.patch, patch)
+            self.assertEqual(v.build, build)
+
+    def test_parsing_errors(self):
+        invalid_versions = [
+            "xyz",
+            "foo.bar",
+            "foo.bar.baz",
+            "dead.beef.ca.fe",
+            "1234567890.2.1",     # The major version is too long.
+            "0.2..1",             # Two periods next to each other.
+            "1.21.alpha1",        # Tag comes after period.
+            "1.21-alpha",         # No patch number but a tag is present.
+            "1.21-alpha1beta",    # Non-numeric string after the patch number.
+            "1.21-alpha-dev",     # Tag duplication.
+            "1.21-alpha_dev3",    # Underscore in a tag.
+            "1.21-alpha123dev3",  # Non-numeric string after the patch number.
+        ]
+        for v in invalid_versions:
+            with self.assertRaises(RuntimeError):
+                JujuVersion(v)
+
+    def test_equality(self):
+        test_cases = [
+            ("1.0.0", "1.0.0", True),
+            ("01.0.0", "1.0.0", True),
+            ("10.0.0", "9.0.0", False),
+            ("1.0.0", "1.0.1", False),
+            ("1.0.1", "1.0.0", False),
+            ("1.0.0", "1.1.0", False),
+            ("1.1.0", "1.0.0", False),
+            ("1.0.0", "2.0.0", False),
+            ("1.2-alpha1", "1.2.0", False),
+            ("1.2-alpha2", "1.2-alpha1", False),
+            ("1.2-alpha2.1", "1.2-alpha2", False),
+            ("1.2-alpha2.2", "1.2-alpha2.1", False),
+            ("1.2-beta1", "1.2-alpha1", False),
+            ("1.2-beta1", "1.2-alpha2.1", False),
+            ("1.2-beta1", "1.2.0", False),
+            ("1.2.1", "1.2.0", False),
+            ("2.0.0", "1.0.0", False),
+            ("2.0.0.0", "2.0.0", True),
+            ("2.0.0.0", "2.0.0.0", True),
+            ("2.0.0.1", "2.0.0.0", False),
+            ("2.0.1.10", "2.0.0.0", False),
+        ]
+
+        for a, b, expected in test_cases:
+            self.assertEqual(JujuVersion(a) == JujuVersion(b), expected)
+            self.assertEqual(JujuVersion(a) == b, expected)
+
+    def test_comparison(self):
+        test_cases = [
+            ("1.0.0", "1.0.0", False, True),
+            ("01.0.0", "1.0.0", False, True),
+            ("10.0.0", "9.0.0", False, False),
+            ("1.0.0", "1.0.1", True, True),
+            ("1.0.1", "1.0.0", False, False),
+            ("1.0.0", "1.1.0", True, True),
+            ("1.1.0", "1.0.0", False, False),
+            ("1.0.0", "2.0.0", True, True),
+            ("1.2-alpha1", "1.2.0", True, True),
+            ("1.2-alpha2", "1.2-alpha1", False, False),
+            ("1.2-alpha2.1", "1.2-alpha2", False, False),
+            ("1.2-alpha2.2", "1.2-alpha2.1", False, False),
+            ("1.2-beta1", "1.2-alpha1", False, False),
+            ("1.2-beta1", "1.2-alpha2.1", False, False),
+            ("1.2-beta1", "1.2.0", True, True),
+            ("1.2.1", "1.2.0", False, False),
+            ("2.0.0", "1.0.0", False, False),
+            ("2.0.0.0", "2.0.0", False, True),
+            ("2.0.0.0", "2.0.0.0", False, True),
+            ("2.0.0.1", "2.0.0.0", False, False),
+            ("2.0.1.10", "2.0.0.0", False, False),
+        ]
+
+        for a, b, expected_strict, expected_weak in test_cases:
+            self.assertEqual(JujuVersion(a) < JujuVersion(b), expected_strict)
+            self.assertEqual(JujuVersion(a) <= JujuVersion(b), expected_weak)
+            self.assertEqual(JujuVersion(b) > JujuVersion(a), expected_strict)
+            self.assertEqual(JujuVersion(b) >= JujuVersion(a), expected_weak)
+            # Implicit conversion.
+            self.assertEqual(JujuVersion(a) < b, expected_strict)
+            self.assertEqual(JujuVersion(a) <= b, expected_weak)
+            self.assertEqual(b > JujuVersion(a), expected_strict)
+            self.assertEqual(b >= JujuVersion(a), expected_weak)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_log.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_log.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7f74d5c901ffb7833c1e5523f9f69a1959999e1
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_log.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python3
+
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import unittest
+from unittest.mock import patch
+import importlib
+
+import logging
+import ops.log
+
+
+class FakeModelBackend:
+
+    def __init__(self):
+        self._calls = []
+
+    def calls(self, clear=False):
+        calls = self._calls
+        if clear:
+            self._calls = []
+        return calls
+
+    def juju_log(self, message, level):
+        self._calls.append((message, level))
+
+
+def reset_logging():
+    logging.shutdown()
+    importlib.reload(logging)
+
+
+class TestLogging(unittest.TestCase):
+
+    def setUp(self):
+        self.backend = FakeModelBackend()
+
+        reset_logging()
+        self.addCleanup(reset_logging)
+
+    def test_default_logging(self):
+        ops.log.setup_root_logging(self.backend)
+
+        logger = logging.getLogger()
+        self.assertEqual(logger.level, logging.DEBUG)
+        self.assertIsInstance(logger.handlers[0], ops.log.JujuLogHandler)
+
+        test_cases = [(
+            lambda: logger.critical('critical'), [('CRITICAL', 'critical')]
+        ), (
+            lambda: logger.error('error'), [('ERROR', 'error')]
+        ), (
+            lambda: logger.warning('warning'), [('WARNING', 'warning')]
+        ), (
+            lambda: logger.info('info'), [('INFO', 'info')]
+        ), (
+            lambda: logger.debug('debug'), [('DEBUG', 'debug')]
+        )]
+
+        for do, res in test_cases:
+            do()
+            calls = self.backend.calls(clear=True)
+            self.assertEqual(calls, res)
+
+    def test_handler_filtering(self):
+        logger = logging.getLogger()
+        logger.setLevel(logging.INFO)
+        logger.addHandler(ops.log.JujuLogHandler(self.backend, logging.WARNING))
+        logger.info('foo')
+        self.assertEqual(self.backend.calls(), [])
+        logger.warning('bar')
+        self.assertEqual(self.backend.calls(), [('WARNING', 'bar')])
+
+    def test_no_stderr_without_debug(self):
+        buffer = io.StringIO()
+        with patch('sys.stderr', buffer):
+            ops.log.setup_root_logging(self.backend, debug=False)
+            logger = logging.getLogger()
+            logger.debug('debug message')
+            logger.info('info message')
+            logger.warning('warning message')
+            logger.critical('critical message')
+        self.assertEqual(
+            self.backend.calls(),
+            [('DEBUG', 'debug message'),
+             ('INFO', 'info message'),
+             ('WARNING', 'warning message'),
+             ('CRITICAL', 'critical message'),
+             ])
+        self.assertEqual(buffer.getvalue(), "")
+
+    def test_debug_logging(self):
+        buffer = io.StringIO()
+        with patch('sys.stderr', buffer):
+            ops.log.setup_root_logging(self.backend, debug=True)
+            logger = logging.getLogger()
+            logger.debug('debug message')
+            logger.info('info message')
+            logger.warning('warning message')
+            logger.critical('critical message')
+        self.assertEqual(
+            self.backend.calls(),
+            [('DEBUG', 'debug message'),
+             ('INFO', 'info message'),
+             ('WARNING', 'warning message'),
+             ('CRITICAL', 'critical message'),
+             ])
+        self.assertRegex(
+            buffer.getvalue(),
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d DEBUG    debug message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d INFO     info message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d WARNING  warning message\n"
+            r"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d,\d\d\d CRITICAL critical message\n"
+        )
+
+    def test_reduced_logging(self):
+        ops.log.setup_root_logging(self.backend)
+        logger = logging.getLogger()
+        logger.setLevel(logging.WARNING)
+        logger.debug('debug')
+        logger.info('info')
+        logger.warning('warning')
+        self.assertEqual(self.backend.calls(), [('WARNING', 'warning')])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_main.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_main.py
new file mode 100755
index 0000000000000000000000000000000000000000..0703b88aa035baa5e907cf07fc4c5f231ca5669f
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_main.py
@@ -0,0 +1,675 @@
+#!/usr/bin/env python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import base64
+import logging
+import os
+import pickle
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+import importlib.util
+from pathlib import Path
+
+from ops.charm import (
+    CharmBase,
+    CharmEvents,
+    HookEvent,
+    InstallEvent,
+    StartEvent,
+    ConfigChangedEvent,
+    UpgradeCharmEvent,
+    UpdateStatusEvent,
+    LeaderSettingsChangedEvent,
+    RelationJoinedEvent,
+    RelationChangedEvent,
+    RelationDepartedEvent,
+    RelationBrokenEvent,
+    RelationEvent,
+    StorageAttachedEvent,
+    ActionEvent,
+    CollectMetricsEvent,
+)
+
+from .test_helpers import fake_script, fake_script_calls
+
+# This relies on the expected repository structure to find a path to
+# the source of the charm under test.
+TEST_CHARM_DIR = Path(__file__ + '/../charms/test_main').resolve()
+
+logger = logging.getLogger(__name__)
+
+
+class SymlinkTargetError(Exception):
+    pass
+
+
+class EventSpec:
+    def __init__(self, event_type, event_name, env_var=None,
+                 relation_id=None, remote_app=None, remote_unit=None,
+                 charm_config=None):
+        self.event_type = event_type
+        self.event_name = event_name
+        self.env_var = env_var
+        self.relation_id = relation_id
+        self.remote_app = remote_app
+        self.remote_unit = remote_unit
+        self.charm_config = charm_config
+
+
+class TestMain(abc.ABC):
+
+    @abc.abstractmethod
+    def _setup_entry_point(self, directory, entry_point):
+        """Set up the given entry point in the given directory.
+
+        If not using dispatch, that would be a symlink <dir>/<entry_point>
+        pointing at src/charm.py; if using dispatch that would be the dispatch
+        symlink. It could also not be a symlink...
+        """
+        return NotImplemented
+
+    @abc.abstractmethod
+    def _call_event(self, rel_path, env):
+        """Set up the environment and call (i.e. run) the given event."""
+        return NotImplemented
+
+    @abc.abstractmethod
+    def test_setup_event_links(self):
+        """Test auto-creation of symlinks caused by initial events.
+
+        Depending on the combination of dispatch and non-dispatch, this should
+        be checking for the creation or the _lack_ of creation, as appropriate.
+        """
+        return NotImplemented
+
+    def setUp(self):
+        self._setup_charm_dir()
+
+        _, tmp_file = tempfile.mkstemp()
+        self._state_file = Path(tmp_file)
+        self.addCleanup(self._state_file.unlink)
+
+        # Relations events are defined dynamically and modify the class attributes.
+        # We use a subclass temporarily to prevent these side effects from leaking.
+        class TestCharmEvents(CharmEvents):
+            pass
+        CharmBase.on = TestCharmEvents()
+
+        def cleanup():
+            shutil.rmtree(str(self.JUJU_CHARM_DIR))
+            CharmBase.on = CharmEvents()
+        self.addCleanup(cleanup)
+
+        fake_script(self, 'juju-log', "exit 0")
+
+        # set to something other than None for tests that care
+        self.stdout = None
+        self.stderr = None
+
+    def _setup_charm_dir(self):
+        self.JUJU_CHARM_DIR = Path(tempfile.mkdtemp()) / 'test_main'
+        self.hooks_dir = self.JUJU_CHARM_DIR / 'hooks'
+        charm_path = str(self.JUJU_CHARM_DIR / 'src/charm.py')
+        self.charm_exec_path = os.path.relpath(charm_path,
+                                               str(self.hooks_dir))
+        shutil.copytree(str(TEST_CHARM_DIR), str(self.JUJU_CHARM_DIR))
+
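+        # Load src/charm.py as a module so the tests can inspect the charm's event definitions.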
+        charm_spec = importlib.util.spec_from_file_location("charm", charm_path)
+        self.charm_module = importlib.util.module_from_spec(charm_spec)
+        charm_spec.loader.exec_module(self.charm_module)
+
+        self._prepare_initial_hooks()
+
+    def _prepare_initial_hooks(self):
+        initial_hooks = ('install', 'start', 'upgrade-charm', 'disks-storage-attached')
+        self.hooks_dir.mkdir()
+        for hook in initial_hooks:
+            self._setup_entry_point(self.hooks_dir, hook)
+
+    def _prepare_actions(self):
+        actions_meta = '''
+foo-bar:
+  description: Foos the bar.
+  title: foo-bar
+  params:
+    foo-name:
+      type: string
+      description: A foo name to bar.
+    silent:
+      type: boolean
+      description:
+      default: false
+  required:
+    - foo-name
+start:
+    description: Start the unit.'''
+        actions_dir_name = 'actions'
+        actions_meta_file = 'actions.yaml'
+
+        with (self.JUJU_CHARM_DIR / actions_meta_file).open('w+t') as f:
+            f.write(actions_meta)
+        actions_dir = self.JUJU_CHARM_DIR / actions_dir_name
+        actions_dir.mkdir()
+        for action_name in ('start', 'foo-bar'):
+            self._setup_entry_point(actions_dir, action_name)
+
+    def _read_and_clear_state(self):
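+        """Load the pickled state written by the charm under test, then reset the file."""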
+        state = None
+        if self._state_file.stat().st_size:
+            with self._state_file.open('r+b') as state_file:
+                state = pickle.load(state_file)
+                state_file.truncate(0)
+        return state
+
+    def _simulate_event(self, event_spec):
+        env = {
+            'PATH': "{}:{}".format(Path(__file__).parent / 'bin', os.environ['PATH']),
+            'JUJU_CHARM_DIR': str(self.JUJU_CHARM_DIR),
+            'JUJU_UNIT_NAME': 'test_main/0',
+            'CHARM_CONFIG': event_spec.charm_config,
+        }
+        if issubclass(event_spec.event_type, RelationEvent):
+            rel_name = event_spec.event_name.split('_')[0]
+            env.update({
+                'JUJU_RELATION': rel_name,
+                'JUJU_RELATION_ID': str(event_spec.relation_id),
+            })
+            remote_app = event_spec.remote_app
+            # For juju < 2.7 app name is extracted from JUJU_REMOTE_UNIT.
+            if remote_app is not None:
+                env['JUJU_REMOTE_APP'] = remote_app
+
+            remote_unit = event_spec.remote_unit
+            if remote_unit is None:
+                remote_unit = ''
+
+            env['JUJU_REMOTE_UNIT'] = remote_unit
+        else:
+            env.update({
+                'JUJU_REMOTE_UNIT': '',
+                'JUJU_REMOTE_APP': '',
+            })
+        if issubclass(event_spec.event_type, ActionEvent):
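+            # Derive the action's executable name, e.g. 'foo_bar_action' -> 'foo-bar'.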
+            event_filename = event_spec.event_name[:-len('_action')].replace('_', '-')
+            env.update({
+                event_spec.env_var: event_filename,
+            })
+            if event_spec.env_var == 'JUJU_ACTION_NAME':
+                event_dir = 'actions'
+            else:
+                raise RuntimeError('invalid env var name specified for an action event')
+        else:
+            event_filename = event_spec.event_name.replace('_', '-')
+            event_dir = 'hooks'
+
+        self._call_event(Path(event_dir, event_filename), env)
+        return self._read_and_clear_state()
+
+    def test_event_reemitted(self):
+        # base64 encoding is used to avoid null bytes.
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+
+        # First run "install" to make sure all hooks are set up.
+        state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [InstallEvent])
+
+        state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [ConfigChangedEvent])
+
+        # Re-emit should pick the deferred config-changed.
+        state = self._simulate_event(EventSpec(UpdateStatusEvent, 'update-status',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [ConfigChangedEvent, UpdateStatusEvent])
+
+    def test_no_reemission_on_collect_metrics(self):
+        # base64 encoding is used to avoid null bytes.
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        fake_script(self, 'add-metric', 'exit 0')
+
+        # First run "install" to make sure all hooks are set up.
+        state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [InstallEvent])
+
+        state = self._simulate_event(EventSpec(ConfigChangedEvent, 'config-changed',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [ConfigChangedEvent])
+
+        # Re-emit should not pick the deferred config-changed because
+        # collect-metrics runs in a restricted context.
+        state = self._simulate_event(EventSpec(CollectMetricsEvent, 'collect-metrics',
+                                               charm_config=charm_config))
+        self.assertEqual(state['observed_event_types'], [CollectMetricsEvent])
+
+    def test_multiple_events_handled(self):
+        self._prepare_actions()
+
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        actions_charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+            'USE_ACTIONS': True,
+        }))
+
+        fake_script(self, 'action-get', "echo '{}'")
+
+        # Sample events whose names contain different numbers of dashes
+        # and whose endpoints come from different sections of metadata.yaml.
+        events_under_test = [(
+            EventSpec(InstallEvent, 'install',
+                      charm_config=charm_config),
+            {},
+        ), (
+            EventSpec(StartEvent, 'start',
+                      charm_config=charm_config),
+            {},
+        ), (
+            EventSpec(UpdateStatusEvent, 'update_status',
+                      charm_config=charm_config),
+            {},
+        ), (
+            EventSpec(LeaderSettingsChangedEvent, 'leader_settings_changed',
+                      charm_config=charm_config),
+            {},
+        ), (
+            EventSpec(RelationJoinedEvent, 'db_relation_joined',
+                      relation_id=1,
+                      remote_app='remote', remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'db',
+             'relation_id': 1,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(RelationChangedEvent, 'mon_relation_changed',
+                      relation_id=2,
+                      remote_app='remote', remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'mon',
+             'relation_id': 2,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(RelationChangedEvent, 'mon_relation_changed',
+                      relation_id=2,
+                      remote_app='remote', remote_unit=None,
+                      charm_config=charm_config),
+            {'relation_name': 'mon',
+             'relation_id': 2,
+             'app_name': 'remote',
+             'unit_name': None},
+        ), (
+            EventSpec(RelationDepartedEvent, 'mon_relation_departed',
+                      relation_id=2,
+                      remote_app='remote', remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'mon',
+             'relation_id': 2,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(RelationBrokenEvent, 'ha_relation_broken',
+                      relation_id=3,
+                      charm_config=charm_config),
+            {'relation_name': 'ha',
+             'relation_id': 3},
+        ), (
+            # Events without a remote app specified (for Juju < 2.7).
+            EventSpec(RelationJoinedEvent, 'db_relation_joined',
+                      relation_id=1,
+                      remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'db',
+             'relation_id': 1,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(RelationChangedEvent, 'mon_relation_changed',
+                      relation_id=2,
+                      remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'mon',
+             'relation_id': 2,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(RelationDepartedEvent, 'mon_relation_departed',
+                      relation_id=2,
+                      remote_unit='remote/0',
+                      charm_config=charm_config),
+            {'relation_name': 'mon',
+             'relation_id': 2,
+             'app_name': 'remote',
+             'unit_name': 'remote/0'},
+        ), (
+            EventSpec(ActionEvent, 'start_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=actions_charm_config),
+            {},
+        ), (
+            EventSpec(ActionEvent, 'foo_bar_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=actions_charm_config),
+            {},
+        )]
+
+        logger.debug('Expected events %s', events_under_test)
+
+        # First run "install" to make sure all hooks are set up.
+        self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+
+        # Simulate hook executions for every event.
+        for event_spec, expected_event_data in events_under_test:
+            state = self._simulate_event(event_spec)
+
+            state_key = 'on_' + event_spec.event_name
+            handled_events = state.get(state_key, [])
+
+            # Make sure that a handler for that event was called once.
+            self.assertEqual(len(handled_events), 1)
+            # Make sure the event handled by the Charm has the right type.
+            handled_event_type = handled_events[0]
+            self.assertEqual(handled_event_type, event_spec.event_type)
+
+            self.assertEqual(state['observed_event_types'], [event_spec.event_type])
+
+            if event_spec.event_name in expected_event_data:
+                self.assertEqual(state[event_spec.event_name + '_data'],
+                                 expected_event_data[event_spec.event_name])
+
+    def test_event_not_implemented(self):
+        """Make sure events without implementation do not cause non-zero exit.
+        """
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+
+        # Simulate a scenario where there is a symlink for an event that
+        # a charm does not know how to handle.
+        hook_path = self.JUJU_CHARM_DIR / 'hooks/not-implemented-event'
+        # This will be cleared up in tearDown.
+        hook_path.symlink_to('install')
+
+        try:
+            self._simulate_event(EventSpec(HookEvent, 'not-implemented-event',
+                                           charm_config=charm_config))
+        except subprocess.CalledProcessError:
+            self.fail('Event simulation for an unsupported event'
+                      ' resulted in a non-zero exit code')
+
+    def test_collect_metrics(self):
+        indicator_file = self.JUJU_CHARM_DIR / 'indicator'
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+            'INDICATOR_FILE': indicator_file
+        }))
+        fake_script(self, 'add-metric', 'exit 0')
+        fake_script(self, 'juju-log', 'exit 0')
+        self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        # Clear the calls during 'install'
+        fake_script_calls(self, clear=True)
+        self._simulate_event(EventSpec(CollectMetricsEvent, 'collect_metrics',
+                                       charm_config=charm_config))
+        self.assertEqual(
+            fake_script_calls(self),
+            [['juju-log', '--log-level', 'DEBUG', 'Emitting Juju event collect_metrics'],
+             ['add-metric', '--labels', 'bar=4.2', 'foo=42']])
+
+    def test_logger(self):
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+            'USE_LOG_ACTIONS': True,
+        }))
+        fake_script(self, 'action-get', "echo '{}'")
+        actions_yaml = self.JUJU_CHARM_DIR / 'actions.yaml'
+        actions_yaml.write_text(
+            '''
+log_critical: {}
+log_error: {}
+log_warning: {}
+log_info: {}
+log_debug: {}
+            ''')
+
+        test_cases = [(
+            EventSpec(ActionEvent, 'log_critical_action', env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'CRITICAL', 'super critical'],
+        ), (
+            EventSpec(ActionEvent, 'log_error_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'ERROR', 'grave error'],
+        ), (
+            EventSpec(ActionEvent, 'log_warning_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'WARNING', 'wise warning'],
+        ), (
+            EventSpec(ActionEvent, 'log_info_action',
+                      env_var='JUJU_ACTION_NAME',
+                      charm_config=charm_config),
+            ['juju-log', '--log-level', 'INFO', 'useful info'],
+        )]
+
+        # Set up action symlinks.
+        self._simulate_event(EventSpec(InstallEvent, 'install',
+                                       charm_config=charm_config))
+
+        for event_spec, calls in test_cases:
+            self._simulate_event(event_spec)
+            self.assertIn(calls, fake_script_calls(self, clear=True))
+
+
+class TestMainWithNoDispatch(TestMain, unittest.TestCase):
+    def _setup_entry_point(self, directory, entry_point):
+        path = directory / entry_point
+        path.symlink_to(self.charm_exec_path)
+
+    def _call_event(self, rel_path, env):
+        event_file = self.JUJU_CHARM_DIR / rel_path
+        # Note that sys.executable is used to make sure we are using the same
+        # interpreter for the child process to support virtual environments.
+        subprocess.run(
+            [sys.executable, str(event_file)],
+            check=True, env=env, cwd=str(self.JUJU_CHARM_DIR))
+
+    def test_setup_event_links(self):
+        """Test auto-creation of symlinks caused by initial events.
+        """
+        all_event_hooks = ['hooks/' + e.replace("_", "-")
+                           for e in self.charm_module.Charm.on.events().keys()]
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        initial_events = {
+            EventSpec(InstallEvent, 'install', charm_config=charm_config),
+            EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config),
+            EventSpec(StartEvent, 'start', charm_config=charm_config),
+            EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config),
+        }
+
+        def _assess_event_links(event_spec):
+            self.assertIn(self.hooks_dir / event_spec.event_name, self.hooks_dir.iterdir())
+            for event_hook in all_event_hooks:
+                self.assertTrue((self.JUJU_CHARM_DIR / event_hook).exists(),
+                                'Missing hook: ' + event_hook)
+                self.assertEqual(os.readlink(str(self.JUJU_CHARM_DIR / event_hook)),
+                                 self.charm_exec_path)
+
+        for initial_event in initial_events:
+            self._setup_charm_dir()
+
+            self._simulate_event(initial_event)
+            _assess_event_links(initial_event)
+            # Make sure it is idempotent.
+            self._simulate_event(initial_event)
+            _assess_event_links(initial_event)
+
+    def test_setup_action_links(self):
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        actions_yaml = self.JUJU_CHARM_DIR / 'actions.yaml'
+        actions_yaml.write_text('test: {}')
+        self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+        action_hook = self.JUJU_CHARM_DIR / 'actions' / 'test'
+        self.assertTrue(action_hook.exists())
+
+
+class TestMainWithDispatch(TestMain, unittest.TestCase):
+    def _setup_entry_point(self, directory, entry_point):
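+        # With dispatch, a single 'dispatch' symlink to src/charm.py stands in
+        # for every hook, so the entry_point argument is ignored here.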
+        path = self.JUJU_CHARM_DIR / 'dispatch'
+        if not path.exists():
+            path.symlink_to('src/charm.py')
+
+    def _call_event(self, rel_path, env):
+        env["JUJU_DISPATCH_PATH"] = str(rel_path)
+        dispatch = self.JUJU_CHARM_DIR / 'dispatch'
+        subprocess.run(
+            [sys.executable, str(dispatch)],
+            stdout=self.stdout,
+            stderr=self.stderr,
+            check=True, env=env, cwd=str(self.JUJU_CHARM_DIR))
+
+    def test_setup_event_links(self):
+        """Test auto-creation of symlinks caused by initial events does _not_ happen when using dispatch.
+        """
+        all_event_hooks = ['hooks/' + e.replace("_", "-")
+                           for e in self.charm_module.Charm.on.events().keys()]
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        initial_events = {
+            EventSpec(InstallEvent, 'install', charm_config=charm_config),
+            EventSpec(StorageAttachedEvent, 'disks-storage-attached', charm_config=charm_config),
+            EventSpec(StartEvent, 'start', charm_config=charm_config),
+            EventSpec(UpgradeCharmEvent, 'upgrade-charm', charm_config=charm_config),
+        }
+
+        def _assess_event_links(event_spec):
+            self.assertNotIn(self.hooks_dir / event_spec.event_name, self.hooks_dir.iterdir())
+            for event_hook in all_event_hooks:
+                self.assertFalse((self.JUJU_CHARM_DIR / event_hook).exists(),
+                                 'Spurious hook: ' + event_hook)
+
+        for initial_event in initial_events:
+            self._setup_charm_dir()
+
+            self._simulate_event(initial_event)
+            _assess_event_links(initial_event)
+
+    def test_hook_and_dispatch(self):
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+
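+        # Point fake_script at hooks/ so the fake 'install' hook lands where
+        # dispatch will find and run it in addition to emitting the event.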
+        self.fake_script_path = self.hooks_dir
+        fake_script(self, 'install', 'exit 0')
+        state = self._simulate_event(EventSpec(InstallEvent, 'install', charm_config=charm_config))
+
+        # the script was called, *and* the .on. handler was called
+        self.assertEqual(fake_script_calls(self), [['install', '']])
+        self.assertEqual(state['observed_event_types'], [InstallEvent])
+
+    def test_hook_and_dispatch_with_failing_hook(self):
+        self.stdout = self.stderr = tempfile.TemporaryFile()
+        self.addCleanup(self.stdout.close)
+
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+
+        old_path = self.fake_script_path
+        self.fake_script_path = self.hooks_dir
+        fake_script(self, 'install', 'exit 42')
+        event = EventSpec(InstallEvent, 'install', charm_config=charm_config)
+        with self.assertRaises(subprocess.CalledProcessError):
+            self._simulate_event(event)
+        self.fake_script_path = old_path
+
+        self.stdout.seek(0)
+        self.assertEqual(self.stdout.read(), b'')
+        calls = fake_script_calls(self)
+        self.assertEqual(len(calls), 1, 'unexpected call result: {}'.format(calls))
+        self.assertEqual(len(calls[0]), 4, 'unexpected call result: {}'.format(calls[0]))
+        self.assertEqual(
+            calls[0][:3],
+            ['juju-log', '--log-level', 'WARNING']
+        )
+        self.assertRegex(calls[0][3], r'hook /\S+/install exited with status 42')
+
+    def test_hook_and_dispatch_but_hook_is_dispatch(self):
+        charm_config = base64.b64encode(pickle.dumps({
+            'STATE_FILE': self._state_file,
+        }))
+        event = EventSpec(InstallEvent, 'install', charm_config=charm_config)
+        hook_path = self.hooks_dir / 'install'
+        for ((rel, ind), path) in {
+                # relative and indirect
+                (True, True): Path('../dispatch'),
+                # relative and direct
+                (True, False): Path(self.charm_exec_path),
+                # absolute and direct
+                (False, False): (self.hooks_dir / self.charm_exec_path).resolve(),
+                # absolute and indirect
+                (False, True): self.JUJU_CHARM_DIR / 'dispatch',
+        }.items():
+            with self.subTest(path=path, rel=rel, ind=ind):
+                # sanity check
+                self.assertEqual(path.is_absolute(), not rel)
+                self.assertEqual(path.name == 'dispatch', ind)
+                try:
+                    hook_path.symlink_to(path)
+
+                    state = self._simulate_event(event)
+
+                    # the .on. handler was only called once
+                    self.assertEqual(state['observed_event_types'], [InstallEvent])
+                    self.assertEqual(state['on_install'], [InstallEvent])
+                finally:
+                    hook_path.unlink()
+
+
+# TODO: this does not work
+# class TestMainWithDispatchAsScript(TestMainWithDispatch):
+#     """Here dispatch is a script that execs the charm.py instead of a symlink.
+#     """
+#     def _setup_entry_point(self, directory, entry_point):
+#         path = self.JUJU_CHARM_DIR / 'dispatch'
+#         if not path.exists():
+#             path.write_text('#!/bin/sh\nexec "{}" "{}"\n'.format(
+#                 sys.executable,
+#                 self.JUJU_CHARM_DIR / 'src/charm.py'))
+#             path.chmod(0o755)
+
+#     def _call_event(self, rel_path, env):
+#         env["JUJU_DISPATCH_PATH"] = str(rel_path)
+#         dispatch = self.JUJU_CHARM_DIR / 'dispatch'
+#         subprocess.check_call([str(dispatch)],
+#                               env=env, cwd=str(self.JUJU_CHARM_DIR))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_model.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_model.py
new file mode 100755
index 0000000000000000000000000000000000000000..660176a4556a0d5f7e7ceeb9dbb045d84f727228
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_model.py
@@ -0,0 +1,1288 @@
+#!/usr/bin/python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import pathlib
+import unittest
+import json
+import ipaddress
+from collections import OrderedDict
+
+import ops.model
+import ops.charm
+from ops.charm import RelationMeta
+
+from test.test_helpers import fake_script, fake_script_calls
+
+
+class TestModel(unittest.TestCase):
+
+    def setUp(self):
+        def restore_env(env):
+            os.environ.clear()
+            os.environ.update(env)
+        self.addCleanup(restore_env, os.environ.copy())
+
+        os.environ['JUJU_UNIT_NAME'] = 'myapp/0'
+
+        self.backend = ops.model.ModelBackend()
+        meta = ops.charm.CharmMeta()
+        meta.relations = {
+            'db0': RelationMeta('provides', 'db0', {'interface': 'db0', 'scope': 'global'}),
+            'db1': RelationMeta('requires', 'db1', {'interface': 'db1', 'scope': 'global'}),
+            'db2': RelationMeta('peers', 'db2', {'interface': 'db2', 'scope': 'global'}),
+        }
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+        fake_script(self, 'relation-ids', """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""")
+
+    def test_model(self):
+        self.assertIs(self.model.app, self.model.unit.app)
+
+    def test_relations_keys(self):
+        fake_script(self, 'relation-ids',
+                    """[ "$1" = db2 ] && echo '["db2:5", "db2:6"]' || echo '[]'""")
+        fake_script(self, 'relation-list',
+                    """
+case "$2" in
+    5)
+        echo '["remoteapp1/0", "remoteapp1/1"]'
+        ;;
+    6)
+        echo '["remoteapp2/0"]'
+        ;;
+    *)
+        exit 2
+    ;;
+esac
+""")
+
+        for relation in self.model.relations['db2']:
+            self.assertIn(self.model.unit, relation.data)
+            unit_from_rel = next(filter(lambda u: u.name == 'myapp/0', relation.data.keys()))
+            self.assertIs(self.model.unit, unit_from_rel)
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db2', '--format=json'],
+            ['relation-list', '-r', '5', '--format=json'],
+            ['relation-list', '-r', '6', '--format=json']
+        ])
+
+    def test_get_relation(self):
+        err_msg = 'ERROR invalid value "$2" for option -r: relation not found'
+
+        fake_script(self, 'relation-ids', '''
+            case "$1" in
+            db1)
+                echo '["db1:4"]'
+                ;;
+            db2)
+                echo '["db2:5", "db2:6"]'
+                ;;
+            *)
+                echo '[]'
+                ;;
+            esac
+        ''')
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0"]'
+            else
+                echo {} >&2
+                exit 2
+            fi
+        '''.format(err_msg))
+        fake_script(self, 'relation-get',
+                    """echo {} >&2 ; exit 2""".format(err_msg))
+
+        with self.assertRaises(ops.model.ModelError):
+            self.model.get_relation('db1', 'db1:4')
+        db1_4 = self.model.get_relation('db1', 4)
+        self.assertIsInstance(db1_4, ops.model.Relation)
+        dead_rel = self.model.get_relation('db1', 7)
+        self.assertIsInstance(dead_rel, ops.model.Relation)
+        self.assertEqual(set(dead_rel.data.keys()), {self.model.unit, self.model.unit.app})
+        self.assertEqual(dead_rel.data[self.model.unit], {})
+        self.assertIsNone(self.model.get_relation('db0'))
+        self.assertIs(self.model.get_relation('db1'), db1_4)
+        with self.assertRaises(ops.model.TooManyRelatedAppsError):
+            self.model.get_relation('db2')
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-list', '-r', '7', '--format=json'],
+            ['relation-get', '-r', '7', '-', 'myapp/0', '--app=False', '--format=json'],
+            ['relation-ids', 'db0', '--format=json'],
+            ['relation-ids', 'db2', '--format=json'],
+            ['relation-list', '-r', '5', '--format=json'],
+            ['relation-list', '-r', '6', '--format=json']
+        ])
+
+    def test_peer_relation_app(self):
+        meta = ops.charm.CharmMeta()
+        meta.relations = {'dbpeer': RelationMeta('peers', 'dbpeer',
+                                                 {'interface': 'dbpeer', 'scope': 'global'})}
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+
+        err_msg = 'ERROR invalid value "$2" for option -r: relation not found'
+        fake_script(self, 'relation-ids',
+                    '''([ "$1" = dbpeer ] && echo '["dbpeer:0"]') || echo "[]"''')
+        fake_script(self, 'relation-list',
+                    '''([ "$2" = 0 ] && echo "[]") || (echo {} >&2 ; exit 2)'''.format(err_msg))
+
+        db1_4 = self.model.get_relation('dbpeer')
+        self.assertIs(db1_4.app, self.model.app)
+
+    def test_remote_units_is_our(self):
+        fake_script(self, 'relation-ids',
+                    """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list',
+                    """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""")
+
+        for u in self.model.get_relation('db1').units:
+            self.assertFalse(u._is_our_unit)
+            self.assertFalse(u.app._is_our_app)
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json']
+        ])
+
+    def test_our_unit_is_our(self):
+        self.assertTrue(self.model.unit._is_our_unit)
+        self.assertTrue(self.model.unit.app._is_our_app)
+
+    def test_unit_relation_data(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-get', """
+if [ "$2" = 4 ] && [ "$4" = "remoteapp1/0" ]; then
+    echo '{"host": "remoteapp1-0"}'
+else
+    exit 2
+fi
+""")
+
+        random_unit = self.model._cache.get(ops.model.Unit, 'randomunit/0')
+        with self.assertRaises(KeyError):
+            self.model.get_relation('db1').data[random_unit]
+        remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0',
+                                   self.model.get_relation('db1').units))
+        self.assertEqual(self.model.get_relation('db1').data[remoteapp1_0],
+                         {'host': 'remoteapp1-0'})
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'remoteapp1/0', '--app=False', '--format=json']
+        ])
+
+    def test_remote_app_relation_data(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0", "remoteapp1/1"]'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = remoteapp1 ]; then
+                echo '{"secret": "cafedeadbeef"}'
+            else
+                exit 2
+            fi
+        ''')
+
+        # Try to get relation data for an invalid remote application.
+        random_app = self.model._cache.get(ops.model.Application, 'randomapp')
+        with self.assertRaises(KeyError):
+            self.model.get_relation('db1').data[random_app]
+
+        remoteapp1 = self.model.get_relation('db1').app
+        self.assertEqual(self.model.get_relation('db1').data[remoteapp1],
+                         {'secret': 'cafedeadbeef'})
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'remoteapp1', '--app=True', '--format=json'],
+        ])
+
+    def test_relation_data_modify_remote(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-get', """
+if [ "$2" = 4 ] && [ "$4" = "remoteapp1/0" ]; then
+    echo '{"host": "remoteapp1-0"}'
+else
+    exit 2
+fi
+""")
+
+        rel_db1 = self.model.get_relation('db1')
+        remoteapp1_0 = next(filter(lambda u: u.name == 'remoteapp1/0',
+                                   self.model.get_relation('db1').units))
+        # Force memory cache to be loaded.
+        self.assertIn('host', rel_db1.data[remoteapp1_0])
+        with self.assertRaises(ops.model.RelationDataError):
+            rel_db1.data[remoteapp1_0]['foo'] = 'bar'
+        self.assertNotIn('foo', rel_db1.data[remoteapp1_0])
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'remoteapp1/0', '--app=False', '--format=json']
+        ])
+
+    def test_relation_data_modify_our(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-set', '''[ "$2" = 4 ] && exit 0 || exit 2''')
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = "myapp/0" ]; then
+                echo '{"host": "bar"}'
+            else
+                exit 2
+            fi
+        ''')
+
+        rel_db1 = self.model.get_relation('db1')
+        # Force memory cache to be loaded.
+        self.assertIn('host', rel_db1.data[self.model.unit])
+        rel_db1.data[self.model.unit]['host'] = 'bar'
+        self.assertEqual(rel_db1.data[self.model.unit]['host'], 'bar')
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'myapp/0', '--app=False', '--format=json'],
+            ['relation-set', '-r', '4', 'host=bar', '--app=False']
+        ])
+
+    def test_app_relation_data_modify_local_as_leader(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0", "remoteapp1/1"]'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = myapp ]; then
+                echo '{"password": "deadbeefcafe"}'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'relation-set', """[ "$2" = 4 ] && exit 0 || exit 2""")
+        fake_script(self, 'is-leader', 'echo true')
+
+        local_app = self.model.unit.app
+
+        rel_db1 = self.model.get_relation('db1')
+        self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'})
+
+        rel_db1.data[local_app]['password'] = 'foo'
+
+        self.assertEqual(rel_db1.data[local_app]['password'], 'foo')
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'myapp', '--app=True', '--format=json'],
+            ['is-leader', '--format=json'],
+            ['relation-set', '-r', '4', 'password=foo', '--app=True'],
+        ])
+
+    def test_app_relation_data_modify_local_as_minion(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0", "remoteapp1/1"]'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = myapp ]; then
+                echo '{"password": "deadbeefcafe"}'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'is-leader', 'echo false')
+
+        local_app = self.model.unit.app
+
+        rel_db1 = self.model.get_relation('db1')
+        self.assertEqual(rel_db1.data[local_app], {'password': 'deadbeefcafe'})
+
+        with self.assertRaises(ops.model.RelationDataError):
+            rel_db1.data[local_app]['password'] = 'foobar'
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'myapp', '--app=True', '--format=json'],
+            ['is-leader', '--format=json'],
+        ])
+
+    def test_relation_data_del_key(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-set', '''[ "$2" = 4 ] && exit 0 || exit 2''')
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = "myapp/0" ]; then
+                echo '{"host": "bar"}'
+            else
+                exit 2
+            fi
+        ''')
+
+        rel_db1 = self.model.get_relation('db1')
+        # Force memory cache to be loaded.
+        self.assertIn('host', rel_db1.data[self.model.unit])
+        del rel_db1.data[self.model.unit]['host']
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = "myapp/0" ]; then
+                echo '{}'
+            else
+                exit 2
+            fi
+        ''')
+        self.assertNotIn('host', rel_db1.data[self.model.unit])
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['relation-get', '-r', '4', '-', 'myapp/0', '--app=False', '--format=json'],
+            ['relation-set', '-r', '4', 'host=', '--app=False']
+        ])
+
+    def test_relation_set_fail(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db2 ] && echo '["db2:5"]' || echo '[]'""")
+        fake_script(self, 'relation-list',
+                    """[ "$2" = 5 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 5 ] && [ "$4" = "myapp/0" ]; then
+                echo '{"host": "myapp-0"}'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'relation-set', 'exit 2')
+
+        rel_db2 = self.model.relations['db2'][0]
+        # Force memory cache to be loaded.
+        self.assertIn('host', rel_db2.data[self.model.unit])
+        with self.assertRaises(ops.model.ModelError):
+            rel_db2.data[self.model.unit]['host'] = 'bar'
+        self.assertEqual(rel_db2.data[self.model.unit]['host'], 'myapp-0')
+        with self.assertRaises(ops.model.ModelError):
+            del rel_db2.data[self.model.unit]['host']
+        self.assertIn('host', rel_db2.data[self.model.unit])
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db2', '--format=json'],
+            ['relation-list', '-r', '5', '--format=json'],
+            ['relation-get', '-r', '5', '-', 'myapp/0', '--app=False', '--format=json'],
+            ['relation-set', '-r', '5', 'host=bar', '--app=False'],
+            ['relation-set', '-r', '5', 'host=', '--app=False']
+        ])
+
+    def test_relation_get_set_is_app_arg(self):
+        self.backend = ops.model.ModelBackend()
+
+        # No is_app provided.
+        with self.assertRaises(TypeError):
+            self.backend.relation_set(1, 'fookey', 'barval')
+
+        with self.assertRaises(TypeError):
+            self.backend.relation_get(1, 'fooentity')
+
+        # Invalid types for is_app.
+        for is_app_v in [None, 1, 2.0, 'a', b'beef']:
+            with self.assertRaises(TypeError):
+                self.backend.relation_set(1, 'fookey', 'barval', is_app=is_app_v)
+
+            with self.assertRaises(TypeError):
+                self.backend.relation_get(1, 'fooentity', is_app=is_app_v)
+
+    def test_relation_data_type_check(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list',
+                    """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        fake_script(self, 'relation-get', '''
+            if [ "$2" = 4 ] && [ "$4" = "myapp/0" ]; then
+                echo '{"host": "myapp-0"}'
+            else
+                exit 2
+            fi
+        ''')
+
+        rel_db1 = self.model.get_relation('db1')
+        with self.assertRaises(ops.model.RelationDataError):
+            rel_db1.data[self.model.unit]['foo'] = 1
+        with self.assertRaises(ops.model.RelationDataError):
+            rel_db1.data[self.model.unit]['foo'] = {'foo': 'bar'}
+        with self.assertRaises(ops.model.RelationDataError):
+            rel_db1.data[self.model.unit]['foo'] = None
+
+        self.assertEqual(fake_script_calls(self), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json']
+        ])
+
+    def test_config(self):
+        fake_script(self, 'config-get', """echo '{"foo":"foo","bar":1,"qux":true}'""")
+        self.assertEqual(self.model.config, {
+            'foo': 'foo',
+            'bar': 1,
+            'qux': True,
+        })
+        with self.assertRaises(TypeError):
+            # Confirm that we cannot modify config values.
+            self.model.config['foo'] = 'bar'
+
+        self.assertEqual(fake_script_calls(self), [['config-get', '--format=json']])
+
+    def test_is_leader(self):
+        def check_remote_units():
+            fake_script(self, 'relation-ids',
+                        """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+
+            fake_script(self, 'relation-list',
+                        """[ "$2" = 4 ] && echo '["remoteapp1/0", "remoteapp1/1"]' || exit 2""")
+
+            # Cannot determine leadership for remote units.
+            for u in self.model.get_relation('db1').units:
+                with self.assertRaises(RuntimeError):
+                    u.is_leader()
+
+        fake_script(self, 'is-leader', 'echo true')
+        self.assertTrue(self.model.unit.is_leader())
+
+        check_remote_units()
+
+        # Create a new model and backend to drop a cached is-leader output.
+        self.backend = ops.model.ModelBackend()
+        meta = ops.charm.CharmMeta()
+        meta.relations = {
+            'db0': RelationMeta('provides', 'db0', {'interface': 'db0', 'scope': 'global'}),
+            'db1': RelationMeta('requires', 'db1', {'interface': 'db1', 'scope': 'global'}),
+            'db2': RelationMeta('peers', 'db2', {'interface': 'db2', 'scope': 'global'}),
+        }
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+
+        fake_script(self, 'is-leader', 'echo false')
+        self.assertFalse(self.model.unit.is_leader())
+
+        check_remote_units()
+
+        self.assertEqual(fake_script_calls(self), [
+            ['is-leader', '--format=json'],
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+            ['is-leader', '--format=json'],
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+        ])
+
+    def test_is_leader_refresh(self):
+        fake_script(self, 'is-leader', 'echo false')
+        self.assertFalse(self.model.unit.is_leader())
+
+        # Change the leadership status and force a recheck.
+        fake_script(self, 'is-leader', 'echo true')
+        self.backend._leader_check_time = None
+        self.assertTrue(self.model.unit.is_leader())
+
+        # Force a recheck without changing the leadership status.
+        fake_script(self, 'is-leader', 'echo true')
+        self.backend._leader_check_time = None
+        self.assertTrue(self.model.unit.is_leader())
+
+    def test_workload_version(self):
+        fake_script(self, 'application-version-set', 'exit 0')
+        self.model.unit.set_workload_version('1.2.3')
+        self.assertEqual(fake_script_calls(self), [['application-version-set', '--', '1.2.3']])
+
+    def test_workload_version_invalid(self):
+        fake_script(self, 'application-version-set', 'exit 0')
+        with self.assertRaises(TypeError) as cm:
+            self.model.unit.set_workload_version(5)
+        self.assertEqual(str(cm.exception), "workload version must be a str, not int: 5")
+        self.assertEqual(fake_script_calls(self), [])
+
+    def test_resources(self):
+        meta = ops.charm.CharmMeta()
+        meta.resources = {'foo': None, 'bar': None}
+        model = ops.model.Model('myapp/0', meta, self.backend)
+
+        with self.assertRaises(RuntimeError):
+            model.resources.fetch('qux')
+
+        fake_script(self, 'resource-get', 'exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            model.resources.fetch('foo')
+
+        fake_script(self, 'resource-get',
+                    'echo /var/lib/juju/agents/unit-test-0/resources/$1/$1.tgz')
+        self.assertEqual(model.resources.fetch('foo').name, 'foo.tgz')
+        self.assertEqual(model.resources.fetch('bar').name, 'bar.tgz')
+
+    def test_pod_spec(self):
+        fake_script(self, 'pod-spec-set', """
+                    cat $2 > $(dirname $0)/spec.json
+                    [[ -n $4 ]] && cat $4 > $(dirname $0)/k8s_res.json || true
+                    """)
+        fake_script(self, 'is-leader', 'echo true')
+        spec_path = self.fake_script_path / 'spec.json'
+        k8s_res_path = self.fake_script_path / 'k8s_res.json'
+
+        def check_calls(calls):
+            # There may be 1 or 2 calls because of is-leader.
+            self.assertLessEqual(len(calls), 2)
+            pod_spec_call = next(filter(lambda c: c[0] == 'pod-spec-set', calls))
+            self.assertEqual(pod_spec_call[:2], ['pod-spec-set', '--file'])
+
+            # An 8-character suffix is used as of Python 3.4.0, see Python bug #12015.
+            # The other characters are from POSIX 3.282 (Portable Filename
+            # Character Set), a subset of which Python's mkdtemp uses.
+            self.assertRegex(pod_spec_call[2], '.*/tmp[A-Za-z0-9._-]{8}-pod-spec-set')
+
+        self.model.pod.set_spec({'foo': 'bar'})
+        self.assertEqual(spec_path.read_text(), '{"foo": "bar"}')
+        self.assertFalse(k8s_res_path.exists())
+
+        fake_calls = fake_script_calls(self, clear=True)
+        check_calls(fake_calls)
+
+        self.model.pod.set_spec({'bar': 'foo'}, {'qux': 'baz'})
+        self.assertEqual(spec_path.read_text(), '{"bar": "foo"}')
+        self.assertEqual(k8s_res_path.read_text(), '{"qux": "baz"}')
+
+        fake_calls = fake_script_calls(self, clear=True)
+        check_calls(fake_calls)
+
+        # Create a new model to drop is-leader caching result.
+        self.backend = ops.model.ModelBackend()
+        meta = ops.charm.CharmMeta()
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+        fake_script(self, 'is-leader', 'echo false')
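+        # Only the leader may set the pod spec, so a non-leader unit gets a ModelError.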
+        with self.assertRaises(ops.model.ModelError):
+            self.model.pod.set_spec({'foo': 'bar'})
+
+    def test_base_status_instance_raises(self):
+        with self.assertRaises(TypeError):
+            ops.model.StatusBase('test')
+
+    def test_active_message_default(self):
+        self.assertEqual(ops.model.ActiveStatus().message, '')
+
+    def test_local_set_valid_unit_status(self):
+        test_cases = [(
+            ops.model.ActiveStatus('Green'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertEqual(fake_script_calls(self, True),
+                                     [['status-set', '--application=False', 'active', 'Green']]),
+        ), (
+            ops.model.MaintenanceStatus('Yellow'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertEqual(
+                fake_script_calls(self, True),
+                [['status-set', '--application=False', 'maintenance', 'Yellow']]),
+        ), (
+            ops.model.BlockedStatus('Red'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertEqual(fake_script_calls(self, True),
+                                     [['status-set', '--application=False', 'blocked', 'Red']]),
+        ), (
+            ops.model.WaitingStatus('White'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertEqual(fake_script_calls(self, True),
+                                     [['status-set', '--application=False', 'waiting', 'White']]),
+        )]
+
+        for target_status, setup_tools, check_tool_calls in test_cases:
+            setup_tools()
+
+            self.model.unit.status = target_status
+
+            self.assertEqual(self.model.unit.status, target_status)
+
+            check_tool_calls()
+
+    def test_local_set_valid_app_status(self):
+        fake_script(self, 'is-leader', 'echo true')
+        test_cases = [(
+            ops.model.ActiveStatus('Green'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertIn(['status-set', '--application=True', 'active', 'Green'],
+                                  fake_script_calls(self, True)),
+        ), (
+            ops.model.MaintenanceStatus('Yellow'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertIn(['status-set', '--application=True', 'maintenance', 'Yellow'],
+                                  fake_script_calls(self, True)),
+        ), (
+            ops.model.BlockedStatus('Red'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertIn(['status-set', '--application=True', 'blocked', 'Red'],
+                                  fake_script_calls(self, True)),
+        ), (
+            ops.model.WaitingStatus('White'),
+            lambda: fake_script(self, 'status-set', 'exit 0'),
+            lambda: self.assertIn(['status-set', '--application=True', 'waiting', 'White'],
+                                  fake_script_calls(self, True)),
+        )]
+
+        for target_status, setup_tools, check_tool_calls in test_cases:
+            setup_tools()
+
+            self.model.app.status = target_status
+
+            self.assertEqual(self.model.app.status, target_status)
+
+            check_tool_calls()
+
+    def test_set_app_status_non_leader_raises(self):
+        fake_script(self, 'is-leader', 'echo false')
+
+        with self.assertRaises(RuntimeError):
+            self.model.app.status
+
+        with self.assertRaises(RuntimeError):
+            self.model.app.status = ops.model.ActiveStatus()
+
+    def test_local_set_invalid_status(self):
+        fake_script(self, 'status-set', 'exit 1')
+        fake_script(self, 'is-leader', 'echo true')
+
+        with self.assertRaises(ops.model.ModelError):
+            self.model.unit.status = ops.model.UnknownStatus()
+
+        self.assertEqual(fake_script_calls(self, True), [
+            ['status-set', '--application=False', 'unknown', ''],
+        ])
+
+        with self.assertRaises(ops.model.ModelError):
+            self.model.app.status = ops.model.UnknownStatus()
+
+        # A leadership check is needed for application status.
+        self.assertEqual(fake_script_calls(self, True), [
+            ['is-leader', '--format=json'],
+            ['status-set', '--application=True', 'unknown', ''],
+        ])
+
+    def test_status_set_is_app_not_bool_raises(self):
+        self.backend = ops.model.ModelBackend()
+
+        for is_app_v in [None, 1, 2.0, 'a', b'beef', object]:
+            with self.assertRaises(TypeError):
+                self.backend.status_set(ops.model.ActiveStatus, is_app=is_app_v)
+
+    def test_remote_unit_status(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0", "remoteapp1/1"]'
+            else
+                exit 2
+            fi
+        ''')
+
+        remote_unit = next(filter(lambda u: u.name == 'remoteapp1/0',
+                                  self.model.get_relation('db1').units))
+
+        test_statuses = (
+            ops.model.UnknownStatus(),
+            ops.model.ActiveStatus('Green'),
+            ops.model.MaintenanceStatus('Yellow'),
+            ops.model.BlockedStatus('Red'),
+            ops.model.WaitingStatus('White'),
+        )
+
+        for target_status in test_statuses:
+            with self.assertRaises(RuntimeError):
+                remote_unit.status = target_status
+
+    def test_remote_app_status(self):
+        fake_script(self, 'relation-ids', """[ "$1" = db1 ] && echo '["db1:4"]' || echo '[]'""")
+        fake_script(self, 'relation-list', '''
+            if [ "$2" = 4 ]; then
+                echo '["remoteapp1/0", "remoteapp1/1"]'
+            else
+                exit 2
+            fi
+        ''')
+
+        remoteapp1 = self.model.get_relation('db1').app
+
+        # Remote application status is always unknown.
+        self.assertIsInstance(remoteapp1.status, ops.model.UnknownStatus)
+
+        test_statuses = (
+            ops.model.UnknownStatus(),
+            ops.model.ActiveStatus(),
+            ops.model.MaintenanceStatus('Upgrading software'),
+            ops.model.BlockedStatus('Awaiting manual resolution'),
+            ops.model.WaitingStatus('Awaiting related app updates'),
+        )
+        for target_status in test_statuses:
+            with self.assertRaises(RuntimeError):
+                remoteapp1.status = target_status
+
+        self.assertEqual(fake_script_calls(self, clear=True), [
+            ['relation-ids', 'db1', '--format=json'],
+            ['relation-list', '-r', '4', '--format=json'],
+        ])
+
+    def test_storage(self):
+        meta = ops.charm.CharmMeta()
+        meta.storages = {'disks': None, 'data': None}
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+
+        fake_script(self, 'storage-list', '''
+            if [ "$1" = disks ]; then
+                echo '["disks/0", "disks/1"]'
+            else
+                echo '[]'
+            fi
+        ''')
+        fake_script(self, 'storage-get', '''
+            if [ "$2" = disks/0 ]; then
+                echo '"/var/srv/disks/0"'
+            elif [ "$2" = disks/1 ]; then
+                echo '"/var/srv/disks/1"'
+            else
+                exit 2
+            fi
+        ''')
+        fake_script(self, 'storage-add', '')
+
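+        # The fake scripts report two attached 'disks' instances and none for 'data'.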
+        self.assertEqual(len(self.model.storages), 2)
+        self.assertEqual(self.model.storages.keys(), meta.storages.keys())
+        self.assertIn('disks', self.model.storages)
+        test_cases = {
+            0: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/0')},
+            1: {'name': 'disks', 'location': pathlib.Path('/var/srv/disks/1')},
+        }
+        for storage in self.model.storages['disks']:
+            self.assertEqual(storage.name, 'disks')
+            self.assertIn(storage.id, test_cases)
+            self.assertEqual(storage.name, test_cases[storage.id]['name'])
+            self.assertEqual(storage.location, test_cases[storage.id]['location'])
+
+        self.assertEqual(fake_script_calls(self, clear=True), [
+            ['storage-list', 'disks', '--format=json'],
+            ['storage-get', '-s', 'disks/0', 'location', '--format=json'],
+            ['storage-get', '-s', 'disks/1', 'location', '--format=json'],
+        ])
+
+        self.assertSequenceEqual(self.model.storages['data'], [])
+        self.model.storages.request('data', count=3)
+        self.assertEqual(fake_script_calls(self), [
+            ['storage-list', 'data', '--format=json'],
+            ['storage-add', 'data=3'],
+        ])
+
+        # Try to add storage not present in charm metadata.
+        with self.assertRaises(ops.model.ModelError):
+            self.model.storages.request('deadbeef')
+
+        # Invalid count parameter types.
+        for count_v in [None, False, 2.0, 'a', b'beef', object]:
+            with self.assertRaises(TypeError):
+                self.model.storages.request('data', count_v)
+
+
+class TestModelBindings(unittest.TestCase):
+
+    def setUp(self):
+        def restore_env(env):
+            os.environ.clear()
+            os.environ.update(env)
+        self.addCleanup(restore_env, os.environ.copy())
+
+        os.environ['JUJU_UNIT_NAME'] = 'myapp/0'
+
+        meta = ops.charm.CharmMeta()
+        meta.relations = {
+            'db0': RelationMeta('provides', 'db0', {'interface': 'db0', 'scope': 'global'}),
+            'db1': RelationMeta('requires', 'db1', {'interface': 'db1', 'scope': 'global'}),
+            'db2': RelationMeta('peers', 'db2', {'interface': 'db2', 'scope': 'global'}),
+        }
+        self.backend = ops.model.ModelBackend()
+        self.model = ops.model.Model('myapp/0', meta, self.backend)
+
+        fake_script(self, 'relation-ids',
+                    """([ "$1" = db0 ] && echo '["db0:4"]') || echo '[]'""")
+        fake_script(self, 'relation-list', """[ "$2" = 4 ] && echo '["remoteapp1/0"]' || exit 2""")
+        self.network_get_out = '''{
+  "bind-addresses": [
+    {
+      "mac-address": "de:ad:be:ef:ca:fe",
+      "interface-name": "lo",
+      "addresses": [
+        {
+          "hostname": "",
+          "value": "192.0.2.2",
+          "cidr": "192.0.2.0/24"
+        },
+        {
+          "hostname": "deadbeef.example",
+          "value": "dead:beef::1",
+          "cidr": "dead:beef::/64"
+        }
+      ]
+    },
+    {
+      "mac-address": "",
+      "interface-name": "tun",
+      "addresses": [
+        {
+          "hostname": "",
+          "value": "192.0.3.3",
+          "cidr": ""
+        },
+        {
+          "hostname": "",
+          "value": "2001:db8::3",
+          "cidr": ""
+        },
+        {
+          "hostname": "deadbeef.local",
+          "value": "fe80::1:1",
+          "cidr": "fe80::/64"
+        }
+      ]
+    }
+  ],
+  "egress-subnets": [
+    "192.0.2.2/32",
+    "192.0.3.0/24",
+    "dead:beef::/64",
+    "2001:db8::3/128"
+  ],
+  "ingress-addresses": [
+    "192.0.2.2",
+    "192.0.3.3",
+    "dead:beef::1",
+    "2001:db8::3"
+  ]
+}'''
+
+    def _check_binding_data(self, binding_name, binding):
+        self.assertEqual(binding.name, binding_name)
+        self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2'))
+        self.assertEqual(binding.network.ingress_address, ipaddress.ip_address('192.0.2.2'))
+        # /32 and /128 CIDRs are valid one-address networks for IPv{4,6}Network types respectively.
+        self.assertEqual(binding.network.egress_subnets, [ipaddress.ip_network('192.0.2.2/32'),
+                                                          ipaddress.ip_network('192.0.3.0/24'),
+                                                          ipaddress.ip_network('dead:beef::/64'),
+                                                          ipaddress.ip_network('2001:db8::3/128')])
+
+        for (i, (name, address, subnet)) in enumerate([
+                ('lo', '192.0.2.2', '192.0.2.0/24'),
+                ('lo', 'dead:beef::1', 'dead:beef::/64'),
+                ('tun', '192.0.3.3', '192.0.3.3/32'),
+                ('tun', '2001:db8::3', '2001:db8::3/128'),
+                ('tun', 'fe80::1:1', 'fe80::/64')]):
+            self.assertEqual(binding.network.interfaces[i].name, name)
+            self.assertEqual(binding.network.interfaces[i].address, ipaddress.ip_address(address))
+            self.assertEqual(binding.network.interfaces[i].subnet, ipaddress.ip_network(subnet))
+
+    def test_invalid_keys(self):
+        # Basic validation for passing invalid keys.
+        for name in (object, 0):
+            with self.assertRaises(ops.model.ModelError):
+                self.model.get_binding(name)
+
+    def test_dead_relations(self):
+        fake_script(
+            self,
+            'network-get',
+            '''
+                if [ "$1" = db0 ] && [ "$2" = --format=json ]; then
+                    echo '{}'
+                else
+                    echo ERROR invalid value "$2" for option -r: relation not found >&2
+                    exit 2
+                fi
+            '''.format(self.network_get_out))
+        # Validate the behavior for dead relations.
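+        # network-get with the dead relation id fails, so the binding falls back to a
+        # relation-less network-get for the endpoint.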
+        binding = ops.model.Binding('db0', 42, self.model._backend)
+        self.assertEqual(binding.network.bind_address, ipaddress.ip_address('192.0.2.2'))
+        self.assertEqual(fake_script_calls(self, clear=True), [
+            ['network-get', 'db0', '-r', '42', '--format=json'],
+            ['network-get', 'db0', '--format=json'],
+        ])
+
+    def test_binding_by_relation_name(self):
+        fake_script(self, 'network-get',
+                    '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out))
+        binding_name = 'db0'
+        expected_calls = [['network-get', 'db0', '--format=json']]
+
+        binding = self.model.get_binding(binding_name)
+        self._check_binding_data(binding_name, binding)
+        self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
+
+    def test_binding_by_relation(self):
+        fake_script(self, 'network-get',
+                    '''[ "$1" = db0 ] && echo '{}' || exit 1'''.format(self.network_get_out))
+        binding_name = 'db0'
+        expected_calls = [
+            ['relation-ids', 'db0', '--format=json'],
+            # The relation-ids and relation-list invocations are due to the get_relation call;
+            # network-get comes from get_binding.
+            ['relation-list', '-r', '4', '--format=json'],
+            ['network-get', 'db0', '-r', '4', '--format=json'],
+        ]
+        binding = self.model.get_binding(self.model.get_relation(binding_name))
+        self._check_binding_data(binding_name, binding)
+        self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
+
+
+class TestModelBackend(unittest.TestCase):
+
+    def setUp(self):
+        os.environ['JUJU_UNIT_NAME'] = 'myapp/0'
+        self.addCleanup(os.environ.pop, 'JUJU_UNIT_NAME')
+
+        self._backend = None
+
+    @property
+    def backend(self):
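+        """Return the ModelBackend, creating it lazily on first access."""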
+        if self._backend is None:
+            self._backend = ops.model.ModelBackend()
+        return self._backend
+
+    def test_relation_tool_errors(self):
+        err_msg = 'ERROR invalid value "$2" for option -r: relation not found'
+
+        test_cases = [(
+            lambda: fake_script(self, 'relation-list', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.relation_list(3),
+            ops.model.ModelError,
+            [['relation-list', '-r', '3', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'relation-list', 'echo {} >&2 ; exit 2'.format(err_msg)),
+            lambda: self.backend.relation_list(3),
+            ops.model.RelationNotFoundError,
+            [['relation-list', '-r', '3', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'relation-set', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
+            ops.model.ModelError,
+            [['relation-set', '-r', '3', 'foo=bar', '--app=False']],
+        ), (
+            lambda: fake_script(self, 'relation-set', 'echo {} >&2 ; exit 2'.format(err_msg)),
+            lambda: self.backend.relation_set(3, 'foo', 'bar', is_app=False),
+            ops.model.RelationNotFoundError,
+            [['relation-set', '-r', '3', 'foo=bar', '--app=False']],
+        ), (
+            lambda: fake_script(self, 'relation-get', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.relation_get(3, 'remote/0', is_app=False),
+            ops.model.ModelError,
+            [['relation-get', '-r', '3', '-', 'remote/0', '--app=False', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'relation-get', 'echo {} >&2 ; exit 2'.format(err_msg)),
+            lambda: self.backend.relation_get(3, 'remote/0', is_app=False),
+            ops.model.RelationNotFoundError,
+            [['relation-get', '-r', '3', '-', 'remote/0', '--app=False', '--format=json']],
+        )]
+
+        for do_fake, run, exception, calls in test_cases:
+            do_fake()
+            with self.assertRaises(exception):
+                run()
+            self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_status_is_app_forced_kwargs(self):
+        fake_script(self, 'status-get', 'exit 1')
+        fake_script(self, 'status-set', 'exit 1')
+
+        test_cases = (
+            lambda: self.backend.status_get(False),
+            lambda: self.backend.status_get(True),
+            lambda: self.backend.status_set('active', '', False),
+            lambda: self.backend.status_set('active', '', True),
+        )
+
+        for case in test_cases:
+            with self.assertRaises(TypeError):
+                case()
+
+    def test_storage_tool_errors(self):
+        test_cases = [(
+            lambda: fake_script(self, 'storage-list', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.storage_list('foobar'),
+            ops.model.ModelError,
+            [['storage-list', 'foobar', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'storage-get', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.storage_get('foobar', 'someattr'),
+            ops.model.ModelError,
+            [['storage-get', '-s', 'foobar', 'someattr', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.storage_add('foobar', count=2),
+            ops.model.ModelError,
+            [['storage-add', 'foobar=2']],
+        ), (
+            lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.storage_add('foobar', count=object),
+            TypeError,
+            [],
+        ), (
+            lambda: fake_script(self, 'storage-add', 'echo fooerror >&2 ; exit 1'),
+            lambda: self.backend.storage_add('foobar', count=True),
+            TypeError,
+            [],
+        )]
+        for do_fake, run, exception, calls in test_cases:
+            do_fake()
+            with self.assertRaises(exception):
+                run()
+            self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_network_get(self):
+        network_get_out = '''{
+  "bind-addresses": [
+    {
+      "mac-address": "",
+      "interface-name": "",
+      "addresses": [
+        {
+          "hostname": "",
+          "value": "192.0.2.2",
+          "cidr": ""
+        }
+      ]
+    }
+  ],
+  "egress-subnets": [
+    "192.0.2.2/32"
+  ],
+  "ingress-addresses": [
+    "192.0.2.2"
+  ]
+}'''
+        fake_script(self, 'network-get',
+                    '''[ "$1" = deadbeef ] && echo '{}' || exit 1'''.format(network_get_out))
+        network_info = self.backend.network_get('deadbeef')
+        self.assertEqual(network_info, json.loads(network_get_out))
+        self.assertEqual(fake_script_calls(self, clear=True),
+                         [['network-get', 'deadbeef', '--format=json']])
+
+        network_info = self.backend.network_get('deadbeef', 1)
+        self.assertEqual(network_info, json.loads(network_get_out))
+        self.assertEqual(fake_script_calls(self, clear=True),
+                         [['network-get', 'deadbeef', '-r', '1', '--format=json']])
+
+    def test_network_get_errors(self):
+        err_no_endpoint = 'ERROR no network config found for binding "$2"'
+        err_no_rel = 'ERROR invalid value "$3" for option -r: relation not found'
+
+        test_cases = [(
+            lambda: fake_script(self, 'network-get',
+                                'echo {} >&2 ; exit 1'.format(err_no_endpoint)),
+            lambda: self.backend.network_get("deadbeef"),
+            ops.model.ModelError,
+            [['network-get', 'deadbeef', '--format=json']],
+        ), (
+            lambda: fake_script(self, 'network-get', 'echo {} >&2 ; exit 2'.format(err_no_rel)),
+            lambda: self.backend.network_get("deadbeef", 3),
+            ops.model.RelationNotFoundError,
+            [['network-get', 'deadbeef', '-r', '3', '--format=json']],
+        )]
+        for do_fake, run, exception, calls in test_cases:
+            do_fake()
+            with self.assertRaises(exception):
+                run()
+            self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_action_get_error(self):
+        fake_script(self, 'action-get', '')
+        fake_script(self, 'action-get', 'echo fooerror >&2 ; exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            self.backend.action_get()
+        calls = [['action-get', '--format=json']]
+        self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_action_set_error(self):
+        fake_script(self, 'action-get', '')
+        fake_script(self, 'action-set', 'echo fooerror >&2 ; exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            self.backend.action_set(OrderedDict([('foo', 'bar'), ('dead', 'beef cafe')]))
+        calls = [["action-set", "foo=bar", "dead=beef cafe"]]
+        self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_action_log_error(self):
+        fake_script(self, 'action-get', '')
+        fake_script(self, 'action-log', 'echo fooerror >&2 ; exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            self.backend.action_log('log-message')
+        calls = [["action-log", "log-message"]]
+        self.assertEqual(fake_script_calls(self, clear=True), calls)
+
+    def test_action_get(self):
+        fake_script(self, 'action-get', """echo '{"foo-name": "bar", "silent": false}'""")
+        params = self.backend.action_get()
+        self.assertEqual(params['foo-name'], 'bar')
+        self.assertEqual(params['silent'], False)
+        self.assertEqual(fake_script_calls(self), [['action-get', '--format=json']])
+
+    def test_action_set(self):
+        fake_script(self, 'action-get', 'exit 1')
+        fake_script(self, 'action-set', 'exit 0')
+        self.backend.action_set(OrderedDict([('x', 'dead beef'), ('y', 1)]))
+        self.assertEqual(fake_script_calls(self), [['action-set', 'x=dead beef', 'y=1']])
+
+    def test_action_fail(self):
+        fake_script(self, 'action-get', 'exit 1')
+        fake_script(self, 'action-fail', 'exit 0')
+        self.backend.action_fail('error 42')
+        self.assertEqual(fake_script_calls(self), [['action-fail', 'error 42']])
+
+    def test_action_log(self):
+        fake_script(self, 'action-get', 'exit 1')
+        fake_script(self, 'action-log', 'exit 0')
+        self.backend.action_log('progress: 42%')
+        self.assertEqual(fake_script_calls(self), [['action-log', 'progress: 42%']])
+
+    def test_application_version_set(self):
+        fake_script(self, 'application-version-set', 'exit 0')
+        self.backend.application_version_set('1.2b3')
+        self.assertEqual(fake_script_calls(self), [['application-version-set', '--', '1.2b3']])
+
+    def test_application_version_set_invalid(self):
+        fake_script(self, 'application-version-set', 'exit 0')
+        with self.assertRaises(TypeError):
+            self.backend.application_version_set(2)
+        with self.assertRaises(TypeError):
+            self.backend.application_version_set()
+        self.assertEqual(fake_script_calls(self), [])
+
+    def test_juju_log(self):
+        fake_script(self, 'juju-log', 'exit 0')
+        self.backend.juju_log('WARNING', 'foo')
+        self.assertEqual(fake_script_calls(self, clear=True),
+                         [['juju-log', '--log-level', 'WARNING', 'foo']])
+
+        with self.assertRaises(TypeError):
+            self.backend.juju_log('DEBUG')
+        self.assertEqual(fake_script_calls(self, clear=True), [])
+
+        fake_script(self, 'juju-log', 'exit 1')
+        with self.assertRaises(ops.model.ModelError):
+            self.backend.juju_log('BAR', 'foo')
+        self.assertEqual(fake_script_calls(self, clear=True),
+                         [['juju-log', '--log-level', 'BAR', 'foo']])
+
+    def test_valid_metrics(self):
+        fake_script(self, 'add-metric', 'exit 0')
+        test_cases = [(
+            OrderedDict([('foo', 42), ('b-ar', 4.5), ('ba_-z', 4.5), ('a', 1)]),
+            OrderedDict([('de', 'ad'), ('be', 'ef_ -')]),
+            [['add-metric', '--labels', 'de=ad,be=ef_ -',
+              'foo=42', 'b-ar=4.5', 'ba_-z=4.5', 'a=1']]
+        ), (
+            OrderedDict([('foo1', 0), ('b2r', 4.5)]),
+            OrderedDict([('d3', 'aд'), ('b33f', '3_ -')]),
+            [['add-metric', '--labels', 'd3=aд,b33f=3_ -', 'foo1=0', 'b2r=4.5']],
+        )]
+        for metrics, labels, expected_calls in test_cases:
+            self.backend.add_metrics(metrics, labels)
+            self.assertEqual(fake_script_calls(self, clear=True), expected_calls)
+
+    def test_invalid_metric_names(self):
+        invalid_inputs = [
+            ({'': 4.2}, {}),
+            ({'1': 4.2}, {}),
+            ({'1': -4.2}, {}),
+            ({'123': 4.2}, {}),
+            ({'1foo': 4.2}, {}),
+            ({'-foo': 4.2}, {}),
+            ({'_foo': 4.2}, {}),
+            ({'foo-': 4.2}, {}),
+            ({'foo_': 4.2}, {}),
+            ({'a-': 4.2}, {}),
+            ({'a_': 4.2}, {}),
+            ({'BAЯ': 4.2}, {}),
+        ]
+        for metrics, labels in invalid_inputs:
+            with self.assertRaises(ops.model.ModelError):
+                self.backend.add_metrics(metrics, labels)
+
+    def test_invalid_metric_values(self):
+        invalid_inputs = [
+            ({'a': float('+inf')}, {}),
+            ({'a': float('-inf')}, {}),
+            ({'a': float('nan')}, {}),
+            ({'foo': 'bar'}, {}),
+            ({'foo': '1O'}, {}),
+        ]
+        for metrics, labels in invalid_inputs:
+            with self.assertRaises(ops.model.ModelError):
+                self.backend.add_metrics(metrics, labels)
+
+    def test_invalid_metric_labels(self):
+        invalid_inputs = [
+            ({'foo': 4.2}, {'': 'baz'}),
+            ({'foo': 4.2}, {',bar': 'baz'}),
+            ({'foo': 4.2}, {'b=a=r': 'baz'}),
+            ({'foo': 4.2}, {'BAЯ': 'baz'}),
+        ]
+        for metrics, labels in invalid_inputs:
+            with self.assertRaises(ops.model.ModelError):
+                self.backend.add_metrics(metrics, labels)
+
+    def test_invalid_metric_label_values(self):
+        invalid_inputs = [
+            ({'foo': 4.2}, {'bar': ''}),
+            ({'foo': 4.2}, {'bar': 'b,az'}),
+            ({'foo': 4.2}, {'bar': 'b=az'}),
+        ]
+        for metrics, labels in invalid_inputs:
+            with self.assertRaises(ops.model.ModelError):
+                self.backend.add_metrics(metrics, labels)
+
+
+class TestLazyMapping(unittest.TestCase):
+
+    def test_invalidate(self):
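+        # _load runs once, its result is cached, and it only runs again after _invalidate().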
+        loaded = []
+
+        class MyLazyMap(ops.model.LazyMapping):
+            def _load(self):
+                loaded.append(1)
+                return {'foo': 'bar'}
+
+        map = MyLazyMap()
+        self.assertEqual(map['foo'], 'bar')
+        self.assertEqual(loaded, [1])
+        self.assertEqual(map['foo'], 'bar')
+        self.assertEqual(loaded, [1])
+        map._invalidate()
+        self.assertEqual(map['foo'], 'bar')
+        self.assertEqual(loaded, [1, 1])
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_testing.py b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..1645e5a2a6329c0f6f23e8e2de028051a3783978
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/mod/operator/test/test_testing.py
@@ -0,0 +1,756 @@
+#!/usr/bin/python3
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import importlib
+import pathlib
+import shutil
+import sys
+import tempfile
+import textwrap
+import unittest
+
+from ops.charm import (
+    CharmBase,
+    RelationEvent,
+)
+from ops.framework import (
+    Object,
+)
+from ops.model import (
+    ModelError,
+    RelationNotFoundError,
+)
+from ops.testing import Harness
+
+
+class TestHarness(unittest.TestCase):
+
+    def test_add_relation(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        self.assertIsInstance(rel_id, int)
+        backend = harness._backend
+        self.assertEqual(backend.relation_ids('db'), [rel_id])
+        self.assertEqual(backend.relation_list(rel_id), [])
+        # Make sure the initial data bags for our app and unit are empty.
+        self.assertEqual(backend.relation_get(rel_id, 'test-app', is_app=True), {})
+        self.assertEqual(backend.relation_get(rel_id, 'test-app/0', is_app=False), {})
+
+    def test_add_relation_and_unit(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        self.assertIsInstance(rel_id, int)
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        harness.update_relation_data(rel_id, 'postgresql/0', {'foo': 'bar'})
+        backend = harness._backend
+        self.assertEqual(backend.relation_ids('db'), [rel_id])
+        self.assertEqual(backend.relation_list(rel_id), ['postgresql/0'])
+        self.assertEqual(
+            backend.relation_get(rel_id, 'postgresql/0', is_app=False),
+            {'foo': 'bar'})
+
+    def test_add_relation_with_remote_app_data(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        remote_app = 'postgresql'
+        rel_id = harness.add_relation('db', remote_app)
+        harness.update_relation_data(rel_id, 'postgresql', {'app': 'data'})
+        self.assertIsInstance(rel_id, int)
+        backend = harness._backend
+        self.assertEqual([rel_id], backend.relation_ids('db'))
+        self.assertEqual({'app': 'data'}, backend.relation_get(rel_id, remote_app, is_app=True))
+
+    def test_add_relation_with_our_initial_data(self):
+
+        class InitialDataTester(CharmBase):
+            """Record the relation-changed events."""
+
+            def __init__(self, framework, charm_name):
+                super().__init__(framework, charm_name)
+                self.observed_events = []
+                self.framework.observe(self.on.db_relation_changed, self._on_db_relation_changed)
+
+            def _on_db_relation_changed(self, event):
+                self.observed_events.append(event)
+
+        # language=YAML
+        harness = Harness(InitialDataTester, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v1'})
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'})
+        backend = harness._backend
+        self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+
+        harness.begin()
+        self.assertEqual({'k': 'v1'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+        # Make sure no relation-changed events are emitted for our own data bags.
+        self.assertEqual([], harness.charm.observed_events)
+
+        # A remote unit can still update our app relation data bag since our unit is not a leader.
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'})
+        # But no relation-changed event is observed for our own app data bag.
+        self.assertEqual([], harness.charm.observed_events)
+        # We can also update our own relation data, even if it is a bit 'cheaty'
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'})
+        # But no event happens
+
+        # Updating our app relation data bag and our unit data bag does not generate events.
+        harness.set_leader(True)
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'})
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'})
+        self.assertEqual([], harness.charm.observed_events)
+
+    def test_add_peer_relation_with_initial_data_leader(self):
+
+        class InitialDataTester(CharmBase):
+            """Record the relation-changed events."""
+
+            def __init__(self, framework, charm_name):
+                super().__init__(framework, charm_name)
+                self.observed_events = []
+                self.framework.observe(self.on.cluster_relation_changed,
+                                       self._on_cluster_relation_changed)
+
+            def _on_cluster_relation_changed(self, event):
+                self.observed_events.append(event)
+
+        # language=YAML
+        harness = Harness(InitialDataTester, meta='''
+            name: test-app
+            peers:
+                cluster:
+                    interface: cluster
+            ''')
+        # TODO: dmitriis 2020-04-07 test a minion unit and initial peer relation app data
+        # events when the harness begins to emit events for initial data.
+        harness.set_leader(is_leader=True)
+        rel_id = harness.add_relation('cluster', 'test-app')
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v'})
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.1'})
+        backend = harness._backend
+        self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+
+        harness.begin()
+        self.assertEqual({'k': 'v'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual({'ingress-address': '192.0.2.1'},
+                         backend.relation_get(rel_id, 'test-app/0', is_app=False))
+        # Make sure no relation-changed events are emitted for our own data bags.
+        self.assertEqual([], harness.charm.observed_events)
+
+        # Updating our app relation data bag and our unit data bag does not trigger events
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v2'})
+        harness.update_relation_data(rel_id, 'test-app/0', {'ingress-address': '192.0.2.2'})
+        self.assertEqual([], harness.charm.observed_events)
+
+        # If our unit becomes a minion, updating app relation data indirectly becomes possible
+        # and our charm gets notifications.
+        harness.set_leader(False)
+        harness.update_relation_data(rel_id, 'test-app', {'k': 'v3'})
+        self.assertEqual({'k': 'v3'}, backend.relation_get(rel_id, 'test-app', is_app=True))
+        self.assertEqual(len(harness.charm.observed_events), 1)
+        self.assertIsInstance(harness.charm.observed_events[0], RelationEvent)
+
+    def test_relation_events(self):
+        harness = Harness(RelationEventCharm, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+        ''')
+        harness.begin()
+        harness.charm.observe_relation_events('db')
+        self.assertEqual(harness.charm.get_changes(), [])
+        rel_id = harness.add_relation('db', 'postgresql')
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-created',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': None,
+                  'relation_id': rel_id,
+              }}])
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-joined',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': 'postgresql/0',
+                  'relation_id': rel_id,
+              }}])
+        harness.update_relation_data(rel_id, 'postgresql', {'foo': 'bar'})
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-changed',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': None,
+                  'relation_id': rel_id,
+              }}])
+        harness.update_relation_data(rel_id, 'postgresql/0', {'baz': 'bing'})
+        self.assertEqual(
+            harness.charm.get_changes(),
+            [{'name': 'relation-changed',
+              'data': {
+                  'app': 'postgresql',
+                  'unit': 'postgresql/0',
+                  'relation_id': rel_id,
+              }}])
+
+    def test_get_relation_data(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.update_relation_data(rel_id, 'postgresql', {'remote': 'data'})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app'), {})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app/0'), {})
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-app/1'), None)
+        self.assertEqual(harness.get_relation_data(rel_id, 'postgresql'), {'remote': 'data'})
+        with self.assertRaises(KeyError):
+            # unknown relation id
+            harness.get_relation_data(99, 'postgresql')
+
+    def test_create_harness_twice(self):
+        metadata = '''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+            '''
+        harness1 = Harness(CharmBase, meta=metadata)
+        harness2 = Harness(CharmBase, meta=metadata)
+        harness1.begin()
+        harness2.begin()
+        helper1 = DBRelationChangedHelper(harness1.charm, "helper1")
+        helper2 = DBRelationChangedHelper(harness2.charm, "helper2")
+        rel_id = harness2.add_relation('db', 'postgresql')
+        harness2.update_relation_data(rel_id, 'postgresql', {'key': 'value'})
+        # Helper2 should see the event triggered by harness2, but helper1 should see no events.
+        self.assertEqual(helper1.changes, [])
+        self.assertEqual(helper2.changes, [(rel_id, 'postgresql')])
+
+    def test_begin_twice(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: test-app
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        harness.begin()
+        with self.assertRaises(RuntimeError):
+            harness.begin()
+
+    def test_update_relation_exposes_new_data(self):
+        harness = Harness(CharmBase, meta='''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        viewer = RelationChangedViewer(harness.charm, 'db')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'})
+        self.assertEqual(viewer.changes, [{'initial': 'data'}])
+        harness.update_relation_data(rel_id, 'postgresql/0', {'new': 'value'})
+        self.assertEqual(viewer.changes, [{'initial': 'data'},
+                                          {'initial': 'data', 'new': 'value'}])
+
+    def test_update_relation_no_local_unit_change_event(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        helper = DBRelationChangedHelper(harness.charm, "helper")
+        rel_id = harness.add_relation('db', 'postgresql')
+        rel = harness.charm.model.get_relation('db')
+        rel.data[harness.charm.model.unit]['key'] = 'value'
+        # there should be no event for updating our own data
+        harness.update_relation_data(rel_id, 'my-charm/0', {'new': 'other'})
+        # but the data will be updated.
+        self.assertEqual({'key': 'value', 'new': 'other'}, rel.data[harness.charm.model.unit])
+
+        rel.data[harness.charm.model.unit]['new'] = 'value'
+        # Our unit data bag got updated.
+        self.assertEqual(rel.data[harness.charm.model.unit]['new'], 'value')
+        # But there were no changed events registered by our unit.
+        self.assertEqual([], helper.changes)
+
+    def test_update_peer_relation_no_local_unit_change_event(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: postgresql
+            peers:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        helper = DBRelationChangedHelper(harness.charm, "helper")
+        rel_id = harness.add_relation('db', 'postgresql')
+
+        rel = harness.charm.model.get_relation('db')
+        rel.data[harness.charm.model.unit]['key'] = 'value'
+        rel = harness.charm.model.get_relation('db')
+        harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v1'})
+        self.assertEqual({'key': 'v1'}, rel.data[harness.charm.model.unit])
+        # Make sure there was no event
+        self.assertEqual([], helper.changes)
+
+        rel.data[harness.charm.model.unit]['key'] = 'v2'
+        # Our unit data bag got updated.
+        self.assertEqual({'key': 'v2'}, dict(rel.data[harness.charm.model.unit]))
+        # But there were no changed events registered by our unit.
+        self.assertEqual([], helper.changes)
+
+        # Same for when our unit is a leader.
+        harness.set_leader(is_leader=True)
+        harness.update_relation_data(rel_id, 'postgresql/0', {'key': 'v3'})
+        self.assertEqual({'key': 'v3'}, dict(rel.data[harness.charm.model.unit]))
+        self.assertEqual([], helper.changes)
+
+        rel.data[harness.charm.model.unit]['key'] = 'v4'
+        self.assertEqual(rel.data[harness.charm.model.unit]['key'], 'v4')
+        self.assertEqual([], helper.changes)
+
+    def test_update_peer_relation_app_data(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: postgresql
+            peers:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        harness.set_leader(is_leader=True)
+        helper = DBRelationChangedHelper(harness.charm, "helper")
+        rel_id = harness.add_relation('db', 'postgresql')
+        rel = harness.charm.model.get_relation('db')
+        rel.data[harness.charm.app]['key'] = 'value'
+        harness.update_relation_data(rel_id, 'postgresql', {'key': 'v1'})
+        self.assertEqual({'key': 'v1'}, rel.data[harness.charm.app])
+        self.assertEqual([], helper.changes)
+
+        rel.data[harness.charm.app]['key'] = 'v2'
+        # Our app data bag got updated.
+        self.assertEqual(rel.data[harness.charm.model.app]['key'], 'v2')
+        # But there were no changed events registered by our unit.
+        self.assertEqual([], helper.changes)
+
+        # If our unit is not the leader, we get an event about peer app relation data changes.
+        harness.set_leader(is_leader=False)
+        harness.update_relation_data(rel_id, 'postgresql', {'k2': 'v2'})
+        self.assertEqual(rel.data[harness.charm.model.app]['k2'], 'v2')
+        self.assertEqual(helper.changes, [(0, 'postgresql')])
+
+    def test_update_relation_no_local_app_change_event(self):
+        # language=YAML
+        harness = Harness(CharmBase, meta='''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        harness.set_leader(False)
+        helper = DBRelationChangedHelper(harness.charm, "helper")
+        rel_id = harness.add_relation('db', 'postgresql')
+        # TODO: remove this as soon as https://github.com/canonical/operator/issues/175 is fixed.
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        self.assertEqual(helper.changes, [])
+
+        harness.update_relation_data(rel_id, 'my-charm', {'new': 'value'})
+        rel = harness.charm.model.get_relation('db')
+        self.assertEqual(rel.data[harness.charm.app]['new'], 'value')
+
+        # Our app data bag got updated.
+        self.assertEqual(rel.data[harness.charm.model.app]['new'], 'value')
+        # But there were no changed events registered by our unit.
+        self.assertEqual(helper.changes, [])
+
+    def test_update_relation_remove_data(self):
+        harness = Harness(CharmBase, meta='''
+            name: my-charm
+            requires:
+              db:
+                interface: pgsql
+            ''')
+        harness.begin()
+        viewer = RelationChangedViewer(harness.charm, 'db')
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        harness.update_relation_data(rel_id, 'postgresql/0', {'initial': 'data'})
+        harness.update_relation_data(rel_id, 'postgresql/0', {'initial': ''})
+        self.assertEqual(viewer.changes, [{'initial': 'data'}, {}])
+
+    def test_update_config(self):
+        harness = Harness(RecordingCharm)
+        harness.begin()
+        harness.update_config(key_values={'a': 'foo', 'b': 2})
+        self.assertEqual(
+            harness.charm.changes,
+            [{'name': 'config', 'data': {'a': 'foo', 'b': 2}}])
+        harness.update_config(key_values={'b': 3})
+        self.assertEqual(
+            harness.charm.changes,
+            [{'name': 'config', 'data': {'a': 'foo', 'b': 2}},
+             {'name': 'config', 'data': {'a': 'foo', 'b': 3}}])
+        # You can set config values to the empty string; use unset to actually remove items.
+        harness.update_config(key_values={'a': ''}, unset=set('b'))
+        self.assertEqual(
+            harness.charm.changes,
+            [{'name': 'config', 'data': {'a': 'foo', 'b': 2}},
+             {'name': 'config', 'data': {'a': 'foo', 'b': 3}},
+             {'name': 'config', 'data': {'a': ''}},
+             ])
+
+    def test_set_leader(self):
+        harness = Harness(RecordingCharm)
+        # No event happens here
+        harness.set_leader(False)
+        harness.begin()
+        self.assertFalse(harness.charm.model.unit.is_leader())
+        harness.set_leader(True)
+        self.assertEqual(harness.charm.get_changes(reset=True), [{'name': 'leader-elected'}])
+        self.assertTrue(harness.charm.model.unit.is_leader())
+        harness.set_leader(False)
+        self.assertFalse(harness.charm.model.unit.is_leader())
+        # No hook event when you lose leadership.
+        # TODO: verify if Juju always triggers `leader-settings-changed` if you
+        #   lose leadership.
+        self.assertEqual(harness.charm.get_changes(reset=True), [])
+        harness.disable_hooks()
+        harness.set_leader(True)
+        # No hook event if you have disabled them
+        self.assertEqual(harness.charm.get_changes(reset=True), [])
+
+    def test_relation_set_app_not_leader(self):
+        harness = Harness(RecordingCharm, meta='''
+            name: test-charm
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        harness.begin()
+        harness.set_leader(False)
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        rel = harness.charm.model.get_relation('db')
+        with self.assertRaises(ModelError):
+            rel.data[harness.charm.app]['foo'] = 'bar'
+        # The data has not actually been changed
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {})
+        harness.set_leader(True)
+        rel.data[harness.charm.app]['foo'] = 'bar'
+        self.assertEqual(harness.get_relation_data(rel_id, 'test-charm'), {'foo': 'bar'})
+
+    def test_hooks_enabled_and_disabled(self):
+        harness = Harness(RecordingCharm, meta='''
+            name: test-charm
+        ''')
+        # Before begin() there are no events.
+        harness.update_config({'value': 'first'})
+        # By default, after begin the charm is set up to receive events.
+        harness.begin()
+        harness.update_config({'value': 'second'})
+        self.assertEqual(
+            harness.charm.get_changes(reset=True),
+            [{'name': 'config', 'data': {'value': 'second'}}])
+        # Once disabled, we won't see config-changed when we make an update
+        harness.disable_hooks()
+        harness.update_config({'third': '3'})
+        self.assertEqual(harness.charm.get_changes(reset=True), [])
+        harness.enable_hooks()
+        harness.update_config({'value': 'fourth'})
+        self.assertEqual(
+            harness.charm.get_changes(reset=True),
+            [{'name': 'config', 'data': {'value': 'fourth', 'third': '3'}}])
+
+    def test_metadata_from_directory(self):
+        tmp = pathlib.Path(tempfile.mkdtemp())
+        self.addCleanup(shutil.rmtree, str(tmp))
+        metadata_filename = tmp / 'metadata.yaml'
+        with metadata_filename.open('wt') as metadata:
+            metadata.write(textwrap.dedent('''
+            name: my-charm
+            requires:
+                db:
+                    interface: pgsql
+            '''))
+        harness = self._get_dummy_charm_harness(tmp)
+        harness.begin()
+        self.assertEqual(list(harness.model.relations), ['db'])
+        # The charm_dir also gets set
+        self.assertEqual(harness.framework.charm_dir, tmp)
+
+    def test_actions_from_directory(self):
+        tmp = pathlib.Path(tempfile.mkdtemp())
+        self.addCleanup(shutil.rmtree, str(tmp))
+        actions_filename = tmp / 'actions.yaml'
+        with actions_filename.open('wt') as actions:
+            actions.write(textwrap.dedent('''
+            test:
+                description: a dummy action
+            '''))
+        harness = self._get_dummy_charm_harness(tmp)
+        harness.begin()
+        self.assertEqual(list(harness.framework.meta.actions), ['test'])
+        # The charm_dir also gets set
+        self.assertEqual(harness.framework.charm_dir, tmp)
+
+    def _get_dummy_charm_harness(self, tmp):
+        self._write_dummy_charm(tmp)
+        charm_mod = importlib.import_module('charm')
+        harness = Harness(charm_mod.MyTestingCharm)
+        return harness
+
+    def _write_dummy_charm(self, tmp):
+        srcdir = tmp / 'src'
+        srcdir.mkdir(0o755)
+        charm_filename = srcdir / 'charm.py'
+        with charm_filename.open('wt') as charmpy:
+            # language=Python
+            charmpy.write(textwrap.dedent('''
+                from ops.charm import CharmBase
+                class MyTestingCharm(CharmBase):
+                    pass
+                '''))
+        orig = sys.path[:]
+        sys.path.append(str(srcdir))
+
+        def cleanup():
+            sys.path = orig
+            sys.modules.pop('charm')
+
+        self.addCleanup(cleanup)
+
+    def test_actions_passed_in(self):
+        harness = Harness(
+            CharmBase,
+            meta='''
+                name: test-app
+            ''',
+            actions='''
+                test-action:
+                    description: a dummy test action
+            ''')
+        self.assertEqual(list(harness.framework.meta.actions), ['test-action'])
+
+    def test_relation_set_deletes(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-charm
+            requires:
+                db:
+                    interface: pgsql
+            ''')
+        harness.begin()
+        harness.set_leader(False)
+        rel_id = harness.add_relation('db', 'postgresql')
+        harness.update_relation_data(rel_id, 'test-charm/0', {'foo': 'bar'})
+        harness.add_relation_unit(rel_id, 'postgresql/0')
+        rel = harness.charm.model.get_relation('db', rel_id)
+        del rel.data[harness.charm.model.unit]['foo']
+        self.assertEqual({}, harness.get_relation_data(rel_id, 'test-charm/0'))
+
+    def test_set_workload_version(self):
+        harness = Harness(CharmBase, meta='''
+            name: app
+            ''')
+        harness.begin()
+        self.assertIsNone(harness.get_workload_version())
+        harness.charm.model.unit.set_workload_version('1.2.3')
+        self.assertEqual(harness.get_workload_version(), '1.2.3')
+
+
+class DBRelationChangedHelper(Object):
+    def __init__(self, parent, key):
+        super().__init__(parent, key)
+        self.changes = []
+        parent.framework.observe(parent.on.db_relation_changed, self.on_relation_changed)
+
+    def on_relation_changed(self, event):
+        if event.unit is not None:
+            self.changes.append((event.relation.id, event.unit.name))
+        else:
+            self.changes.append((event.relation.id, event.app.name))
+
+
+class RelationChangedViewer(Object):
+    """Track relation_changed events and saves the data seen in the relation bucket."""
+
+    def __init__(self, charm, relation_name):
+        super().__init__(charm, relation_name)
+        self.changes = []
+        charm.framework.observe(charm.on[relation_name].relation_changed, self.on_relation_changed)
+
+    def on_relation_changed(self, event):
+        if event.unit is not None:
+            data = event.relation.data[event.unit]
+        else:
+            data = event.relation.data[event.app]
+        self.changes.append(dict(data))
+
+
+class RecordingCharm(CharmBase):
+    """Record the events that we see, and any associated data."""
+
+    def __init__(self, framework, charm_name):
+        super().__init__(framework, charm_name)
+        self.changes = []
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.leader_elected, self.on_leader_elected)
+
+    def get_changes(self, reset=True):
+        changes = self.changes
+        if reset:
+            self.changes = []
+        return changes
+
+    def on_config_changed(self, _):
+        self.changes.append(dict(name='config', data=dict(self.framework.model.config)))
+
+    def on_leader_elected(self, _):
+        self.changes.append(dict(name='leader-elected'))
+
+
+class RelationEventCharm(RecordingCharm):
+    """Record events related to relation lifecycles."""
+
+    def __init__(self, framework, charm_name):
+        super().__init__(framework, charm_name)
+
+    def observe_relation_events(self, relation_name):
+        self.framework.observe(self.on[relation_name].relation_created, self._on_relation_created)
+        self.framework.observe(self.on[relation_name].relation_joined, self._on_relation_joined)
+        self.framework.observe(self.on[relation_name].relation_changed, self._on_relation_changed)
+        self.framework.observe(self.on[relation_name].relation_departed,
+                               self._on_relation_departed)
+        self.framework.observe(self.on[relation_name].relation_broken, self._on_relation_broken)
+
+    def _on_relation_created(self, event):
+        self._observe_relation_event('relation-created', event)
+
+    def _on_relation_joined(self, event):
+        self._observe_relation_event('relation-joined', event)
+
+    def _on_relation_changed(self, event):
+        self._observe_relation_event('relation-changed', event)
+
+    def _on_relation_departed(self, event):
+        self._observe_relation_event('relation-departed', event)
+
+    def _on_relation_broken(self, event):
+        self._observe_relation_event('relation-broken', event)
+
+    def _observe_relation_event(self, event_name, event):
+        unit_name = None
+        if event.unit is not None:
+            unit_name = event.unit.name
+        app_name = None
+        if event.app is not None:
+            app_name = event.app.name
+        self.changes.append(
+            dict(name=event_name,
+                 data=dict(app=app_name, unit=unit_name, relation_id=event.relation.id)))
+
+
+class TestTestingModelBackend(unittest.TestCase):
+
+    def test_status_set_get_unit(self):
+        harness = Harness(CharmBase, meta='''
+            name: app
+            ''')
+        backend = harness._backend
+        backend.status_set('blocked', 'message', is_app=False)
+        self.assertEqual(backend.status_get(is_app=False), ('blocked', 'message'))
+        self.assertEqual(backend.status_get(is_app=True), None)
+
+    def test_status_set_get_app(self):
+        harness = Harness(CharmBase, meta='''
+            name: app
+            ''')
+        backend = harness._backend
+        backend.status_set('blocked', 'message', is_app=True)
+        self.assertEqual(backend.status_get(is_app=True), ('blocked', 'message'))
+        self.assertEqual(backend.status_get(is_app=False), None)
+
+    def test_relation_ids_unknown_relation(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-charm
+            provides:
+              db:
+                interface: mydb
+            ''')
+        backend = harness._backend
+        # With no relations added, we just get an empty list for the interface
+        self.assertEqual(backend.relation_ids('db'), [])
+        # But an unknown interface raises a ModelError
+        with self.assertRaises(ModelError):
+            backend.relation_ids('unknown')
+
+    def test_relation_get_unknown_relation_id(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-charm
+            ''')
+        backend = harness._backend
+        with self.assertRaises(RelationNotFoundError):
+            backend.relation_get(1234, 'unit/0', False)
+
+    def test_relation_list_unknown_relation_id(self):
+        harness = Harness(CharmBase, meta='''
+            name: test-charm
+            ''')
+        backend = harness._backend
+        with self.assertRaises(RelationNotFoundError):
+            backend.relation_list(1234)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/src/charm.py b/magma/hackfest_squid_cnf/charms/squid/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..6e2eff40c9534c822815c88dea3c5c0955192aab
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/src/charm.py
@@ -0,0 +1,120 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+# vim:fenc=utf-8
+# Copyright © 2020 Dominik Fleischmann dominik.fleischmann@canonical.com
+
+"""Operator Charm main library."""
+# Load modules from lib directory
+import logging
+
+import setuppath  # noqa:F401
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus
+
+
+class SquidK8SCharm(CharmBase):
+    """Class reprisenting this Operator charm."""
+
+    state = StoredState()
+
+    def __init__(self, *args):
+        """Initialize charm and configure states and events to observe."""
+        super().__init__(*args)
+        # -- standard hook observation
+        self.framework.observe(self.on.install, self.on_install)
+        self.framework.observe(self.on.start, self.on_start)
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+#        self.framework.observe(self.on.addconfig_action, self)
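+        # (left disabled: wiring it up would also need an "addconfig" entry in
+        #  actions.yaml, and the observer should be self.on_addconfig_action, not self)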
+        # -- initialize states --
+        self.state.set_default(installed=False)
+        self.state.set_default(configured=False)
+        self.state.set_default(started=False)
+
+    def make_pod_spec(self):
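+        """Build the Kubernetes pod spec from the charm config (container image and port)."""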
+        config = self.framework.model.config
+        ports = [{"name": "squid", "containerPort": config["port"], "protocol": "TCP"}]
+
+        spec = {
+            "containers": [{
+                "name": self.framework.model.app.name,
+                "image": config["image"],
+                "ports": ports,
+            }],
+        }
+
+        return spec
+
+    def _apply_spec(self, spec):
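+        """Set the pod spec on the model; only the leader unit may do this."""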
+        # Only apply the spec if this unit is a leader
+        if self.framework.model.unit.is_leader():
+            self.framework.model.pod.set_spec(spec)
+            self.state.spec = spec
+
+    def on_install(self, event):
+        """Handle install state."""
+        self.unit.status = MaintenanceStatus("Installing charm software")
+        # Perform install tasks
+        self.unit.status = MaintenanceStatus("Install complete")
+        logging.info("Install of software complete")
+        self.state.installed = True
+
+    def on_config_changed(self, event):
+        """Handle config changed."""
+
+        if not self.state.installed:
+            logging.warning("Config changed called before install complete, deferring event: {}.".format(event.handle))
+            self._defer_once(event)
+
+            return
+
+        if self.state.started:
+            # Stop if necessary for reconfig
+            logging.info("Stopping for configuration, event handle: {}".format(event.handle))
+        # Configure the software
+        logging.info("Configuring")
+        self.state.configured = True
+
+    def on_start(self, event):
+        """Handle start state."""
+
+        if not self.state.configured:
+            logging.warning("Start called before configuration complete, deferring event: {}".format(event.handle))
+            self._defer_once(event)
+
+            return
+        self.unit.status = MaintenanceStatus("Applying pod spec")
+        # Start software
+        new_pod_spec = self.make_pod_spec()
+        self._apply_spec(new_pod_spec)
+
+        self.unit.status = ActiveStatus("Unit is ready")
+        self.state.started = True
+        logging.info("Started")
+
+    def _defer_once(self, event):
+        """Defer the given event, but only once."""
+        notice_count = 0
+        handle = str(event.handle)
+
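+        # Event handles look like "SquidK8SCharm/on/config_changed[<n>]"; strip the
+        # bracketed suffix so earlier notices of the same event kind can be counted.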
+        for event_path, _, _ in self.framework._storage.notices(None):
+            if event_path.startswith(handle.split('[')[0]):
+                notice_count += 1
+                logging.debug("Found event: {} x {}".format(event_path, notice_count))
+
+        if notice_count > 1:
+            logging.debug("Not deferring {} notice count of {}".format(handle, notice_count))
+        else:
+            logging.debug("Deferring {} notice count of {}".format(handle, notice_count))
+            event.defer()
+
+    def on_addconfig_action(self, event):
+        """Handle the example_action action."""
+        event.log("Hello from the example action.")
+        event.set_results({"success": "true"})
+
+
+if __name__ == "__main__":
+    main(SquidK8SCharm)
diff --git a/magma/hackfest_squid_cnf/charms/squid/src/setuppath.py b/magma/hackfest_squid_cnf/charms/squid/src/setuppath.py
new file mode 100644
index 0000000000000000000000000000000000000000..736da20c9af4f47e0184168c10de70e23759495a
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/src/setuppath.py
@@ -0,0 +1,2 @@
+import sys
+sys.path.append('lib')
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/functional/requirements.txt b/magma/hackfest_squid_cnf/charms/squid/tests/functional/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b7c9112db138bffa4c67040e8744b42d68d95855
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/functional/requirements.txt
@@ -0,0 +1 @@
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/bionic.yaml b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/bionic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef41a680a0b67e54534aac274f6aaa096ac13fbe
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/bionic.yaml
@@ -0,0 +1,6 @@
+series: bionic
+
+applications:
+    squid:
+        charm: ../../../../
+        num_units: 1
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/xenial.yaml b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/xenial.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c4e9580146a4369981ee4c5ce567b5b1db4333e4
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/bundles/xenial.yaml
@@ -0,0 +1,7 @@
+series: xenial
+
+applications:
+    squid:
+        charm: ../../../../
+        num_units: 1
+
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/tests.yaml b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/tests.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa92ccb5d20cfc6e7e877aed029f51dd64141146
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/functional/tests/tests.yaml
@@ -0,0 +1,17 @@
+tests:
+# - bionic_model:
+#   - zaza.charm_tests.noop.tests.NoopTestBionic
+# - xenial_model:
+#   - zaza.charm_tests.noop.tests.NoopTestXenial
+ - zaza.charm_tests.noop.tests.NoopTest
+configure:
+# - bionic_model:
+#   - zaza.charm_tests.noop.setup.basic_setup_bionic
+# - xenial_model:
+#   - zaza.charm_tests.noop.setup.basic_setup_xenial
+ - zaza.charm_tests.noop.setup.basic_setup
+gate_bundles:
+    - xenial
+    - bionic
+smoke_bundles:
+    - focal
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/unit/operator_fixtures.py b/magma/hackfest_squid_cnf/charms/squid/tests/unit/operator_fixtures.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9462fc97e0f4174113eb1a4496dac28f99e6394
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/unit/operator_fixtures.py
@@ -0,0 +1,113 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+# vim:fenc=utf-8
+# Copyright © 2020 Dominik Fleischmann dominik.fleischmann@canonical.com
+# Distributed under terms of the GPL license.
+"""Operator Charm test fixtures."""
+
+import os
+import tempfile
+import unittest
+
+import setuppath  # noqa:F401
+import mock
+import ops
+import ops.main
+from src.charm import SquidK8SCharm
+
+
+class OperatorTestCase(unittest.TestCase):
+    """Fixtures for unit testing operator charms."""
+
+    @classmethod
+    def setUpClass(cls):
+        """Setup class fixture."""
+        # Setup a tmpdir
+        cls.tmpdir = tempfile.TemporaryDirectory()
+
+        # Store patchers for test cases that want them
+        cls.patchers = {}
+
+        # Prevent the framework from trying to call subprocesses
+        run_patcher = mock.patch("ops.model.ModelBackend._run")
+        cls.patchers["ops.model.ModelBackend._run"] = run_patcher.start()
+
+        # Stop unit test from calling fchown
+        fchown_patcher = mock.patch("os.fchown")
+        cls.patchers["os.fchown"] = fchown_patcher.start()
+        chown_patcher = mock.patch("os.chown")
+        cls.patchers["os.chown"] = chown_patcher.start()
+
+        # Set up mock Juju environment variables
+        os.environ["JUJU_UNIT_NAME"] = "mock/0"
+        os.environ["JUJU_CHARM_DIR"] = "."
+
+    @classmethod
+    def tearDownClass(cls):
+        """Tear down class fixture."""
+        mock.patch.stopall()
+        cls.tmpdir.cleanup()
+
+    def setUp(self):
+        """Setup test fixture."""
+        # Create a charm instance
+        model_backend = ops.model.ModelBackend()
+        ops.main.setup_root_logging(model_backend)
+        charm_dir = ops.main._get_charm_dir()
+        metadata, actions_metadata = ops.main._load_metadata(charm_dir)
+        meta = ops.charm.CharmMeta(metadata, actions_metadata)
+        unit_name = os.environ["JUJU_UNIT_NAME"]
+        model = ops.model.Model(unit_name, meta, model_backend)
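+        # ":memory:" keeps the framework's SQLite state store in memory for the test run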
+        framework = ops.framework.Framework(":memory:", charm_dir, meta, model)
+        charm = SquidK8SCharm(framework, None)
+        self.charm = charm
+
+    def tearDown(self):
+        """Clean up test fixture."""
+        # Remove runtime class attributes to avoid error on next setUp
+
+        for relation_name in self.charm.framework.meta.relations:
+            relation_name = relation_name.replace("-", "_")
+            delattr(ops.charm.CharmEvents, relation_name + "_relation_joined")
+            delattr(ops.charm.CharmEvents, relation_name + "_relation_changed")
+            delattr(ops.charm.CharmEvents, relation_name + "_relation_departed")
+            delattr(ops.charm.CharmEvents, relation_name + "_relation_broken")
+
+        for storage_name in self.charm.framework.meta.storages:
+            storage_name = storage_name.replace("-", "_")
+            delattr(ops.charm.CharmEvents, storage_name + "_storage_attached")
+            delattr(ops.charm.CharmEvents, storage_name + "_storage_detaching")
+
+        for action_name in self.charm.framework.meta.actions:
+            action_name = action_name.replace("-", "_")
+            delattr(ops.charm.CharmEvents, action_name + "_action")
+
+    def emit(self, event):
+        """Emit the named hook on the charm."""
+        self.charm.framework.reemit()
+
+        if "_relation_" in event:
+            relation_name = event.split("_relation")[0].replace("_", "-")
+            with mock.patch.dict(
+                "os.environ",
+                {
+                    "JUJU_RELATION": relation_name,
+                    "JUJU_RELATION_ID": "1",
+                    "JUJU_REMOTE_APP": "mock",
+                    "JUJU_REMOTE_UNIT": "mock/0",
+                },
+            ):
+                ops.main._emit_charm_event(self.charm, event)
+        else:
+            ops.main._emit_charm_event(self.charm, event)
+
+    def get_notice_count(self, hook):
+        """Return the notice count for a given charm hook."""
+        notice_count = 0
+        handle = "Squid-K8SCharm/on/{}".format(hook)
+
+        for event_path, _, _ in self.charm.framework._storage.notices(None):
+            if event_path.startswith(handle):
+                notice_count += 1
+
+        return notice_count
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/unit/requirements.txt b/magma/hackfest_squid_cnf/charms/squid/tests/unit/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ba6b0ee1ed8feb744c0fcc2ff0477dc80d32dff3
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/unit/requirements.txt
@@ -0,0 +1,3 @@
+mock
+pyyaml
+coverage
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/unit/setuppath.py b/magma/hackfest_squid_cnf/charms/squid/tests/unit/setuppath.py
new file mode 100644
index 0000000000000000000000000000000000000000..736da20c9af4f47e0184168c10de70e23759495a
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/unit/setuppath.py
@@ -0,0 +1,2 @@
+import sys
+sys.path.append('lib')
diff --git a/magma/hackfest_squid_cnf/charms/squid/tests/unit/test_charm.py b/magma/hackfest_squid_cnf/charms/squid/tests/unit/test_charm.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc64b799608c598358d3d010c53f9dfcbc7b7fb5
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tests/unit/test_charm.py
@@ -0,0 +1,19 @@
+import unittest
+
+import setuppath  # noqa:F401
+from operator_fixtures import OperatorTestCase
+
+
+class TestCharm(OperatorTestCase):
+    def test_create_charm(self):
+        """Verify fixtures and create a charm."""
+        self.assertEqual(self.charm.state.installed, False)
+
+    def test_install(self):
+        """Test emitting an install hook."""
+        self.emit("install")
+        self.assertEqual(self.charm.state.installed, True)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/magma/hackfest_squid_cnf/charms/squid/tox.ini b/magma/hackfest_squid_cnf/charms/squid/tox.ini
new file mode 100644
index 0000000000000000000000000000000000000000..a2476324f8eea6fdb222b4f382057849e8d85f44
--- /dev/null
+++ b/magma/hackfest_squid_cnf/charms/squid/tox.ini
@@ -0,0 +1,46 @@
+[tox]
+skipsdist = True
+envlist = unit, functional
+skip_missing_interpreters = True
+
+[testenv]
+basepython = python3
+setenv =
+  PYTHONPATH = {toxinidir}/lib/:{toxinidir}
+passenv = HOME
+
+[testenv:unit]
+commands = 
+    coverage run -m unittest discover -s {toxinidir}/tests/unit -v
+    coverage report \
+	--omit tests/*,mod/*,.tox/*
+    coverage html \
+	--omit tests/*,mod/*,.tox/*
+deps = -r{toxinidir}/tests/unit/requirements.txt
+
+[testenv:functional]
+changedir = {toxinidir}/tests/functional
+commands = functest-run-suite {posargs}
+deps = -r{toxinidir}/tests/functional/requirements.txt
+
+[testenv:lint]
+commands = flake8
+deps =
+    flake8
+    flake8-docstrings
+    flake8-import-order
+    pep8-naming
+    flake8-colors
+
+[flake8]
+exclude =
+    .git,
+    __pycache__,
+    .tox,
+    mod,
+max-line-length = 120
+max-complexity = 10
+import-order-style = google
+
+[isort]
+force_to_top=setuppath
diff --git a/magma/hackfest_squid_cnf/juju-bundles/bundle.yaml b/magma/hackfest_squid_cnf/juju-bundles/bundle.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e1234d755b1f5fffae975327cfd9f1daedc08f79
--- /dev/null
+++ b/magma/hackfest_squid_cnf/juju-bundles/bundle.yaml
@@ -0,0 +1,9 @@
+description: Squid Bundle
+bundle: kubernetes
+applications:
+  squid:
+    charm: '../charms/squid'
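+    # local charm, path relative to this bundle file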
+    scale: 1
+    #    storage:
+    #      docker: 50M
+    #      spool: 50M
diff --git a/magma/hackfest_squid_cnf/squid_vnf.yaml b/magma/hackfest_squid_cnf/squid_vnf.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e77ebb3f82da08e99ea30e357591917db37f4448
--- /dev/null
+++ b/magma/hackfest_squid_cnf/squid_vnf.yaml
@@ -0,0 +1,41 @@
+vnfd-catalog:
+    vnfd:
+    -   id: squid-vnf
+        name: squid-vnf
+        connection-point:
+        -   name: mgmtnet
+        mgmt-interface:
+            cp: mgmtnet
+        kdu:
+        -   name: squid-kdu
+            juju-bundle: bundle.yaml
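+            # bundle.yaml is expected to live in the juju-bundles/ directory of this package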
+            kdu-configuration:
+                config-primitive:
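+                # each primitive maps to a Juju action exposed by the squid charm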
+                -   name: restart
+                    parameter:
+                    -   name: application-name
+                        data-type: STRING
+                        default-value: squid
+                -   name: start
+                    parameter:
+                    -   name: application-name
+                        data-type: STRING
+                        default-value: squid
+                -   name: stop
+                    parameter:
+                    -   name: application-name
+                        data-type: STRING
+                        default-value: squid
+                -   name: addurl
+                    parameter:
+                    -   name: application-name
+                        data-type: STRING
+                        default-value: squid
+                    -   name: url
+                        data-type: STRING
+                        default-value: ""
+        k8s-cluster:
+            nets:
+            -   id: mgmtnet
+                external-connection-point-ref: mgmtnet
+
diff --git a/magma/hackfest_squid_cnf_ns/squid_cnf_ns.yaml b/magma/hackfest_squid_cnf_ns/squid_cnf_ns.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..719279578a9d9b4b33880fcb8c2cd6d6dd233be8
--- /dev/null
+++ b/magma/hackfest_squid_cnf_ns/squid_cnf_ns.yaml
@@ -0,0 +1,24 @@
+nsd:nsd-catalog:
+    nsd:
+    -   id: squid-cnf-ns
+        name: squid-cnf-ns
+        short-name: squid-cnf-ns
+        description: NS with 1 KDU connected to the mgmtnet VL
+        version: '1.0'
+        logo: osm.png
+        constituent-vnfd:
+        -   vnfd-id-ref: squid-vnf
+            member-vnf-index: squid-vnf
+        vld:
+        -   id: mgmtnet
+            name: mgmtnet
+            short-name: mgmtnet
+            type: ELAN
+            mgmt-network: 'true'
+            vim-network-name: mgmt
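+            # default VIM network; may be overridden in the instantiation config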
+            vnfd-connection-point-ref:
+            -   member-vnf-index-ref: squid-vnf
+                vnfd-id-ref: squid-vnf
+                vnfd-connection-point-ref: mgmtnet
+
+