diff --git a/Hackfest_Demos/OSM-MR10/HD1.2-Preparation/git-clone.sh b/Hackfest_Demos/OSM-MR10/HD1.2-Preparation/git-clone.sh new file mode 100755 index 0000000000000000000000000000000000000000..75cf947a2681bd371d4edeb0066905873a97818e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.2-Preparation/git-clone.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +git clone --recurse-submodules -j8 https://osm.etsi.org/gitlab/vnf-onboarding/osm-packages.git + diff --git a/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-build.sh b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..3b240c35d505ba889ad25728df412056ae22db8d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-build.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +if [ ! -d hackfest_basic_ns ] ; then + echo "It does not look like we are in the osm-packages directory, exiting" + exit 1 +fi + + +echo "========================================================================" +echo "Cleaning out any prior versions of the descriptors from OSM" +echo "========================================================================" +osm nsd-delete hackfest_basic-ns +osm vnfd-delete hackfest_basic-vnf + +echo "========================================================================" +echo "Building packages" +echo "========================================================================" +osm package-build hackfest_basic_vnf +osm package-build hackfest_basic_ns + +echo "========================================================================" +echo "Uploading packages" +echo "========================================================================" +osm upload-package hackfest_basic_vnf.tar.gz +osm upload-package hackfest_basic_ns.tar.gz +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-delete.sh b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-delete.sh new file mode 100755 index 0000000000000000000000000000000000000000..84668b031c62dee4cb8c25f4d596d2b14f0347f9 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-delete.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +echo "========================================================================" +echo "Deleting network service" +echo "========================================================================" + +osm ns-delete basic-vnf + +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-launch.sh b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-launch.sh new file mode 100755 index 0000000000000000000000000000000000000000..4c4c83be06c6a2360e5b98091833bf7271e4200e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.5-Checkpoint/basic-launch.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +VIMID=`osm vim-list | grep osm_ | awk '{ print $4 }'` +echo "========================================================================" +echo "Launching network service with VIMID ${VIMID}" +echo "========================================================================" +osm ns-create --ns_name basic-vnf \ + --nsd_name hackfest_basic-ns \ + --vim_account ${VIMID} \ + --ssh_keys ~/.ssh/id_rsa.pub \ + --config \ + '{vld: [ {name: mgmtnet, vim-network-name: osm-ext} ] }' +echo 
"========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-actions.sh b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-actions.sh new file mode 100755 index 0000000000000000000000000000000000000000..ba6a8c7650452358b1c31988471a3c7c5e290b19 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-actions.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +echo "========================================================================" +echo "Here are some of the actions you can run" +echo "========================================================================" + +cat << 'EOF' + +DESKTOP_IP=`osm ns-show virtual-desktop --literal | yq e '.vcaStatus.*.machines.0.network_interfaces.ens3.ip_addresses.0' -` + +osm ns-action firewall --vnf_name VYOS-PNF --action_name add-port-forward --params "{ruleNumber: '10', sourcePort: '3389', destinationAddress: \"${DESKTOP_IP}\", destinationPort: '3389'}" +osm ns-action firewall --vnf_name VYOS-PNF --action_name remove-port-forward --params '{ruleNumber: "10"}' + +EOF  diff --git a/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-build.sh b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..77b298362adb0b8ba7616bb22cbfc325cb5a82cc --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-build.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +if [ ! -d hackfest_firewall_pnf ] ; then + echo "It does not look like we are in the osm-packages directory, exiting" + exit 1 +fi + +echo "========================================================================" +echo "Building operator charms" +echo "========================================================================" +cd hackfest_firewall_pnf/charms/vyos-config-src +#virtualenv -p python3 venv +#source venv/bin/activate +#pip install -r requirements-dev.txt +#pip install charmcraft +#./venv/bin/charmcraft build +rm -rf venv +charmcraft build +cd - +cd hackfest_firewall_pnf/charms +mkdir -p vyos-config/ +rm -rf vyos-config/* +cp -r vyos-config-src/build/* vyos-config/ +cd - + +echo "========================================================================" +echo "Cleaning out any prior versions of the descriptors from OSM" +echo "========================================================================" +osm nsd-delete hackfest_firewall_pnf_ns +osm vnfd-delete hackfest_firewall_pnf +osm pdu-delete router01 +rm -v hackfest_firewall_pnf*.tar.gz + +echo "========================================================================" +echo "Building packages" +echo "========================================================================" +osm package-build hackfest_firewall_pnf +osm package-build hackfest_firewall_pnf_ns + +echo "========================================================================" +echo "Uploading packages" +echo "========================================================================" +osm upload-package hackfest_firewall_pnf.tar.gz +osm upload-package hackfest_firewall_pnf_ns.tar.gz + +VIMID=`osm vim-list | grep osm_ | awk '{ print $4 }'` +echo "========================================================================" +echo "Registering PDU 172.21.19.${HFID} with $VIMID" +echo "========================================================================" + +cat << EOF > firewall-pdu.yaml +name: router01 +description: VyOS Router +type: gateway +shared: false +interfaces: + - name: gateway_public 
+ ip-address: 172.21.19.${HFID} + mgmt: true + vim-network-name: osm-ext + - name: vnf_internal + ip-address: 192.168.239.250 + mgmt: false + vim-network-name: private +EOF + +osm pdu-create --descriptor_file firewall-pdu.yaml \ + --vim_account $VIMID +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-launch.sh b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-launch.sh new file mode 100755 index 0000000000000000000000000000000000000000..f2626b6a511ff124517b9a94bc02b6c6c4b525e9 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-launch.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +VIMID=`osm vim-list | grep osm_ | awk '{ print $4 }'` +echo "========================================================================" +echo "Launching network service with VIMID ${VIMID}" +echo "========================================================================" + +osm ns-create --ns_name firewall \ + --nsd_name hackfest_firewall_pnf_ns \ + --vim_account $VIMID + +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-watch-progress.sh b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-watch-progress.sh new file mode 100755 index 0000000000000000000000000000000000000000..40c026b47eaef48b7126afaa75ccb1ec4025f993 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD1.7-PNF/firewall-watch-progress.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +watch -- 'osm ns-show firewall | grep -i status' diff --git a/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-actions.sh b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-actions.sh new file mode 100755 index 0000000000000000000000000000000000000000..2ff1033fe13abf1826a72ee91c4c68060aea10cb --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-actions.sh @@ -0,0 +1,15 @@ +#!/bin/bash +echo "========================================================================" +echo "Here are some of the actions you can run" +echo "========================================================================" + +cat << 'EOF' +osm ns-action virtual-desktop --vnf_name 1 --action_name update-system +osm ns-action virtual-desktop --vnf_name 1 --action_name add-package --params '{package: "ubuntu-mate-wallpapers-disco,ubuntu-mate-wallpapers-eoan"}' +osm ns-action virtual-desktop --vnf_name 1 --action_name remove-package --params '{package: "ubuntu-mate-wallpapers-disco"}' +osm ns-action virtual-desktop --vnf_name 1 --action_name add-snap --params '{package: "code --classic"}' +osm ns-action virtual-desktop --vnf_name 1 --action_name remove-snap --params '{package: "code"}' +osm ns-action virtual-desktop --vnf_name 1 --action_name reboot +osm ns-action virtual-desktop --vnf_name 1 --action_name announce --params '{message: "Hello from the Hackfest!"}' +EOF + diff --git a/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-build.sh b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..264926e63911dbb7765f21df5637e0a265e1daf1 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-build.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +if [ ! 
-d hackfest_virtual-pc_vnfd ] ; then + echo "It does not look like we are in the osm-packages directory, exiting" + exit 1 +fi + +cd hackfest_virtual-pc_vnfd/charms/virtual-pc-src + +echo "========================================================================" +echo "Building operator charms" +echo "========================================================================" +rm -rf venv +charmcraft build +cd - +cd hackfest_virtual-pc_vnfd/charms +cp -r virtual-pc-src/build/* virtual-pc/ +cd - + +echo "========================================================================" +echo "Cleaning out any prior versions of the descriptors from OSM" +echo "========================================================================" +osm nsd-delete hackfest_virtual-pc_ns +osm vnfd-delete hackfest_virtual-pc_vnf + +echo "========================================================================" +echo "Building packages" +echo "========================================================================" +osm package-build hackfest_virtual-pc_vnfd +osm package-build hackfest_virtual-pc_ns + +echo "========================================================================" +echo "Uploading packages" +echo "========================================================================" +osm upload-package hackfest_virtual-pc_vnfd.tar.gz +osm upload-package hackfest_virtual-pc_ns.tar.gz +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-launch.sh b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-launch.sh new file mode 100755 index 0000000000000000000000000000000000000000..dc67f2c8305e2f5ef2bc901cad7543636c2fb1ed --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-launch.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +VIMID=`osm vim-list | grep osm_ | awk '{ print $4 }'` +echo "========================================================================" +echo "Launching network service with VIMID ${VIMID}" +echo "========================================================================" +osm ns-create --ns_name virtual-desktop \ + --nsd_name hackfest_virtual-pc_ns \ + --vim_account ${VIMID} \ + --config \ + '{vld: [ {name: mgmtnet, vim-network-name: osm-ext}, + {name: private, vim-network-name: private} ] }' +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-watch-progress.sh b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-watch-progress.sh new file mode 100755 index 0000000000000000000000000000000000000000..5578d0b67796bf3717b7c70d6fe707fc61430a16 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.1-VNF-Primitives/virtual-pc-watch-progress.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +watch -- 'osm ns-show virtual-desktop | grep -i status' diff --git a/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-build.sh b/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-build.sh new file mode 100755 index 0000000000000000000000000000000000000000..add8202e208d067be74af17916a75b6f6c7665e4 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-build.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +OSM_PACKAGE_DIR=$HOME/osm-packages +WIKI_VNFD_PACKAGE_NAME=wiki_webserver_autoscale_vnfd +WIKI_NSD_PACKAGE_NAME=wiki_webserver_autoscale_nsd 
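Note: the HD2.2 wiki scripts below build every descriptor name from `$OSM_USER` so that multiple participants can share one OSM instance; if `OSM_USER` is unset, the generated names silently end in a bare `_`. A one-line guard near the top of the script would fail fast instead (a suggested addition, not part of this changeset):

```bash
# Abort with a clear message when OSM_USER is missing; ${var:?msg} is
# standard POSIX parameter expansion and exits a non-interactive shell.
: "${OSM_USER:?OSM_USER must be set, e.g. export OSM_USER=user01}"
```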
+WIKI_VNF_PACKAGE_DIR=$OSM_PACKAGE_DIR/$WIKI_VNFD_PACKAGE_NAME +WIKI_NS_PACKAGE_DIR=$OSM_PACKAGE_DIR/$WIKI_NSD_PACKAGE_NAME +WIKI_VNFD_NAME=wiki_webserver_autoscale_vnf +WIKI_NSD_NAME=wiki_webserver_autoscale_ns +USER_ID=$OSM_USER + +echo "========================================================================" +echo "Downloading and modifying the wiki package" +echo "========================================================================" +# clone from git +rm -rf $OSM_PACKAGE_DIR +git clone https://osm.etsi.org/gitlab/vnf-onboarding/osm-packages.git +# Append user id to NSD and VNFD names +sed -i "s/${WIKI_VNFD_NAME}.*/${WIKI_VNFD_NAME}_${USER_ID}/" $WIKI_VNF_PACKAGE_DIR/wiki_webserver_autoscale_vnfd.yaml +sed -i "s/${WIKI_VNFD_NAME}.*/${WIKI_VNFD_NAME}_${USER_ID}/" $WIKI_NS_PACKAGE_DIR/wiki_webserver_autoscale_nsd.yaml +sed -i "s/${WIKI_NSD_NAME}.*/${WIKI_NSD_NAME}_${USER_ID}/" $WIKI_NS_PACKAGE_DIR/wiki_webserver_autoscale_nsd.yaml +# Add 'runcmd' in cloud-init to modify configuration and restart haproxy service +echo 'runcmd:' >> $WIKI_VNF_PACKAGE_DIR/cloud_init/cloud_init_haproxy +echo ' - ip=$(ifconfig | grep -A 1 "ens4" | tail -1 | cut -d ":" -f 2 | cut -d " " -f 1)' >> $WIKI_VNF_PACKAGE_DIR/cloud_init/cloud_init_haproxy +echo ' - sudo -S sed -i "s/ipv4@\(.*:9999\)/ipv4@${ip}\:9999/" /etc/haproxy/haproxy.cfg' >> $WIKI_VNF_PACKAGE_DIR/cloud_init/cloud_init_haproxy +echo ' - sleep 60' >> $WIKI_VNF_PACKAGE_DIR/cloud_init/cloud_init_haproxy +echo ' - echo "osm2021" | sudo service haproxy restart' >> $WIKI_VNF_PACKAGE_DIR/cloud_init/cloud_init_haproxy + +echo "========================================================================" +echo "Cleaning out any prior versions of the descriptors from OSM" +echo "========================================================================" +osm nsd-delete ${WIKI_NSD_NAME}_${USER_ID} +osm vnfd-delete ${WIKI_VNFD_NAME}_${USER_ID} + +echo "========================================================================" +echo "Building packages" +echo "========================================================================" +cd $OSM_PACKAGE_DIR +osm package-build $WIKI_VNFD_PACKAGE_NAME +osm package-build $WIKI_NSD_PACKAGE_NAME + +echo "========================================================================" +echo "Uploading packages" +echo "========================================================================" +osm upload-package $WIKI_VNFD_PACKAGE_NAME.tar.gz +osm upload-package $WIKI_NSD_PACKAGE_NAME.tar.gz + +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-launch.sh b/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-launch.sh new file mode 100755 index 0000000000000000000000000000000000000000..20caea0107cb8d134fb7926baf5e52a00821ca0c --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.2-Scaling/wiki-launch.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +VIMID=`osm vim-list | grep osm_ | awk '{ print $4 }'` + + +USER_ID=$OSM_USER +NSD_NAME=wiki_webserver_autoscale_ns_$USER_ID + + +echo "========================================================================" +echo "Launching network service with VIMID ${VIMID}" +echo "========================================================================" +osm ns-create --ns_name wiki \ + --nsd_name ${NSD_NAME} \ + --vim_account ${VIMID} +echo "========================================================================" +echo "Done" 
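Note: HD2.2-Scaling ships no watch-progress helper, unlike the other demo directories. Mirroring firewall-watch-progress.sh and virtual-pc-watch-progress.sh, an equivalent for the wiki service would be (an assumed companion script, not part of this changeset):

```bash
#!/bin/bash

# Poll the NS status fields while the wiki service instantiates and scales,
# following the pattern of the other *-watch-progress.sh scripts.
watch -- 'osm ns-show wiki | grep -i status'
```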
+echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-build-and-onboard.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-build-and-onboard.sh new file mode 100755 index 0000000000000000000000000000000000000000..a3a30f7091eb84f331a021012ce9e448f66d0cd5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-build-and-onboard.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +if [ ! -d openldap_knf ] ; then + echo "It does not look like we are in the osm-packages directory, exiting" + exit 1 +fi + +echo "========================================================================" +echo "Cleaning out any prior versions of the descriptors from OSM" +echo "========================================================================" +osm nspkg-delete openldap_ns +osm nfpkg-delete openldap_knf + +echo "========================================================================" +echo "Validating packages" +echo "========================================================================" +osm package-validate --no-recursive openldap_knf +osm package-validate --no-recursive openldap_ns + +echo "========================================================================" +echo "Building packages" +echo "========================================================================" +osm package-build openldap_knf +osm package-build openldap_ns + +echo "========================================================================" +echo "Uploading packages" +echo "========================================================================" +osm nfpkg-create openldap_knf.tar.gz +osm nspkg-create openldap_ns.tar.gz +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-check-k8s-status.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-check-k8s-status.sh new file mode 100755 index 0000000000000000000000000000000000000000..69b58d7615d3375bac60358bebaf61e1365b4a37 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-check-k8s-status.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +PROJECT_ID=`osm project-list | grep $OSM_PROJECT | awk '{ print $4 }'` +kubectl -n ${PROJECT_ID} get all +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-launch.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-launch.sh new file mode 100755 index 0000000000000000000000000000000000000000..1efc7c98d04e322c82a1e4b2b7125d54890ba163 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-launch.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +VIMID=`osm vim-list | grep $OSM_PROJECT | awk '{ print $4 }'` +echo "========================================================================" +echo "Launching network service in VIM with ID ${VIMID}" +echo "========================================================================" +osm ns-create --ns_name ldap \ + --nsd_name openldap_ns \ + --vim_account ${VIMID} \ + --config_file $HOME/openldap-params.yaml +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-params.yaml 
b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-params.yaml new file mode 100644 index 0000000000000000000000000000000000000000..60f3f8099a17a5de146a635f40fff545da69ef1d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-params.yaml @@ -0,0 +1,22 @@ +vld: +- name: mgmtnet + vim-network-name: osm-ext +additionalParamsForVnf: +- member-vnf-index: openldap + additionalParamsForKdu: + - kdu_name: ldap + additionalParams: + # replicaCount: 2 + service: + type: LoadBalancer + loadBalancerIP: '172.21.251.X' # MetalLB IP Address + adminPassword: osm4u + configPassword: osm4u + env: + LDAP_ORGANISATION: "Example Inc." + LDAP_DOMAIN: "example.org" + LDAP_BACKEND: "hdb" + LDAP_TLS: "true" + LDAP_TLS_ENFORCE: "false" + LDAP_REMOVE_CONFIG_AFTER_SETUP: "true" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-show-vim-and-cluster.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-show-vim-and-cluster.sh new file mode 100755 index 0000000000000000000000000000000000000000..b0f089922927965d21c6b89ef8d8d84f7e6ecf9c --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-show-vim-and-cluster.sh @@ -0,0 +1,21 @@ +#!/bin/bash +echo "========================================================================" +echo "Listing VIM" +echo "========================================================================" +osm vim-list +echo "========================================================================" +echo "Getting details of VIM $OSM_PROJECT" +echo "========================================================================" +osm vim-show $OSM_PROJECT +echo "========================================================================" +echo "Listing K8s clusters" +echo "========================================================================" +osm k8scluster-list +echo "========================================================================" +echo "Getting details of K8s cluster $OSM_PROJECT" +echo "========================================================================" +osm k8scluster-show $OSM_PROJECT +echo "========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-test.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-test.sh new file mode 100755 index 0000000000000000000000000000000000000000..06a69ea19434ce1be8606d83cd09d7ced854b543 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-test.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +echo "========================================================================" +echo "Getting IP address of the LDAP server" +echo "========================================================================" +VNF_ID=`osm vnf-list --ns ldap|grep openldap |awk '{print $2}'` +IP_ADDR1=`osm vnf-show ${VNF_ID} --literal | yq e '.kdur[0].services[0].external_ip[0]' -` +PROJECT_ID=`osm project-list | grep $OSM_PROJECT | awk '{ print $4 }'` +IP_ADDR2=`kubectl -n ${PROJECT_ID} get svc|grep stable-openldap |awk '{print $4}'` +LB_IP=${IP_ADDR1} +[ "${LB_IP}" == "null" ] && LB_IP="" +[ -n "${LB_IP}" ] || LB_IP=${IP_ADDR2} +echo $LB_IP +echo "========================================================================" +echo "Testing LDAP server" +echo "========================================================================" +ldapsearch -x -H ldap://${LB_IP}:389 -b dc=example,dc=org -D "cn=admin,dc=example,dc=org" -w osm4u +echo 
"========================================================================" +echo "Done" +echo "========================================================================" + diff --git a/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-watch-progress.sh b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-watch-progress.sh new file mode 100755 index 0000000000000000000000000000000000000000..b9d5d315b11aa73ef1067ea604c726c8282b419a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.4-CNF-Helm/openldap-watch-progress.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +watch -n 5 -- 'osm ns-show ldap | grep -i status' + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/build_charms.sh b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/build_charms.sh new file mode 100755 index 0000000000000000000000000000000000000000..d2240560cf132452271a02789536dff137cc18c3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/build_charms.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +CHARM_NAMES="grafana-operator prometheus-operator squid-operator" + +cd charms/ops +for charm in $CHARM_NAMES; do + echo "Building charm $charm" + cd $charm + charmcraft build + mkdir -p ../../$charm + rm -rf ../../$charm/* + mv build/* ../../$charm/ + cd .. +done +cd ../.. diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.flake8 b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.gitignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..7d315ecbda5024f3f81756c91caa6d7256970db0 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/.gitignore @@ -0,0 +1,4 @@ +build +*.charm +.idea +__pycache__ diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6f3abb7fe9ce429ce54cc9009e93e1efede56fec --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/README.md @@ -0,0 +1,64 @@ +# Grafana Charm + +## Description + +This is the Grafana charm for Kubernetes using the Operator Framework. + +## Usage + +Initial setup (ensure microk8s is a clean slate with `microk8s.reset` or a fresh install with `snap install microk8s --classic`): +```bash +microk8s.enable dns storage registry dashboard +juju bootstrap microk8s mk8s +juju add-model lma +juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath +``` + +Deploy Grafana on its own: +```bash +git clone git@github.com:canonical/grafana-operator.git +cd grafana-operator +charmcraft build +juju deploy ./grafana.charm --resource grafana-image=grafana/grafana:7.2.1 +``` + +View the dashboard in a browser: +1. `juju status` to check the IP of the running Grafana application +2. Navigate to `http://IP_ADDRESS:3000` +3. Log in with the default credentials: username=admin, password=admin. + +Add Prometheus as a datasource: +```bash +git clone git@github.com:canonical/prometheus-operator.git +cd prometheus-operator +charmcraft build +juju deploy ./prometheus.charm +juju add-relation grafana prometheus +watch -c juju status --color # wait for things to settle down +``` +> Once the deployed charm and relation settle, you should be able to see Prometheus data propagating to the Grafana dashboard. + +### High Availability Grafana + +This charm is written to support a high-availability Grafana cluster, but a database relation is required (MySQL or PostgreSQL). + +If HA is not required, there is no need to add a database relation. + +> NOTE: HA support in this charm should not be considered production-ready. + +... + +## Developing + +Create and activate a virtualenv, +and install the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Testing + +Just run `run_tests`: + + ./run_tests diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/config.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c91c65a3567d2fba3572c126f52f9f626c2ef05f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/config.yaml @@ -0,0 +1,11 @@ +options: + port: + description: The port grafana will be listening on + type: int + default: 3000 + grafana_log_level: + type: string + description: | + Logging level for Grafana. Options are “debug”, “info”, + “warn”, “error”, and “critical”.
+ default: info \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/dispatch b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/dispatch new file mode 100755 index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/dispatch @@ -0,0 +1,3 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/install b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/install new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/install @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/start b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/start new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/start @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/upgrade-charm b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/upgrade-charm new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/hooks/upgrade-charm @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/icon.svg b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/icon.svg new file mode 100644 index 0000000000000000000000000000000000000000..2ad84eebbd3188fa28bb7f2379b78ce1a0a1933f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/icon.svg @@ -0,0 +1,12 @@ + + + + + + + + + + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1979c9470f38862d1253b9a6ba62a169cfc48022 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/metadata.yaml @@ -0,0 +1,34 @@ +name: grafana +summary: Data visualization and observability with Grafana +maintainers: + - Justin Clark +description: | + Grafana provides dashboards for monitoring data and this + charm is written to allow for HA on Kubernetes and can take + multiple data sources (for example, Prometheus). 
+tags: + - lma + - grafana + - prometheus + - monitoring + - observability +series: + - kubernetes +provides: + grafana-source: + interface: grafana-datasource + grafana-dashboard: + interface: grafana-dash +requires: + database: + interface: db + limit: 1 +peers: + grafana: + interface: grafana-peers +storage: + sqlitedb: + type: filesystem + location: /var/lib/grafana +deployment: + service: loadbalancer diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements-dev.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..eded44146a5877d5d81b343988b516c4acaa4573 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements-dev.txt @@ -0,0 +1,2 @@ +-r requirements.txt +flake8 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca625b4c913fa655ee7beb6ab2769131f7b5a21c --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/requirements.txt @@ -0,0 +1,2 @@ +ops +git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/run_tests b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/run_tests new file mode 100755 index 0000000000000000000000000000000000000000..14bb4f4e1b3a9a8ffef0da6da128bbddb8861ce5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/run_tests @@ -0,0 +1,16 @@ +#!/bin/sh -e +# Copyright 2020 Justin +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH=src +else + export PYTHONPATH="src:$PYTHONPATH" +fi + +flake8 +python3 -m unittest -v "$@" diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/src/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..1053f8f871535a9eaec0f1f0712ebddd2218f16d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/src/charm.py @@ -0,0 +1,494 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +import logging +import hashlib +import textwrap + +from oci_image import OCIImageResource, OCIImageResourceError +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus + +log = logging.getLogger() + + +# These are the required and optional relation data fields +# In other words, when relating to this charm, these are the fields +# that will be processed by this charm. 
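+# Illustrative sketch only (hypothetical values, not part of the original +# charm): a related Prometheus unit might publish relation data such as +# {'private-address': '10.1.2.3', 'port': '9090', 'source-type': 'prometheus', 'source-name': 'prom'}, +# and the constants below name the keys this charm requires or accepts.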
+REQUIRED_DATASOURCE_FIELDS = { + 'private-address', # the hostname/IP of the data source server + 'port', # the port of the data source server + 'source-type', # the data source type (e.g. prometheus) +} + +OPTIONAL_DATASOURCE_FIELDS = { + 'source-name', # a human-readable name of the source +} + +# https://grafana.com/docs/grafana/latest/administration/configuration/#database +REQUIRED_DATABASE_FIELDS = { + 'type', # mysql, postgres or sqlite3 (sqlite3 doesn't work for HA) + 'host', # in the form '<host>:<port>', e.g. 127.0.0.1:3306 + 'name', + 'user', + 'password', +} + +# verify with Grafana documentation to ensure fields have valid values +# as this charm will not directly handle these cases +# TODO: fill with optional fields +OPTIONAL_DATABASE_FIELDS = set() + +VALID_DATABASE_TYPES = {'mysql', 'postgres', 'sqlite3'} + + +def get_container(pod_spec, container_name): + """Find and return the first container in pod_spec whose name is + container_name; raise a ValueError if no such container exists.""" + for container in pod_spec['containers']: + if container['name'] == container_name: + return container + raise ValueError("Unable to find container named '{}' in pod spec".format( + container_name)) + + +class GrafanaK8s(CharmBase): + """Charm to run Grafana on Kubernetes. + + This charm allows for high-availability + (as long as a non-sqlite database relation is present). + + Developers of this charm should be aware of the Grafana provisioning docs: + https://grafana.com/docs/grafana/latest/administration/provisioning/ + """ + + datastore = StoredState() + + def __init__(self, *args): + log.debug('Initializing charm.') + super().__init__(*args) + + # -- get image information + self.image = OCIImageResource(self, 'grafana-image') + + # -- standard hooks + self.framework.observe(self.on.config_changed, self.on_config_changed) + self.framework.observe(self.on.update_status, self.on_update_status) + self.framework.observe(self.on.stop, self._on_stop) + + # -- grafana-source relation observations + self.framework.observe(self.on['grafana-source'].relation_changed, + self.on_grafana_source_changed) + self.framework.observe(self.on['grafana-source'].relation_broken, + self.on_grafana_source_broken) + + # -- grafana (peer) relation observations + self.framework.observe(self.on['grafana'].relation_changed, + self.on_peer_changed) + # self.framework.observe(self.on['grafana'].relation_departed, + # self.on_peer_departed) + + # -- database relation observations + self.framework.observe(self.on['database'].relation_changed, + self.on_database_changed) + self.framework.observe(self.on['database'].relation_broken, + self.on_database_broken) + + # -- initialize states -- + self.datastore.set_default(sources=dict()) # available data sources + self.datastore.set_default(source_names=set()) # unique source names + self.datastore.set_default(sources_to_delete=set()) + self.datastore.set_default(database=dict()) # db configuration + + @property + def has_peer(self) -> bool: + rel = self.model.get_relation('grafana') + return len(rel.units) > 0 if rel is not None else False + + @property + def has_db(self) -> bool: + """Only consider a DB connection if we have config info.""" + return len(self.datastore.database) > 0 + + def _on_stop(self, _): + """Go into maintenance state if the unit is stopped.""" + self.unit.status = MaintenanceStatus('Pod is terminating.') + + def on_config_changed(self, _): + self.configure_pod() + + def on_update_status(self, _): + """Various health checks of the charm.""" + self._check_high_availability() + + def 
on_grafana_source_changed(self, event): + """Get relation data for Grafana source and set k8s pod spec. + + This event handler (if the unit is the leader) will get data for + an incoming grafana-source relation and make the relation data + available in the app's datastore object (StoredState). + """ + + # if this unit is the leader, set the required data + # of the grafana-source in this charm's datastore + if not self.unit.is_leader(): + return + + # if there is no available unit, remove data-source info if it exists + if event.unit is None: + log.warning("event unit can't be None when setting data sources.") + return + + # dictionary of all the required/optional datasource field values + # using this as a more generic way of getting data source fields + datasource_fields = \ + {field: event.relation.data[event.unit].get(field) for field in + REQUIRED_DATASOURCE_FIELDS | OPTIONAL_DATASOURCE_FIELDS} + + missing_fields = [field for field + in REQUIRED_DATASOURCE_FIELDS + if datasource_fields.get(field) is None] + # check the relation data for missing required fields + if len(missing_fields) > 0: + log.error("Missing required data fields for grafana-source " + "relation: {}".format(missing_fields)) + self._remove_source_from_datastore(event.relation.id) + return + + # specifically handle optional fields if necessary + # check if source-name was not passed or if we have already saved the provided name + if datasource_fields['source-name'] is None\ + or datasource_fields['source-name'] in self.datastore.source_names: + default_source_name = '{}_{}'.format( + event.app.name, + event.relation.id + ) + log.warning("No name provided for 'grafana-source', or the provided name " + "is already in use. Using safe default: {}.".format(default_source_name)) + datasource_fields['source-name'] = default_source_name + + self.datastore.source_names.add(datasource_fields['source-name']) + + # set the first grafana-source as the default (needed for pod config) + # if `self.datastore.sources` is currently empty, this is the first + datasource_fields['isDefault'] = 'false' + if not dict(self.datastore.sources): + datasource_fields['isDefault'] = 'true' + + # add the unit name so the source can be removed later; it might be a + # duplicate of 'source-name', but this guarantees the lookup + datasource_fields['unit_name'] = event.unit.name + + # add the new datasource relation data to the current state + new_source_data = { + field: value for field, value in datasource_fields.items() + if value is not None + } + self.datastore.sources.update({event.relation.id: new_source_data}) + self.configure_pod() + + def on_grafana_source_broken(self, event): + """When a grafana-source is removed, delete from the datastore.""" + if self.unit.is_leader(): + self._remove_source_from_datastore(event.relation.id) + self.configure_pod() + + def on_peer_changed(self, _): + # TODO: https://grafana.com/docs/grafana/latest/tutorials/ha_setup/ + # According to these docs ^, as long as we have a DB, HA should + # work out of the box if we are OK with "Sticky Sessions" + # but having "Stateless Sessions" could require more config + + # if the config changed, set a new pod spec + self.configure_pod() + + def on_peer_departed(self, _): + """Sets pod spec with new info.""" + # TODO: setting pod spec shouldn't do anything now, + # but if we ever need to change config based on peer units, + # we will want to make sure configure_pod() is called + self.configure_pod() + + def on_database_changed(self, event): + """Sets configuration information for database connection.""" + 
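# Illustrative sketch only (hypothetical values): a related mysql unit might + # publish relation data such as {'type': 'mysql', 'host': '10.0.0.5:3306', + # 'name': 'grafana', 'user': 'grafana', 'password': 'secret'}, which is + # checked against REQUIRED_DATABASE_FIELDS below. + 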
if not self.unit.is_leader(): + return + + if event.unit is None: + log.warning("event unit can't be None when setting db config.") + return + + # save the necessary configuration of this database connection + database_fields = \ + {field: event.relation.data[event.unit].get(field) for field in + REQUIRED_DATABASE_FIELDS | OPTIONAL_DATABASE_FIELDS} + + # if any required fields are missing, warn the user and return + missing_fields = [field for field + in REQUIRED_DATABASE_FIELDS + if database_fields.get(field) is None] + if len(missing_fields) > 0: + log.error("Missing required data fields for related database " + "relation: {}".format(missing_fields)) + return + + # reject any database type that is not in VALID_DATABASE_TYPES + if database_fields['type'] not in VALID_DATABASE_TYPES: + log.error('Grafana can only accept databases of the following ' + 'types: {}'.format(VALID_DATABASE_TYPES)) + return + + # add the new database relation data to the datastore + self.datastore.database.update({ + field: value for field, value in database_fields.items() + if value is not None + }) + self.configure_pod() + + def on_database_broken(self, _): + """Removes database connection info from datastore. + + We are guaranteed to only have one DB connection, so clearing + datastore.database is all we need for the change to be propagated + to the pod spec.""" + if not self.unit.is_leader(): + return + + # remove the existing database info from datastore + self.datastore.database = dict() + + # set pod spec because datastore config has changed + self.configure_pod() + + def _remove_source_from_datastore(self, rel_id): + """Remove the grafana-source from the datastore. + + Once removed from the datastore, this datasource will not be + part of the next pod spec.""" + log.info('Removing all data for relation: {}'.format(rel_id)) + removed_source = self.datastore.sources.pop(rel_id, None) + if removed_source is None: + log.warning('Could not remove source for relation: {}'.format( + rel_id)) + else: + # free the name from charm's set of source names + # and save to the set which will be used in set_pod_spec + self.datastore.source_names.remove(removed_source['source-name']) + self.datastore.sources_to_delete.add(removed_source['source-name']) + + def _check_high_availability(self): + """Checks whether the configuration allows for HA.""" + if self.has_peer: + if self.has_db: + log.info('high availability possible.') + status = MaintenanceStatus('Grafana ready for HA.') + else: + log.warning('high availability not possible ' + 'with current configuration.') + status = BlockedStatus('Need database relation for HA.') + else: + log.info('running Grafana on single node.') + status = MaintenanceStatus('Grafana ready on single node.') + + # make sure a maintenance status does not overwrite + # a currently active status + if isinstance(status, MaintenanceStatus) \ + and isinstance(self.unit.status, ActiveStatus): + return status + + self.unit.status = status + return status + + def _make_delete_datasources_config_text(self) -> str: + """Generate text of data sources to delete.""" + if not self.datastore.sources_to_delete: + return "\n" + + delete_datasources_text = textwrap.dedent(""" + deleteDatasources:""") + for name in self.datastore.sources_to_delete: + delete_datasources_text += textwrap.dedent(""" + - name: {} + orgId: 1""".format(name)) + + # clear datastore.sources_to_delete and return text result + self.datastore.sources_to_delete.clear() + return delete_datasources_text + '\n\n' + + def 
_make_data_source_config_text(self) -> str: + """Build config based on Data Sources section of provisioning docs.""" + # get starting text for the config file and sources to delete + delete_text = self._make_delete_datasources_config_text() + config_text = textwrap.dedent(""" + apiVersion: 1 + """) + config_text += delete_text + if self.datastore.sources: + config_text += "datasources:" + for rel_id, source_info in self.datastore.sources.items(): + # TODO: handle more optional fields and verify that current + # defaults are what we want (e.g. "access") + config_text += textwrap.dedent(""" + - name: {0} + type: {1} + access: proxy + url: http://{2}:{3} + isDefault: {4} + editable: true + orgId: 1""").format( + source_info['source-name'], + source_info['source-type'], + source_info['private-address'], + source_info['port'], + source_info['isDefault'], + ) + + # check if these are empty + return config_text + '\n' + + def _update_pod_data_source_config_file(self, pod_spec): + """Adds datasources to pod configuration.""" + file_text = self._make_data_source_config_text() + data_source_file_meta = { + 'name': 'grafana-datasources', + 'mountPath': '/etc/grafana/provisioning/datasources', + 'files': [{ + 'path': 'datasources.yaml', + 'content': file_text, + }] + } + container = get_container(pod_spec, self.app.name) + container['volumeConfig'].append(data_source_file_meta) + + # get hash string of the new file text and put into container config + # if this changes, it will trigger a pod restart + file_text_hash = hashlib.md5(file_text.encode()).hexdigest() + if 'DATASOURCES_YAML' in container['envConfig'] \ + and container['envConfig']['DATASOURCES_YAML'] != file_text_hash: + log.info('datasources.yaml hash has changed. ' + 'Triggering pod restart.') + container['envConfig']['DATASOURCES_YAML'] = file_text_hash + + def _make_config_ini_text(self): + """Create the text of the config.ini file. + + More information about this can be found in the Grafana docs: + https://grafana.com/docs/grafana/latest/administration/configuration/ + """ + + config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """.format( + self.model.config['grafana_log_level'], + )) + + # if there is a database available, add that information + if self.datastore.database: + db_config = self.datastore.database + config_text += textwrap.dedent(""" + [database] + type = {0} + host = {1} + name = {2} + user = {3} + password = {4} + url = {0}://{3}:{4}@{1}/{2}""".format( + db_config['type'], + db_config['host'], + db_config['name'], + db_config['user'], + db_config['password'], + )) + return config_text + + def _update_pod_config_ini_file(self, pod_spec): + file_text = self._make_config_ini_text() + config_ini_file_meta = { + 'name': 'grafana-config-ini', + 'mountPath': '/etc/grafana', + 'files': [{ + 'path': 'grafana.ini', + 'content': file_text + }] + } + container = get_container(pod_spec, self.app.name) + container['volumeConfig'].append(config_ini_file_meta) + + # get hash string of the new file text and put into container config + # if this changes, it will trigger a pod restart + file_text_hash = hashlib.md5(file_text.encode()).hexdigest() + if 'GRAFANA_INI' in container['envConfig'] \ + and container['envConfig']['GRAFANA_INI'] != file_text_hash: + log.info('grafana.ini hash has changed. ' + '
Triggering pod restart.') + container['envConfig']['GRAFANA_INI'] = file_text_hash + + def _build_pod_spec(self): + """Builds the pod spec based on available info in `datastore`.""" + + config = self.model.config + + spec = { + 'version': 3, + 'containers': [{ + 'name': self.app.name, + 'image': "ubuntu/grafana:latest", + 'ports': [{ + 'containerPort': config['port'], + 'protocol': 'TCP' + }], + 'volumeConfig': [], + 'envConfig': {}, # used to store hashes of config file text + 'kubernetes': { + 'readinessProbe': { + 'httpGet': { + 'path': '/api/health', + 'port': config['port'] + }, + 'initialDelaySeconds': 10, + 'timeoutSeconds': 30 + }, + }, + }] + } + + return spec + + def configure_pod(self): + """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`.""" + + # check for valid high availability (or single node) configuration + self._check_high_availability() + + # in the case where we have peers but no DB connection, + # don't set the pod spec until it is resolved + if self.unit.status == BlockedStatus('Need database relation for HA.'): + log.error('Application is in a blocked state. ' + 'Please resolve before pod spec can be set.') + return + + if not self.unit.is_leader(): + self.unit.status = ActiveStatus() + return + + # general pod spec component updates + self.unit.status = MaintenanceStatus('Building pod spec.') + pod_spec = self._build_pod_spec() + if not pod_spec: + return + self._update_pod_data_source_config_file(pod_spec) + self._update_pod_config_ini_file(pod_spec) + + # set the pod spec with Juju + self.model.pod.set_spec(pod_spec) + self.unit.status = ActiveStatus() + + +if __name__ == '__main__': + main(GrafanaK8s) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/tests/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/tests/test_charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/tests/test_charm.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b87e4151bf4ef5e87674bbd914adc12b49fd6a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/tests/test_charm.py @@ -0,0 +1,490 @@ +import hashlib +import textwrap +import unittest + +from ops.testing import Harness +from ops.model import ( + TooManyRelatedAppsError, + ActiveStatus, +) +from charm import ( + GrafanaK8s, + MaintenanceStatus, + BlockedStatus, + get_container, +) + +BASE_CONFIG = { + 'port': 3000, + 'grafana_log_level': 'info', +} + + +class GrafanaCharmTest(unittest.TestCase): + + def setUp(self) -> None: + self.harness = Harness(GrafanaK8s) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + self.harness.add_oci_resource('grafana-image') + + def test__grafana_source_data(self): + + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + self.assertIsInstance(rel_id, int) + + # test that the unit data propagates the correct way + # which is through the triggering of on_relation_changed + 
self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-type': 'prometheus', + 'source-name': 'prometheus-app', + }) + + expected_first_source_data = { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-name': 'prometheus-app', + 'source-type': 'prometheus', + 'isDefault': 'true', + 'unit_name': 'prometheus/0' + } + self.assertEqual(expected_first_source_data, + dict(self.harness.charm.datastore.sources[rel_id])) + + # test that clearing the relation data leads to + # the datastore for this data source being cleared + self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': None, + 'port': None, + }) + self.assertEqual(None, self.harness.charm.datastore.sources.get(rel_id)) + + def test__ha_database_and_status_check(self): + """If there is a peer connection and no database (needed for HA), + the charm should put the application in a blocked state.""" + + # start charm with one peer and no database relation + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.unit.status, + ActiveStatus()) + + # ensure _check_high_availability() ends up with the correct status + status = self.harness.charm._check_high_availability() + self.assertEqual(status, MaintenanceStatus('Grafana ready on single node.')) + + # make sure that triggering 'update-status' hook does not + # overwrite the current active status + self.harness.charm.on.update_status.emit() + self.assertEqual(self.harness.charm.unit.status, + ActiveStatus()) + + peer_rel_id = self.harness.add_relation('grafana', 'grafana') + + # add main unit and its data + # self.harness.add_relation_unit(peer_rel_id, 'grafana/0') + # will trigger the grafana-changed hook + self.harness.update_relation_data(peer_rel_id, + 'grafana/0', + {'private-address': '10.1.2.3'}) + + # add peer unit and its data + self.harness.add_relation_unit(peer_rel_id, 'grafana/1') + self.harness.update_relation_data(peer_rel_id, + 'grafana/1', + {'private-address': '10.0.0.1'}) + + self.assertTrue(self.harness.charm.has_peer) + self.assertFalse(self.harness.charm.has_db) + self.assertEqual( + self.harness.charm.unit.status, + BlockedStatus('Need database relation for HA.') + ) + + # ensure update-status hook doesn't overwrite this + self.harness.charm.on.update_status.emit() + self.assertEqual(self.harness.charm.unit.status, + BlockedStatus('Need database relation for HA.')) + + # now add the database connection and the model should + # not have a blocked status + db_rel_id = self.harness.add_relation('database', 'mysql') + self.harness.add_relation_unit(db_rel_id, 'mysql/0') + self.harness.update_relation_data(db_rel_id, + 'mysql/0', + { + 'type': 'mysql', + 'host': '10.10.10.10:3306', + 'name': 'test_mysql_db', + 'user': 'test-admin', + 'password': 'super!secret!password', + }) + self.assertTrue(self.harness.charm.has_db) + self.assertEqual(self.harness.charm.unit.status, ActiveStatus()) + + # ensure _check_high_availability() ends up with the correct status + status = self.harness.charm._check_high_availability() + self.assertEqual(status, MaintenanceStatus('Grafana ready for HA.')) + + def test__database_relation_data(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.database, {}) + + # add relation and update relation data + rel_id = self.harness.add_relation('database', 'mysql') + rel = self.harness.model.get_relation('database') + 
self.harness.add_relation_unit(rel_id, 'mysql/0') + test_relation_data = { + 'type': 'mysql', + 'host': '0.1.2.3:3306', + 'name': 'my-test-db', + 'user': 'test-user', + 'password': 'super!secret!password', + } + self.harness.update_relation_data(rel_id, + 'mysql/0', + test_relation_data) + # check that charm datastore was properly set + self.assertEqual(dict(self.harness.charm.datastore.database), + test_relation_data) + + # now depart this relation and ensure the datastore is emptied + self.harness.charm.on.database_relation_broken.emit(rel) + self.assertEqual({}, dict(self.harness.charm.datastore.database)) + + def test__multiple_database_relation_handling(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.database, {}) + + # add first database relation + self.harness.add_relation('database', 'mysql') + + # add second database relation -- should fail here + with self.assertRaises(TooManyRelatedAppsError): + self.harness.add_relation('database', 'mysql') + self.harness.charm.model.get_relation('database') + + def test__multiple_source_relations(self): + """This will test data-source config text with multiple sources. + + Specifically, it will test multiple grafana-source relations.""" + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + rel_id0 = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id0, 'prometheus/0') + + # add test data to grafana-source relation + # and test that _make_data_source_config_text() works as expected + prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus' + } + self.harness.update_relation_data(rel_id0, 'prometheus/0', prom_source_data) + header_text = textwrap.dedent(""" + apiVersion: 1 + + datasources:""") + correct_config_text0 = header_text + textwrap.dedent(""" + - name: prometheus_0 + type: prometheus + access: proxy + url: http://192.0.2.1:4321 + isDefault: true + editable: true + orgId: 1""") + + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text0 + '\n', generated_text) + + # add another source relation and check the resulting config text + jaeger_source_data = { + 'private-address': '255.255.255.0', + 'port': 7890, + 'source-type': 'jaeger', + 'source-name': 'jaeger-application' + } + rel_id1 = self.harness.add_relation('grafana-source', 'jaeger') + self.harness.add_relation_unit(rel_id1, 'jaeger/0') + self.harness.update_relation_data(rel_id1, 'jaeger/0', jaeger_source_data) + + correct_config_text1 = correct_config_text0 + textwrap.dedent(""" + - name: jaeger-application + type: jaeger + access: proxy + url: http://255.255.255.0:7890 + isDefault: false + editable: true + orgId: 1""") + + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text1 + '\n', generated_text) + + # test removal of second source results in config_text + # that is the same as the original + self.harness.update_relation_data(rel_id1, + 'jaeger/0', + { + 'private-address': None, + 'port': None, + }) + generated_text = self.harness.charm._make_data_source_config_text() + correct_text_after_removal = textwrap.dedent(""" + apiVersion: 1 + + deleteDatasources: + - name: jaeger-application + orgId: 1 + + datasources: + - name: prometheus_0 + type: prometheus + access: proxy + url: http://192.0.2.1:4321 + 
isDefault: true + editable: true + orgId: 1""") + + self.assertEqual(correct_text_after_removal + '\n', generated_text) + + # now test that the 'deleteDatasources' is gone + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text0 + '\n', generated_text) + + def test__pod_spec_container_datasources(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + + # add test data to grafana-source relation + # and test that _make_data_source_config_text() works as expected + prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus' + } + self.harness.update_relation_data(rel_id, 'prometheus/0', prom_source_data) + + data_source_file_text = textwrap.dedent(""" + apiVersion: 1 + + datasources: + - name: prometheus_0 + type: prometheus + access: proxy + url: http://192.0.2.1:4321 + isDefault: true + editable: true + orgId: 1 + """) + + config_ini_file_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """).format( + self.harness.model.config['grafana_log_level'], + ) + + expected_container_files_spec = [ + { + 'name': 'grafana-datasources', + 'mountPath': '/etc/grafana/provisioning/datasources', + 'files': [{ + 'path': 'datasources.yaml', + 'content': data_source_file_text, + }], + }, + { + 'name': 'grafana-config-ini', + 'mountPath': '/etc/grafana', + 'files': [{ + 'path': 'grafana.ini', + 'content': config_ini_file_text, + }] + } + ] + pod_spec, _ = self.harness.get_pod_spec() + container = get_container(pod_spec, 'grafana') + actual_container_files_spec = container['volumeConfig'] + self.assertEqual(expected_container_files_spec, + actual_container_files_spec) + + def test__access_sqlite_storage_location(self): + expected_path = '/var/lib/grafana' + actual_path = self.harness.charm.meta.storages['sqlitedb'].location + self.assertEqual(expected_path, actual_path) + + def test__config_ini_without_database(self): + self.harness.update_config(BASE_CONFIG) + expected_config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """).format( + self.harness.model.config['grafana_log_level'], + ) + + actual_config_text = self.harness.charm._make_config_ini_text() + self.assertEqual(expected_config_text, actual_config_text) + + def test__config_ini_with_database(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + + # add database relation and update relation data + rel_id = self.harness.add_relation('database', 'mysql') + self.harness.add_relation_unit(rel_id, 'mysql/0') + test_relation_data = { + 'type': 'mysql', + 'host': '0.1.2.3:3306', + 'name': 'my-test-db', + 'user': 'test-user', + 'password': 'super!secret!password', + } + self.harness.update_relation_data(rel_id, + 'mysql/0', + test_relation_data) + + # test the results of _make_config_ini_text() + expected_config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + + [database] + type = mysql + host = 0.1.2.3:3306 + name = my-test-db + user = test-user + password = super!secret!password + url = mysql://test-user:super!secret!password@0.1.2.3:3306/my-test-db""").format( + 
self.harness.model.config['grafana_log_level'], + ) + + actual_config_text = self.harness.charm._make_config_ini_text() + self.assertEqual(expected_config_text, actual_config_text) + + def test__duplicate_source_names(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + p_rel_id = self.harness.add_relation('grafana-source', 'prometheus') + p_rel = self.harness.model.get_relation('grafana-source', p_rel_id) + self.harness.add_relation_unit(p_rel_id, 'prometheus/0') + + # add test data to grafana-source relation + prom_source_data0 = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus', + 'source-name': 'duplicate-source-name' + } + self.harness.update_relation_data(p_rel_id, 'prometheus/0', prom_source_data0) + expected_prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-name': 'duplicate-source-name', + 'source-type': 'prometheus', + 'isDefault': 'true', + 'unit_name': 'prometheus/0' + } + self.assertEqual(dict(self.harness.charm.datastore.sources[p_rel_id]), + expected_prom_source_data) + + # add second source with the same name as the first source + g_rel_id = self.harness.add_relation('grafana-source', 'graphite') + g_rel = self.harness.model.get_relation('grafana-source', g_rel_id) + self.harness.add_relation_unit(g_rel_id, 'graphite/0') + + graphite_source_data0 = { + 'private-address': '192.12.23.34', + 'port': 4321, + 'source-type': 'graphite', + 'source-name': 'duplicate-source-name' + } + expected_graphite_source_data = { + 'isDefault': 'false', + 'port': 4321, + 'private-address': '192.12.23.34', + 'source-name': 'graphite_1', + 'source-type': 'graphite', + 'unit_name': 'graphite/0' + } + self.harness.update_relation_data(g_rel_id, 'graphite/0', graphite_source_data0) + self.assertEqual( + expected_graphite_source_data, + dict(self.harness.charm.datastore.sources.get(g_rel_id)) + ) + self.assertEqual(2, len(self.harness.charm.datastore.sources)) + + # now remove the relation and ensure datastore source-name is removed + self.harness.charm.on.grafana_source_relation_broken.emit(p_rel) + self.assertEqual(None, self.harness.charm.datastore.sources.get(p_rel_id)) + self.assertEqual(1, len(self.harness.charm.datastore.sources)) + + # remove graphite relation + self.harness.charm.on.grafana_source_relation_broken.emit(g_rel) + self.assertEqual(None, self.harness.charm.datastore.sources.get(g_rel_id)) + self.assertEqual(0, len(self.harness.charm.datastore.sources)) + + def test__idempotent_datasource_file_hash(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + self.assertIsInstance(rel_id, int) + + # test that the unit data propagates the correct way + # which is through the triggering of on_relation_changed + self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-type': 'prometheus', + 'source-name': 'prometheus-app', + }) + + # get a hash of the created file and check that it matches the pod spec + pod_spec, _ = self.harness.get_pod_spec() + container = get_container(pod_spec, 'grafana') + hash_text = hashlib.md5( + container['volumeConfig'][0]['files'][0]['content'].encode()).hexdigest() + self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text) + + # test the idempotence 
of the call by re-configuring the pod spec + self.harness.charm.configure_pod() + self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..565f05b73714eb85d96beb669a1aa42920c21c3a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 5.4.1 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Download-URL: https://pypi.org/project/PyYAML/ +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
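As a minimal illustration of what this vendored library provides (a sketch for orientation only, not part of the packaged files; the keys shown are made up), parsing and emitting YAML with PyYAML looks like this:
```python
import yaml

# safe_load parses YAML text into plain Python objects (dicts, lists,
# strings, numbers) without constructing arbitrary Python objects.
config = yaml.safe_load("""
port: 3000
grafana_log_level: info
""")
assert config == {'port': 3000, 'grafana_log_level': 'info'}

# safe_dump serializes the structure back to YAML text.
print(yaml.safe_dump(config, default_flow_style=False))
```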
+ diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..68ca4da2c4df950a57ff47f60fbcacbb5256d161 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-5.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-5.4.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-5.4.1.dist-info/METADATA,sha256=XnrM5LY-uS85ica26gKUK0dGG-xmPjmGfDTSLpIHQFk,2087 +PyYAML-5.4.1.dist-info/RECORD,, +PyYAML-5.4.1.dist-info/WHEEL,sha256=Dh4w5P6PPWbqyqoE6MHlzbFQwZXlM-voWJDf2WUsS2g,108 +PyYAML-5.4.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-38.pyc,, +yaml/__init__.py,sha256=gfp2CbRVhzknghkiiJD2l6Z0pI-mv_iZHPSJ4aj0-nY,13170 +yaml/__pycache__/__init__.cpython-38.pyc,sha256=n0YyVkjiOLmcjlR2NXE5TIZf7Z2clZ6sqQ5KlyuTWSI,11845 +yaml/__pycache__/composer.cpython-38.pyc,sha256=OVPzAKAvC2-Tiv4HUwUUG9JHCzG17nvsRQcFTCtY9xs,3563 +yaml/__pycache__/constructor.cpython-38.pyc,sha256=EXPDY7Prtv3F6QbXiJc5F4BvJQyCCGRu83WF4u6X7Vo,20822 +yaml/__pycache__/cyaml.cpython-38.pyc,sha256=wI01UFU-WhUcdnnczL5QpKu0ZNQTttSzXbleIvIfcvM,3411 +yaml/__pycache__/dumper.cpython-38.pyc,sha256=9wIctrlMpF4ksMWuCc5QAyZSenGiRVyrtU-1pAfj54U,1823 +yaml/__pycache__/emitter.cpython-38.pyc,sha256=kd_QGJd0GjpfgQPN9DlG_7HwKfJnJ24JxtdiUOxM9iE,25353 +yaml/__pycache__/error.cpython-38.pyc,sha256=j6mkXgDmzV0y0lo6FeUrvZL2vHN6Vkc52k0_R0oOn6g,2300 +yaml/__pycache__/events.cpython-38.pyc,sha256=NFsoAO36pPL_uxoCO-xRxKndQ3vx47mkStOYjfoQVZ8,3974 +yaml/__pycache__/loader.cpython-38.pyc,sha256=lEMB2brjPrfMjXXTJpCEx6-ct4eI6LYovD4hW5ZuGsw,2164 +yaml/__pycache__/nodes.cpython-38.pyc,sha256=Kkxh_oL04gQg-YFWwnfjpIoYspsXO4GEqKTr3NbxOD8,1725 +yaml/__pycache__/parser.cpython-38.pyc,sha256=0R9Qx0cBMUoOLzMOWeXCyXsC4S4KJ7oPHdmTVPQ4FbQ,11924 +yaml/__pycache__/reader.cpython-38.pyc,sha256=ZpOMJ6rZDc8EWffI4vZR_Fhcu3WmhgT_GAkDrKkEtPo,4537 +yaml/__pycache__/representer.cpython-38.pyc,sha256=tR9wWffCThWXwQe47uYFdHg2bCkqNjBcwmG7RSHmWS4,10069 +yaml/__pycache__/resolver.cpython-38.pyc,sha256=zsLBuCKn8KAJPVGo5J_xZSytifJktdTtkUNnltOt__I,5498 +yaml/__pycache__/scanner.cpython-38.pyc,sha256=N8ubxRd6bZBjoRna6CU8wK1Imb_7TWOsudzPh9JDDkQ,25269 +yaml/__pycache__/serializer.cpython-38.pyc,sha256=9JDH7ONP5zFlep0f2yNWRoOSZr5Y28jL012O1EIbuug,3320 +yaml/__pycache__/tokens.cpython-38.pyc,sha256=haBW6UBDhVFog2xIe63OkrAP_9JRFyNKCROFPRJiyu0,4935 +yaml/_yaml.cpython-38-x86_64-linux-gnu.so,sha256=fxjEXaSdzion1SMwhu9Ikx-JOVNtcl6KvW_pyGBt-cU,2342916 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184 +yaml/resolver.py,sha256=Z1W8AOMA6Proy4gIO2OhUO4IPS_bFNAl0Ca3rwChpPg,8999 +yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..69d594f055a5127401ebe017f8837cef4c76c020 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: false +Tag: cp38-cp38-manylinux1_x86_64 + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/_yaml/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/_yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imoprted may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. 
To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..b21b997f84c57165c6e89728ebe53da0b518e944 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/METADATA @@ -0,0 +1,63 @@ +Metadata-Version: 2.1 +Name: oci-image +Version: 1.0.0 +Summary: Helper for dealing with OCI Image resources in the charm operator framework +Home-page: https://github.com/juju-solutions/resource-oci-image +Author: Cory Johns +Author-email: johnsca@gmail.com +License: Apache License 2.0 +Platform: UNKNOWN + +# OCI Image Resource helper + +This is a helper for working with OCI image resources in the charm operator +framework. + +## Installation + +Add it to your `requirements.txt`. Since it's not in PyPI, you'll need to use +the GitHub archive URL (or `git+` URL, if you want to pin to a specific commit): + +``` +https://github.com/juju-solutions/resource-oci-image/archive/master.zip +``` + +## Usage + +The `OCIImageResource` class will wrap the framework resource for the given +resource name, and calling `fetch` on it will either return the image info +or raise an `OCIImageResourceError` if it can't fetch or parse the image +info. The exception will have a `status` attribute you can use directly, +or a `status_message` attribute if you just want that. 
+ +Example usage: + +```python +from ops.charm import CharmBase +from ops.main import main +from oci_image import OCIImageResource, OCIImageResourceError + +class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + self.image = OCIImageResource(self, 'resource-name') + self.framework.observe(self.on.start, self.on_start) + + def on_start(self, event): + try: + image_info = self.image.fetch() + except OCIImageResourceError as e: + self.model.unit.status = e.status + event.defer() + return + + self.model.pod.set_spec({'containers': [{ + 'name': 'my-charm', + 'imageDetails': image_info, + }]}) + +if __name__ == "__main__": + main(MyCharm) +``` + + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..62c9a4d205c5f5c2ff6cda8bed53f0660d1659a9 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/RECORD @@ -0,0 +1,7 @@ +__pycache__/oci_image.cpython-38.pyc,, +oci_image-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +oci_image-1.0.0.dist-info/METADATA,sha256=QIpPa4JcSPa_Ci0n-DaCNp4PkKovZudFW8FnpnauJnQ,1808 +oci_image-1.0.0.dist-info/RECORD,, +oci_image-1.0.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92 +oci_image-1.0.0.dist-info/top_level.txt,sha256=M4dLaObLx7irI4EO-A4_VJP_b-A6dDD7hB5QyVKdHOY,10 +oci_image.py,sha256=c75VR2vSmOp9pPTP2cnsxo23CqhhFbRtnIOtMjzDyXY,1794 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..385faab0525ccdbfd1070a8bebcca3ac8617236e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..cd6962384eaf5e60f5976c60d221b84ba5561a1d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image-1.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +oci_image diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image.py new file mode 100644 index 0000000000000000000000000000000000000000..f4d3818f47c3bde81c97dd43a702e2aa4d0dde7f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/oci_image.py @@ -0,0 +1,53 @@ +from pathlib import Path + +import yaml +from 
ops.framework import Object +from ops.model import BlockedStatus, ModelError + + +class OCIImageResource(Object): + def __init__(self, charm, resource_name): + super().__init__(charm, resource_name) + self.resource_name = resource_name + + def fetch(self): + try: + resource_path = self.model.resources.fetch(self.resource_name) + except ModelError as e: + raise MissingResourceError(self.resource_name) from e + if not resource_path.exists(): + raise MissingResourceError(self.resource_name) + resource_text = Path(resource_path).read_text() + if not resource_text: + raise MissingResourceError(self.resource_name) + try: + resource_data = yaml.safe_load(resource_text) + except yaml.YAMLError as e: + raise InvalidResourceError(self.resource_name) from e + else: + # Translate the data from the format used by the charm store to the + # format used by the Juju K8s pod spec, since that is how this is + # typically used. + return { + 'imagePath': resource_data['registrypath'], + 'username': resource_data['username'], + 'password': resource_data['password'], + } + + +class OCIImageResourceError(ModelError): + status_type = BlockedStatus + status_message = 'Resource error' + + def __init__(self, resource_name): + super().__init__(resource_name) + self.status = self.status_type( + f'{self.status_message}: {resource_name}') + + +class MissingResourceError(OCIImageResourceError): + status_message = 'Missing resource' + + +class InvalidResourceError(OCIImageResourceError): + status_message = 'Invalid resource' diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/INSTALLER b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/LICENSE.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..cd45af9374c8209252a546c5a4c5a4221c8fb8af --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/METADATA @@ -0,0 +1,263 @@ +Metadata-Version: 2.1 +Name: ops +Version: 1.1.0 +Summary: The Python library behind great charms +Home-page: https://github.com/canonical/operator +Author: The Charmcraft team at Canonical Ltd. +Author-email: charmcraft@lists.launchpad.net +License: Apache-2.0 +Platform: UNKNOWN +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3.5 +Description-Content-Type: text/markdown +Requires-Dist: PyYAML + +# The Operator Framework + +This Operator Framework simplifies [Kubernetes +operator](https://charmhub.io/about) development for +[model-driven application +management](https://juju.is/model-driven-operations). + +A Kubernetes operator is a container that drives lifecycle management, +configuration, integration and daily actions for an application. +Operators simplify software management and operations. They capture +reusable app domain knowledge from experts in a software component that +can be shared. + +This project extends the operator pattern to enable +[universal operators](https://juju.is/universal-operators), not just +for Kubernetes but also operators for traditional Linux or Windows +application management. + +Operators use an [Operator Lifecycle Manager +(OLM)](https://juju.is/operator-lifecycle-manager) to coordinate their +work in a cluster. The system uses Golang for concurrent event +processing under the hood, but enables the operators to be written in +Python. + +## Simple, composable operators + +Operators should 'do one thing and do it well'. Each operator drives a +single microservice and can be [composed with other +operators](https://juju.is/integration) to deliver a complex application. + +It is better to have small, reusable operators that each drive a single +microservice very well. The operator handles instantiation, scaling, +configuration, optimisation, networking, service mesh, observability, +and day-2 operations specific to that microservice. + +Operator composition takes place through declarative integration in +the OLM. Operators declare integration endpoints, and discover lines of +integration between those endpoints dynamically at runtime. + +## Pure Python operators + +The framework provides a standard Python library and object model that +represents the application graph, and an event distribution mechanism for +distributed system coordination and communication. + +The OLM is written in Golang for efficient concurrency in event handling +and distribution. Operators can be written in any language. We recommend +this Python framework for ease of design, development and collaboration. 
+
+## Better collaboration
+
+Operator developers publish Python libraries that make it easy to integrate
+your operator with their operator. The framework includes standard tools
+to distribute these integration libraries and keep them up to date.
+
+Development collaboration happens at [Charmhub.io](https://charmhub.io/) where
+operators are published along with integration libraries. Design and
+code review discussions are hosted in the
+[Charmhub forum](https://discourse.charmhub.io/). We recommend the
+[Open Operator Manifesto](https://charmhub.io/manifesto) as a guideline for
+high quality operator engineering.
+
+## Event serialization and operator services
+
+Distributed systems can be hard! So this framework exists to make it much
+simpler to reason about operator behaviour, especially in complex deployments.
+The OLM provides [operator services](https://juju.is/operator-services) such
+as provisioning, event delivery, leader election and model management.
+
+Coordination between operators is provided by a cluster-wide event
+distribution system. Events are serialized to avoid race conditions in any
+given container or machine. This greatly simplifies the development of
+operators for high availability, scale-out and integrated applications.
+
+## Model-driven Operator Lifecycle Manager
+
+A key goal of the project is to improve the user experience for admins
+working with multiple different operators.
+
+We embrace [model-driven operations](https://juju.is/model-driven-operations)
+in the Operator Lifecycle Manager. The model encompasses capacity,
+storage, networking, the application graph and administrative access.
+
+Admins describe the application graph of integrated microservices, and
+the OLM then drives instantiation. A change in the model is propagated
+to all affected operators, reducing the duplication of effort and
+repetition normally found in operating a complex topology of services.
+
+Administrative actions, updates, configuration and integration are all
+driven through the OLM.
+
+# Getting started
+
+A package of operator code is called a charm. You will use `charmcraft`
+to register your operator name, and publish it when you are ready.
+
+```
+$ sudo snap install charmcraft --beta
+charmcraft (beta) 0.6.0 from John Lenton (chipaca) installed
+```
+
+Charms written using the operator framework are just Python code. The goal
+is to feel natural for somebody used to coding in Python, and reasonably
+easy to learn for somebody who is not a pythonista.
+
+The dependencies of the operator framework are kept as minimal as possible;
+currently that's Python 3.5 or greater, and `PyYAML` (both are included by
+default in Ubuntu's cloud images from 16.04 on).
+
+# A quick introduction
+
+Make an empty directory `my-charm` and cd into it. Then start a new charm
+with:
+
+```
+$ charmcraft init
+All done.
+There are some notes about things we think you should do.
+These are marked with ‘TODO:’, as is customary. Namely:
+  README.md: fill out the description
+  README.md: explain how to use the charm
+  metadata.yaml: fill out the charm's description
+  metadata.yaml: fill out the charm's summary
+```
+
+Charmed operators are just Python code. The entry point to your charm can
+be any filename; by default this is `src/charm.py`, which must be executable
+(and probably have `#!/usr/bin/env python3` on the first line).
+
+You need a `metadata.yaml` to describe your charm, and if you will support
+configuration of your charm then a `config.yaml` file is required too. The
+`requirements.txt` specifies any Python dependencies.
+
+```
+$ tree my-charm/
+my-charm/
+├── actions.yaml
+├── config.yaml
+├── LICENSE
+├── metadata.yaml
+├── README.md
+├── requirements-dev.txt
+├── requirements.txt
+├── run_tests
+├── src
+│   └── charm.py
+├── tests
+│   ├── __init__.py
+│   └── my_charm.py
+```
+
+`src/charm.py` here is the entry point to your charm code. At a minimum, it
+needs to define a subclass of `CharmBase` and pass that into the framework
+`main` function:
+
+```python
+from ops.charm import CharmBase
+from ops.main import main
+
+class MyCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.start, self.on_start)
+
+    def on_start(self, event):
+        # Handle the start event here.
+        pass
+
+if __name__ == "__main__":
+    main(MyCharm)
+```
+
+That should be enough for you to be able to run
+
+```
+$ charmcraft build
+Done, charm left in 'my-charm.charm'
+$ juju deploy ./my-charm.charm
+```
+
+> 🛈 More information on [`charmcraft`](https://pypi.org/project/charmcraft/) can
+> also be found on its [github page](https://github.com/canonical/charmcraft).
+
+Happy charming!
+
+# Testing your charms
+
+The operator framework provides a testing harness, so you can check your
+charm does the right thing in different scenarios, without having to create
+a full deployment. `pydoc3 ops.testing` has the details, including this
+example:
+
+```python
+harness = Harness(MyCharm)
+# Do initial setup here
+relation_id = harness.add_relation('db', 'postgresql')
+# Now instantiate the charm to see events as the model changes
+harness.begin()
+harness.add_relation_unit(relation_id, 'postgresql/0')
+harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+# Check that charm has properly handled the relation_joined event for postgresql/0
+self.assertEqual(harness.charm. ...)
+```
+
+A fuller, self-contained test module is sketched in the appendix at the end
+of this README.
+
+## Talk to us
+
+If you need help, have ideas, or would just like to chat with us, reach out on
+IRC: we're in [#smooth-operator] on freenode (or try the [webchat]).
+
+We also pay attention to [Charmhub discourse](https://discourse.charmhub.io/).
+
+You can also deep dive into the [API docs] if that's your thing.
+
+[webchat]: https://webchat.freenode.net/#smooth-operator
+[#smooth-operator]: irc://chat.freenode.net/%23smooth-operator
+[discourse]: https://discourse.juju.is/c/charming
+[API docs]: https://ops.rtfd.io/
+
+## Operator Framework development
+
+To work in the framework itself you will need Python >= 3.5 and the
+dependencies in `requirements-dev.txt` installed in your system, or a
+virtualenv:
+
+    virtualenv --python=python3 env
+    source env/bin/activate
+    pip install -r requirements-dev.txt
+
+Then you can try `./run_tests`; it should all go green.
+
+For improved performance on the tests, ensure that you have PyYAML
+installed with the correct extensions:
+
+    apt-get install libyaml-dev
+    pip install --force-reinstall --no-cache-dir pyyaml
+
+If you want to build the documentation you'll need the requirements from
+`docs/requirements.txt`, or in your virtualenv:
+
+    pip install -r docs/requirements.txt
+
+and then you can run `./build_docs`.
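+
+# Appendix: a complete test-module sketch
+
+Tying together the quick introduction and the testing harness above, a full
+test module could look like the following. This is a minimal sketch, assuming
+the `MyCharm` class from the quick introduction lives in `src/charm.py` and
+that `src` is on `PYTHONPATH` so `from charm import MyCharm` resolves (the
+generated `run_tests` script typically arranges this):
+
+```python
+import unittest
+
+from ops.testing import Harness
+
+from charm import MyCharm
+
+
+class TestMyCharm(unittest.TestCase):
+    def setUp(self):
+        # Harness builds an in-memory model backend, so no Juju
+        # controller or real deployment is needed.
+        self.harness = Harness(MyCharm)
+        self.harness.begin()
+
+    def test_config_changed(self):
+        # update_config stores the new values and emits config-changed,
+        # just as Juju would; the charm reads them back via self.config.
+        # The 'thing' option is a hypothetical example key.
+        self.harness.update_config({'thing': 'foo'})
+        self.assertEqual(self.harness.charm.config['thing'], 'foo')
+
+
+if __name__ == "__main__":
+    unittest.main()
+```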
+ + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..35eb15f5359c1b967188c151b20fdcbc2d251c27 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/RECORD @@ -0,0 +1,28 @@ +ops-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +ops-1.1.0.dist-info/LICENSE.txt,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +ops-1.1.0.dist-info/METADATA,sha256=ffVuqPnEob6-iBYjEf3lPShSbToJL17obFFufoW2F4g,9485 +ops-1.1.0.dist-info/RECORD,, +ops-1.1.0.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92 +ops-1.1.0.dist-info/top_level.txt,sha256=enC05wWafSg8iDKIvj3gvtAtEP2kYCyN5Gmd689q-_I,4 +ops/__init__.py,sha256=WaHb0dfp1KEe6jFV8Pm_mcdJ3ModiWujnQ6xLjNzPNQ,819 +ops/__pycache__/__init__.cpython-38.pyc,, +ops/__pycache__/charm.cpython-38.pyc,, +ops/__pycache__/framework.cpython-38.pyc,, +ops/__pycache__/jujuversion.cpython-38.pyc,, +ops/__pycache__/log.cpython-38.pyc,, +ops/__pycache__/main.cpython-38.pyc,, +ops/__pycache__/model.cpython-38.pyc,, +ops/__pycache__/storage.cpython-38.pyc,, +ops/__pycache__/testing.cpython-38.pyc,, +ops/__pycache__/version.cpython-38.pyc,, +ops/charm.py,sha256=7KyaNNA0t_a0h0hrzehSEWm4xU_Y5JIqGWHTg747qfU,32817 +ops/framework.py,sha256=1ByOtFKRR6kRzOEbfWnGEMNevixOYf18U0oZxKq8LsA,43769 +ops/jujuversion.py,sha256=9wMlUmngcAENV9RkgVVLWtZsyRQaf6XNrQQqUeY_fHA,4139 +ops/lib/__init__.py,sha256=QizPpuRWXjqbH5Gv7mnH8CcPR9BX7q2YNFnxyoSsA0g,9213 +ops/lib/__pycache__/__init__.cpython-38.pyc,, +ops/log.py,sha256=JVpt_Vkf_lWO2cucUcJfXjAWVTattk4xBscSs65Sn3I,2155 +ops/main.py,sha256=BUJZM4soFpsY4bO6zJ1bSHQeWJcm028gq0MhJT3rC8M,15523 +ops/model.py,sha256=yvM1yhidNyGpVdxkG365jPJRhQuE42EiiojBHJ7tL3c,47930 +ops/storage.py,sha256=jEfszzQGYDrl5wa03I6txvea-7lI661Yq6n7sIPa0fU,14192 +ops/testing.py,sha256=sH8PoNzGmfPdVWM1lBjStxHcNfQHsasFjF-WzHfDhFA,34898 +ops/version.py,sha256=UuaLFU_UN-InNFu4I23Y22huxQdbsOgTQ_d_r623fx4,46 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..b552003ff90e66227ec90d1b159324f140d46001 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d81d3bb6fea804d1db7a1549d67244b513aa145 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops-1.1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +ops diff --git 
a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17b2969db298b21bc47bbe1d3614ccff93e9c6e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Operator Framework.""" + +from .version import version as __version__ # noqa: F401 (imported but unused) + +# Import here the bare minimum to break the circular import between modules +from . import charm # noqa: F401 (imported but unused) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/charm.py new file mode 100644 index 0000000000000000000000000000000000000000..82ed2fdd8b85d423eb8645bddb4b64426ad180e1 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/charm.py @@ -0,0 +1,823 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base objects for the Charm, events and metadata.""" + +import enum +import os +import pathlib +import typing + +import yaml + +from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents +from ops import model + + +def _loadYaml(source): + if yaml.__with_libyaml__: + return yaml.load(source, Loader=yaml.CSafeLoader) + return yaml.load(source, Loader=yaml.SafeLoader) + + +class HookEvent(EventBase): + """Events raised by Juju to progress a charm's lifecycle. + + Hooks are callback methods of a charm class (a subclass of + :class:`CharmBase`) that are invoked in response to events raised + by Juju. These callback methods are the means by which a charm + governs the lifecycle of its application. + + The :class:`HookEvent` class is the base of a type hierarchy of events + related to the charm's lifecycle. + + :class:`HookEvent` subtypes are grouped into the following categories + + - Core lifecycle events + - Relation events + - Storage events + - Metric events + """ + + +class ActionEvent(EventBase): + """Events raised by Juju when an administrator invokes a Juju Action. 
+
+    This class is the data type of events triggered when an administrator
+    invokes a Juju Action. Callbacks bound to these events may be used
+    for responding to the administrator's Juju Action request.
+
+    To read the parameters for the action, see the instance variable :attr:`params`.
+    To respond with the result of the action, call :meth:`set_results`. To add
+    progress messages that are visible as the action is progressing use
+    :meth:`log`.
+
+    Attributes:
+        params: The parameters passed to the action.
+    """
+
+    def defer(self):
+        """Action events are not deferrable like other events.
+
+        This is because an action runs synchronously and the administrator
+        is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Event triggered when a charm is installed.
+
+    This event is triggered at the beginning of a charm's
+    lifecycle. Any associated callback method should be used to
+    perform one-time setup operations, such as installing prerequisite
+    software.
+    """
+
+
+class StartEvent(HookEvent):
+    """Event triggered immediately after first configuration change.
+
+    This event is triggered immediately after the first
+    :class:`ConfigChangedEvent`. Callback methods bound to the event should be
+    used to ensure that the charm’s software is in a running state. Note that
+    the charm’s software should be configured so as to persist in this state
+    through reboots without further intervention on Juju’s part.
+    """
+
+
+class StopEvent(HookEvent):
+    """Event triggered when a charm is shut down.
+
+    This event is triggered when an application's removal is requested
+    by the client. The event fires immediately before the end of the
+    unit’s destruction sequence. Callback methods bound to this event
+    should be used to ensure that the charm’s software is not running,
+    and that it will not start again on reboot.
+    """
+
+
+class RemoveEvent(HookEvent):
+    """Event triggered when a unit is about to be terminated.
+
+    This event fires prior to Juju removing the charm and terminating its unit.
+    """
+
+
+class ConfigChangedEvent(HookEvent):
+    """Event triggered when a configuration change is requested.
+
+    This event fires in several different situations.
+
+    - immediately after the :class:`install <InstallEvent>` event.
+    - after a :class:`relation is created <RelationCreatedEvent>`.
+    - after a :class:`leader is elected <LeaderElectedEvent>`.
+    - after changing charm configuration using the GUI or command line
+      interface
+    - when the charm :class:`starts <StartEvent>`.
+    - when a new unit :class:`joins a relation <RelationJoinedEvent>`.
+    - when there is a :class:`change to an existing relation <RelationChangedEvent>`.
+
+    Any callback method bound to this event cannot assume that the
+    software has already been started; it should not start stopped
+    software, but should (if appropriate) restart running software to
+    take configuration changes into account.
+    """
+
+
+class UpdateStatusEvent(HookEvent):
+    """Event triggered by a status update request from Juju.
+
+    This event is periodically triggered by Juju so that it can
+    provide constant feedback to the administrator about the status of
+    the application the charm is modeling. Any callback method bound
+    to this event should determine the "health" of the application and
+    set the status appropriately.
+
+    The interval between :class:`update-status <UpdateStatusEvent>` events can
+    be configured model-wide, e.g. ``juju model-config
+    update-status-hook-interval=1m``.
+    """
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Event triggered by request to upgrade the charm.
+
+    This event will be triggered when an administrator executes ``juju
+    upgrade-charm``. The event fires after Juju has unpacked the upgraded charm
+    code, and so this event will be handled by the callback method bound to the
+    event in the new codebase. The associated callback method is invoked
+    provided there is no existing error state. The callback method should be
+    used to reconcile current state written by an older version of the charm
+    into whatever form that is needed by the current charm version.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Event triggered to prepare a unit for series upgrade.
+
+    This event triggers when an administrator executes ``juju upgrade-series
+    MACHINE prepare``. The event will fire for each unit that is running on the
+    specified machine. Any callback method bound to this event must prepare the
+    charm for an upgrade to the series. This may include things like exporting
+    database content to a version neutral format, or evacuating running
+    instances to other machines.
+
+    It can be assumed that only after all units on a machine have executed the
+    callback method associated with this event, the administrator will initiate
+    steps to actually upgrade the series. After the upgrade has been completed,
+    the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Event triggered after a series upgrade.
+
+    This event is triggered after the administrator has done a distribution
+    upgrade (or rolled back and kept the same series). It is called in response
+    to ``juju upgrade-series MACHINE complete``. Associated charm callback
+    methods are expected to do whatever steps are necessary to reconfigure their
+    applications for the new series. This may include things like populating the
+    upgraded version of a database. Note however charms are expected to check if
+    the series has actually changed or whether it was rolled back to the
+    original series.
+    """
+
+
+class LeaderElectedEvent(HookEvent):
+    """Event triggered when a new leader has been elected.
+
+    Juju will trigger this event when a new leader unit is chosen for
+    a given application.
+
+    This event fires at least once after Juju selects a leader
+    unit.
Callback methods bound to this event may take any action + required for the elected unit to assert leadership. Note that only + the elected leader unit will receive this event. + """ + + +class LeaderSettingsChangedEvent(HookEvent): + """Event triggered when leader changes any settings. + + DEPRECATED NOTICE + + This event has been deprecated in favor of using a Peer relation, + and having the leader set a value in the Application data bag for + that peer relation. (see :class:`RelationChangedEvent`). + """ + + +class CollectMetricsEvent(HookEvent): + """Event triggered by Juju to collect metrics. + + Juju fires this event every five minutes for the lifetime of the + unit. Callback methods bound to this event may use the :meth:`add_metrics` + method of this class to send measurements to Juju. + + Note that associated callback methods are currently sandboxed in + how they can interact with Juju. + """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Relation lifecycle events are generated when application units + participate in relations. Units can only participate in relations + after they have been "started", and before they have been + "stopped". Within that time window, the unit may participate in + several different relations at a time, including multiple + relations with the same name. + + Attributes: + relation: The :class:`~ops.model.Relation` involved in this event + app: The remote :class:`~ops.model.Application` that has triggered this + event + unit: The remote unit that has triggered this event. This may be + ``None`` if the relation event was triggered as an + :class:`~ops.model.Application` level event + + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Event triggered when a new relation is created. + + This is triggered when a new relation to another app is added in Juju. 
This
+    can occur before units for those applications have started. All existing
+    relations should be established before start.
+    """
+
+
+class RelationJoinedEvent(RelationEvent):
+    """Event triggered when a new unit joins a relation.
+
+    This event is triggered whenever a new unit of a related
+    application joins the relation. The event fires only when that
+    remote unit is first observed by the unit. Callback methods bound
+    to this event may set any local unit settings that can be
+    determined using no more than the name of the joining unit and the
+    remote ``private-address`` setting, which is always available when
+    the relation is created and is by convention not deleted.
+    """
+
+
+class RelationChangedEvent(RelationEvent):
+    """Event triggered when relation data changes.
+
+    This event is triggered whenever there is a change to the data bucket for a
+    related application or unit. Look at ``event.relation.data[event.unit/app]``
+    to see the new information, where ``event`` is the event object passed to
+    the callback method bound to this event.
+
+    This event always fires once, after :class:`RelationJoinedEvent`, and
+    will subsequently fire whenever that remote unit changes its settings for
+    the relation. Callback methods bound to this event should be the only ones
+    that rely on remote relation settings. They should not error if the settings
+    are incomplete, since it can be guaranteed that when the remote unit or
+    application changes its settings, the event will fire again.
+
+    The settings that may be queried, or set, are determined by the relation’s
+    interface.
+    """
+
+
+class RelationDepartedEvent(RelationEvent):
+    """Event triggered when a unit leaves a relation.
+
+    This is the inverse of the :class:`RelationJoinedEvent`, representing when a
+    unit is leaving the relation (the unit is being removed, the app is being
+    removed, the relation is being removed). It is fired once for each unit that
+    is going away.
+
+    When the remote unit is known to be leaving the relation, this will result
+    in the :class:`RelationChangedEvent` firing at least once, after which the
+    :class:`RelationDepartedEvent` will fire. The :class:`RelationDepartedEvent`
+    will fire once only. Once the :class:`RelationDepartedEvent` has fired no
+    further :class:`RelationChangedEvent` will fire.
+
+    Callback methods bound to this event may be used to remove all
+    references to the departing remote unit, because there’s no
+    guarantee that it’s still part of the system; it’s perfectly
+    probable (although not guaranteed) that the system running that
+    unit has already shut down.
+
+    Once all callback methods bound to this event have been run for such a
+    relation, the unit agent will fire the :class:`RelationBrokenEvent`.
+    """
+
+
+class RelationBrokenEvent(RelationEvent):
+    """Event triggered when a relation is removed.
+
+    If a relation is being removed (``juju remove-relation`` or ``juju
+    remove-application``), once all the units have been removed, this event will
+    fire to signal that the relationship has been fully terminated.
+
+    The event indicates that the current relation is no longer valid, and that
+    the charm’s software must be configured as though the relation had never
+    existed. It will only be called after every callback method bound to
+    :class:`RelationDepartedEvent` has been run. If a callback method
+    bound to this event is being executed, it is guaranteed that no remote units
+    are currently known locally.
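+
+    For example, a callback method bound to this event might reset local
+    state (a sketch only; ``_stored`` and ``_update_config_file`` are
+    hypothetical helpers, not part of this module)::
+
+        def _on_db_relation_broken(self, event):
+            # Reconfigure as though the relation had never existed.
+            self._stored.db_uri = None
+            self._update_config_file()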
+ """ + + +class StorageEvent(HookEvent): + """Base class representing storage-related events. + + Juju can provide a variety of storage types to a charms. The + charms can define several different types of storage that are + allocated from Juju. Changes in state of storage trigger sub-types + of :class:`StorageEvent`. + """ + + +class StorageAttachedEvent(StorageEvent): + """Event triggered when new storage becomes available. + + This event is triggered when new storage is available for the + charm to use. + + Callback methods bound to this event allow the charm to run code + when storage has been added. Such methods will be run before the + :class:`InstallEvent` fires, so that the installation routine may + use the storage. The name prefix of this hook will depend on the + storage key defined in the ``metadata.yaml`` file. + """ + + +class StorageDetachingEvent(StorageEvent): + """Event triggered prior to removal of storage. + + This event is triggered when storage a charm has been using is + going away. + + Callback methods bound to this event allow the charm to run code + before storage is removed. Such methods will be run before storage + is detached, and always before the :class:`StopEvent` fires, thereby + allowing the charm to gracefully release resources before they are + removed and before the unit terminates. The name prefix of the + hook will depend on the storage key defined in the ``metadata.yaml`` + file. + """ + + +class CharmEvents(ObjectEvents): + """Events generated by Juju pertaining to application lifecycle. + + This class is used to create an event descriptor (``self.on``) attribute for + a charm class that inherits from :class:`CharmBase`. The event descriptor + may be used to set up event handlers for corresponding events. + + By default the following events will be provided through + :class:`CharmBase`:: + + self.on.install + self.on.start + self.on.remove + self.on.update_status + self.on.config_changed + self.on.upgrade_charm + self.on.pre_series_upgrade + self.on.post_series_upgrade + self.on.leader_elected + self.on.collect_metrics + + + In addition to these, depending on the charm's metadata (``metadata.yaml``), + named relation and storage events may also be defined. These named events + are created by :class:`CharmBase` using charm metadata. The named events may be + accessed as ``self.on[].`` + """ + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the charm overall. + + :class:`CharmBase` is used to create a charm. This is done by inheriting + from :class:`CharmBase` and customising the sub class as required. 
+    create your own charm, say ``MyCharm``, define a charm class and set up the
+    required event handlers (“hooks”) in its constructor::
+
+        import logging
+
+        from ops.charm import CharmBase
+        from ops.main import main
+
+        logger = logging.getLogger(__name__)
+
+        class MyCharm(CharmBase):
+            def __init__(self, *args):
+                logger.debug('Initializing Charm')
+
+                super().__init__(*args)
+
+                self.framework.observe(self.on.config_changed, self._on_config_changed)
+                self.framework.observe(self.on.stop, self._on_stop)
+                # ...
+
+        if __name__ == "__main__":
+            main(MyCharm)
+
+    As shown in the example above, a charm class is instantiated by
+    :func:`~ops.main.main` rather than charm authors directly instantiating a
+    charm.
+
+    Args:
+        framework: The framework responsible for managing the Model and events for this
+            charm.
+        key: Ignored; will remove after deprecation period of the signature change.
+
+    """
+
+    # note that without the #: below, sphinx will copy the whole of CharmEvents
+    # docstring inline which is less than ideal.
+    #: Used to set up event handlers; see :class:`CharmEvents`.
+    on = CharmEvents()
+
+    def __init__(self, framework: Framework, key: typing.Optional = None):
+        super().__init__(framework, None)
+
+        for relation_name in self.framework.meta.relations:
+            relation_name = relation_name.replace('-', '_')
+            self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent)
+            self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent)
+            self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent)
+            self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent)
+            self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent)
+
+        for storage_name in self.framework.meta.storages:
+            storage_name = storage_name.replace('-', '_')
+            self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent)
+            self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent)
+
+        for action_name in self.framework.meta.actions:
+            action_name = action_name.replace('-', '_')
+            self.on.define_event(action_name + '_action', ActionEvent)
+
+    @property
+    def app(self) -> model.Application:
+        """Application that this unit is part of."""
+        return self.framework.model.app
+
+    @property
+    def unit(self) -> model.Unit:
+        """Unit that this execution is responsible for."""
+        return self.framework.model.unit
+
+    @property
+    def meta(self) -> 'CharmMeta':
+        """Metadata of this charm."""
+        return self.framework.meta
+
+    @property
+    def charm_dir(self) -> pathlib.Path:
+        """Root directory of the charm as it is running."""
+        return self.framework.charm_dir
+
+    @property
+    def config(self) -> model.ConfigData:
+        """A mapping containing the charm's config and current values."""
+        return self.model.config
+
+
+class CharmMeta:
+    """Object containing the metadata for the charm.
+
+    This is read from ``metadata.yaml`` and/or ``actions.yaml``. Generally
+    charms will define this information, rather than reading it at runtime. This
+    class is mostly for the framework to understand what the charm has defined.
+
+    The :attr:`maintainers`, :attr:`tags`, :attr:`terms`, :attr:`series`, and
+    :attr:`extra_bindings` attributes are all lists of strings. The
+    The :attr:`requires`, :attr:`provides`, :attr:`peers`, :attr:`relations`,
+    :attr:`storages`, :attr:`resources`, and :attr:`payloads` attributes are all
+    mappings of names to instances of the respective :class:`RelationMeta`,
+    :class:`StorageMeta`, :class:`ResourceMeta`, or :class:`PayloadMeta`.
+
+    The :attr:`relations` attribute is a convenience accessor which includes all
+    of the ``requires``, ``provides``, and ``peers`` :class:`RelationMeta`
+    items. If needed, the role of the relation definition can be obtained from
+    its :attr:`role <RelationMeta.role>` attribute.
+
+    Attributes:
+        name: The name of this charm
+        summary: Short description of what this charm does
+        description: Long description for this charm
+        maintainers: A list of strings of the email addresses of the maintainers
+            of this charm.
+        tags: Charm store tag metadata for categories associated with this charm.
+        terms: Charm store terms that should be agreed to before this charm can
+            be deployed. (Used for things like licensing issues.)
+        series: The list of supported OS series that this charm can support.
+            The first entry in the list is the default series that will be
+            used by deploy if no other series is requested by the user.
+        subordinate: True/False whether this charm is intended to be used as a
+            subordinate charm.
+        min_juju_version: If supplied, indicates this charm needs features that
+            are not available in older versions of Juju.
+        requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation.
+        provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation.
+        peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation.
+        relations: A dict containing all :class:`RelationMeta` attributes (merged from other
+            sections)
+        storages: A dict of {name: :class:`StorageMeta`} for each defined storage.
+        resources: A dict of {name: :class:`ResourceMeta`} for each defined resource.
+        payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload.
+        extra_bindings: A dict of additional named bindings that a charm can use
+            for network configuration.
+        actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined.
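+
+    For example (an editor's sketch, not upstream documentation; the YAML
+    snippet is hypothetical)::
+
+        meta = CharmMeta.from_yaml('''
+        name: demo
+        requires:
+          db:
+            interface: mysql
+        storage:
+          data:
+            type: filesystem
+        ''')
+        meta.relations['db'].role    # RelationRole.requires
+        meta.storages['data'].type   # 'filesystem'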
+ Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + if raw_actions is None: + raw_actions = {} + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + """An annotation for a charm's role in a relation. + + For each relation a charm's role may be + + - A Peer + - A service consumer in the relation ('requires') + - A service provider in the relation ('provides') + """ + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is :class:`RelationRole`; one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. + scope: "global" or "container" scope based on how the relation should be used. 
+ """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition. + + Attributes: + storage_name: Name of storage + type: Storage type + description: A text description of the storage + read_only: Whether or not the storage is read only + minimum_size: Minimum size of storage + location: Mount point of storage + multiple_range: Range of numeric qualifiers when multiple storage units are used + """ + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition. + + Attributes: + resource_name: Name of resource + filename: Name of file + description: A text description of resource + """ + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition. + + Attributes: + payload_name: Name of payload + type: Payload type + """ + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {: } + self.required = raw.get('required', []) # [, ...] diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/framework.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/framework.py new file mode 100644 index 0000000000000000000000000000000000000000..d20c0007ebcd58456a0bac90ae5dc0eaacb9a407 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/framework.py @@ -0,0 +1,1199 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""The Operator Framework infrastructure.""" + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + """Create a new handle as child of the current one.""" + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + """Return own parent handle.""" + return self._parent + + @property + def kind(self): + """Return the handle's kind.""" + return self._kind + + @property + def key(self): + """Return the handle's key.""" + return self._key + + @property + def path(self): + """Return the handle's path.""" + return self._path + + @classmethod + def from_path(cls, path): + """Build a handle from the indicated path.""" + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + """The base for all the different Events. + + Inherit this and override 'snapshot' and 'restore' methods to build a custom event. + """ + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def __repr__(self): + return "<%s via %s>" % (self.__class__.__name__, self.handle) + + def defer(self): + """Defer the event to the future. + + Deferring an event from a handler puts that handler into a queue, to be + called again the next time the charm is invoked. This invocation may be + the result of an action, or any event other than metric events. The + queue of events will be dispatched before the new event is processed. + + From the above you may deduce, but it's important to point out: + + * ``defer()`` does not interrupt the execution of the current event + handler. 
In almost all cases, a call to ``defer()`` should be followed
+          by an explicit ``return`` from the handler;
+
+        * the re-execution of the deferred event handler starts from the top of
+          the handler method (not where defer was called);
+
+        * only the handlers that actually called ``defer()`` are called again
+          (that is: despite talking about “deferring an event” it is actually
+          the handler/event combination that is deferred); and
+
+        * any deferred events get processed before the event (or action) that
+          caused the current invocation of the charm.
+
+        The general desire to call ``defer()`` happens when some precondition
+        isn't yet met. However, care should be exercised as to whether it is
+        better to defer this event so that you see it again, or whether it is
+        better to just wait for the event that indicates the precondition has
+        been met.
+
+        For example, if ``config-changed`` is fired, and you are waiting for
+        different config, there is no reason to defer the event because there
+        will be a *different* ``config-changed`` event when the config actually
+        changes, rather than checking to see if maybe config has changed prior
+        to every other event that occurs.
+
+        Similarly, if you need two events to occur before you are ready to
+        proceed (say events A and B), when you see event A you could choose to
+        ``defer()`` it because you haven't seen B yet. However, that leads to:
+
+        1. event A fires, calls defer()
+
+        2. event B fires, event A handler is called first, still hasn't seen B
+           happen, so is deferred again. Then B happens, which progresses since
+           it has seen A.
+
+        3. At some future time, event C happens, which also checks if A can
+           proceed.
+
+        """
+        logger.debug("Deferring %s.", self)
+        self.deferred = True
+
+    def snapshot(self):
+        """Return the snapshot data that should be persisted.
+
+        Subclasses must override to save any custom state.
+        """
+        return None
+
+    def restore(self, snapshot):
+        """Restore the value state from the given snapshot.
+
+        Subclasses must override to restore their custom state.
+        """
+        self.deferred = False
+
+
+class EventSource:
+    """EventSource wraps an event type with a descriptor to facilitate observing and emitting.
+
+    It is generally used as::
+
+        class SomethingHappened(EventBase):
+            pass
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    With that, instances of that type will offer the someobj.something_happened
+    attribute which is a BoundEvent and may be used to emit and observe the event.
+    """
+
+    def __init__(self, event_type):
+        if not isinstance(event_type, type) or not issubclass(event_type, EventBase):
+            raise RuntimeError(
+                'Event requires a subclass of EventBase as an argument, got {}'.format(event_type))
+        self.event_type = event_type
+        self.event_kind = None
+        self.emitter_type = None
+
+    def _set_name(self, emitter_type, event_kind):
+        if self.event_kind is not None:
+            raise RuntimeError(
+                'EventSource({}) reused as {}.{} and {}.{}'.format(
+                    self.event_type.__name__,
+                    self.emitter_type.__name__,
+                    self.event_kind,
+                    emitter_type.__name__,
+                    event_kind,
+                ))
+        self.event_kind = event_kind
+        self.emitter_type = emitter_type
+
+    def __get__(self, emitter, emitter_type=None):
+        if emitter is None:
+            return self
+        # Framework might not be available if accessed as CharmClass.on.event
+        # rather than charm_instance.on.event, but in that case it couldn't be
+        # emitted anyway, so there's no point to registering it.
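+        # Editor's note (not upstream): this is the descriptor protocol at
+        # work; on a live emitter the attribute lookup returns a BoundEvent,
+        # e.g. (hypothetical charm instance):
+        #
+        #     charm.on.config_changed          # -> BoundEvent
+        #     charm.on.config_changed.emit()   # notify registered observers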
+        framework = getattr(emitter, 'framework', None)
+        if framework is not None:
+            framework.register_type(self.event_type, emitter, self.event_kind)
+        return BoundEvent(emitter, self.event_type, self.event_kind)
+
+
+class BoundEvent:
+    """Event bound to an Object."""
+
+    def __repr__(self):
+        return '<BoundEvent {} bound to {}.{} at {}>'.format(
+            self.event_type.__name__,
+            type(self.emitter).__name__,
+            self.event_kind,
+            hex(id(self)),
+        )
+
+    def __init__(self, emitter, event_type, event_kind):
+        self.emitter = emitter
+        self.event_type = event_type
+        self.event_kind = event_kind
+
+    def emit(self, *args, **kwargs):
+        """Emit event to all registered observers.
+
+        The current storage state is committed before and after each observer is notified.
+        """
+        framework = self.emitter.framework
+        key = framework._next_event_key()
+        event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs)
+        framework._emit(event)
+
+
+class HandleKind:
+    """Helper descriptor to define the Object.handle_kind field.
+
+    The handle_kind for an object defaults to its type name, but it may
+    be explicitly overridden if desired.
+    """
+
+    def __get__(self, obj, obj_type):
+        kind = obj_type.__dict__.get("handle_kind")
+        if kind:
+            return kind
+        return obj_type.__name__
+
+
+class _Metaclass(type):
+    """Helper class to ensure proper instantiation of Object-derived classes.
+
+    This class currently has a single purpose: events derived from EventSource
+    that are class attributes of Object-derived classes need to be told what
+    their name is in that class. For example, in
+
+        class SomeObject(Object):
+            something_happened = EventSource(SomethingHappened)
+
+    the instance of EventSource needs to know it's called 'something_happened'.
+
+    Starting from python 3.6 we could use __set_name__ on EventSource for this,
+    but until then this (meta)class does the equivalent work.
+
+    TODO: when we drop support for 3.5 drop this class, and rename _set_name in
+    EventSource to __set_name__; everything should continue to work.
+
+    """
+
+    def __new__(typ, *a, **kw):
+        k = super().__new__(typ, *a, **kw)
+        # k is now the Object-derived class; loop over its class attributes
+        for n, v in vars(k).items():
+            # we could do duck typing here if we want to support
+            # non-EventSource-derived shenanigans. We don't.
+            if isinstance(v, EventSource):
+                # this is what 3.6+ does automatically for us:
+                v._set_name(k, n)
+        return k
+
+
+class Object(metaclass=_Metaclass):
+    """Base class of all the charm-related objects."""
+
+    handle_kind = HandleKind()
+
+    def __init__(self, parent, key):
+        kind = self.handle_kind
+        if isinstance(parent, Framework):
+            self.framework = parent
+            # Avoid Framework instances having a circular reference to themselves.
+            if self.framework is self:
+                self.framework = weakref.proxy(self.framework)
+            self.handle = Handle(None, kind, key)
+        else:
+            self.framework = parent.framework
+            self.handle = Handle(parent, kind, key)
+        self.framework._track(self)
+
+        # TODO Detect conflicting handles here.
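+    # Editor's note (not upstream): handles compose hierarchical paths that the
+    # framework uses as storage keys, e.g. (hypothetical names):
+    #
+    #     h = Handle(None, 'MyCharm', None).nest('on', None).nest('install', '1')
+    #     str(h)                          # -> 'MyCharm/on/install[1]'
+    #     Handle.from_path(str(h)) == h   # -> True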
+
+    @property
+    def model(self):
+        """Shortcut for simpler access to the model."""
+        return self.framework.model
+
+
+class ObjectEvents(Object):
+    """Convenience type to allow defining .on attributes at class level."""
+
+    handle_kind = "on"
+
+    def __init__(self, parent=None, key=None):
+        if parent is not None:
+            super().__init__(parent, key)
+        else:
+            self._cache = weakref.WeakKeyDictionary()
+
+    def __get__(self, emitter, emitter_type):
+        if emitter is None:
+            return self
+        instance = self._cache.get(emitter)
+        if instance is None:
+            # Same type, different instance, more data. Doing this unusual construct
+            # means people can subclass just this one class to have their own 'on'.
+            instance = self._cache[emitter] = type(self)(emitter)
+        return instance
+
+    @classmethod
+    def define_event(cls, event_kind, event_type):
+        """Define an event on this type at runtime.
+
+        cls: a type to define an event on.
+
+        event_kind: an attribute name that will be used to access the
+            event. Must be a valid python identifier, not be a keyword
+            or an existing attribute.
+
+        event_type: a type of the event to define.
+
+        """
+        prefix = 'unable to define an event with event_kind that '
+        if not event_kind.isidentifier():
+            raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind)
+        elif keyword.iskeyword(event_kind):
+            raise RuntimeError(prefix + 'is a python keyword: ' + event_kind)
+        try:
+            getattr(cls, event_kind)
+            raise RuntimeError(
+                prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind))
+        except AttributeError:
+            pass
+
+        event_descriptor = EventSource(event_type)
+        event_descriptor._set_name(cls, event_kind)
+        setattr(cls, event_kind, event_descriptor)
+
+    def _event_kinds(self):
+        event_kinds = []
+        # We have to iterate over the class rather than instance to allow for properties which
+        # might call this method (e.g., event views), leading to infinite recursion.
+        for attr_name, attr_value in inspect.getmembers(type(self)):
+            if isinstance(attr_value, EventSource):
+                # We actually care about the bound_event, however, since it
+                # provides the most info for users of this method.
+                event_kinds.append(attr_name)
+        return event_kinds
+
+    def events(self):
+        """Return a mapping of event_kinds to bound_events for all available events."""
+        return {event_kind: getattr(self, event_kind) for event_kind in self._event_kinds()}
+
+    def __getitem__(self, key):
+        return PrefixedEvents(self, key)
+
+    def __repr__(self):
+        k = type(self)
+        event_kinds = ', '.join(sorted(self._event_kinds()))
+        return '<{}.{}: {}>'.format(k.__module__, k.__qualname__, event_kinds)
+
+
+class PrefixedEvents:
+    """Events to be found in all events using a specific prefix."""
+
+    def __init__(self, emitter, key):
+        self._emitter = emitter
+        self._prefix = key.replace("-", "_") + '_'
+
+    def __getattr__(self, name):
+        return getattr(self._emitter, self._prefix + name)
+
+
+class PreCommitEvent(EventBase):
+    """Events that will be emitted first on commit."""
+
+
+class CommitEvent(EventBase):
+    """Events that will be emitted second on commit."""
+
+
+class FrameworkEvents(ObjectEvents):
+    """Manager of all framework events."""
+    pre_commit = EventSource(PreCommitEvent)
+    commit = EventSource(CommitEvent)
+
+
+class NoTypeError(Exception):
+    """No class to hold it was found when restoring an event."""
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return "cannot restore {} since no class was registered for it".format(self.handle_path)
+
+
+# the message to show to the user when a pdb breakpoint goes active
+_BREAKPOINT_WELCOME_MESSAGE = """
+Starting pdb to debug charm operator.
+Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort.
+Future breakpoints may interrupt execution again.
+More details at https://discourse.jujucharms.com/t/debugging-charm-hooks
+
+"""
+
+
+_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$'
+
+
+class Framework(Object):
+    """Main interface from the Charm to the Operator Framework internals."""
+
+    on = FrameworkEvents()
+
+    # Override properties from Object so that we can set them in __init__.
+    model = None
+    meta = None
+    charm_dir = None
+
+    def __init__(self, storage, charm_dir, meta, model):
+
+        super().__init__(self, None)
+
+        self.charm_dir = charm_dir
+        self.meta = meta
+        self.model = model
+        self._observers = []      # [(observer_path, method_name, parent_path, event_key)]
+        self._observer = weakref.WeakValueDictionary()  # {observer_path: observer}
+        self._objects = weakref.WeakValueDictionary()
+        self._type_registry = {}  # {(parent_path, kind): cls}
+        self._type_known = set()  # {cls}
+
+        if isinstance(storage, (str, pathlib.Path)):
+            logger.warning(
+                "deprecated: Framework now takes a Storage not a path")
+            storage = SQLiteStorage(storage)
+        self._storage = storage
+
+        # We can't use the higher-level StoredState because it relies on events.
+        self.register_type(StoredStateData, None, StoredStateData.handle_kind)
+        stored_handle = Handle(None, StoredStateData.handle_kind, '_stored')
+        try:
+            self._stored = self.load_snapshot(stored_handle)
+        except NoSnapshotError:
+            self._stored = StoredStateData(self, '_stored')
+            self._stored['event_count'] = 0
+
+        # Flag to indicate that we already presented the welcome message in a debugger breakpoint
+        self._breakpoint_welcomed = False
+
+        # Parse the env var once, which may be used multiple times later
+        debug_at = os.environ.get('JUJU_DEBUG_AT')
+        self._juju_debug_at = debug_at.split(',') if debug_at else ()
+
+    def set_breakpointhook(self):
+        """Hook into sys.breakpointhook so the builtin breakpoint() works as expected.
+
+        This method is called by ``main``, and is not intended to be
+        called by users of the framework itself outside of perhaps
+        some testing scenarios.
+
+        It returns the old value of sys.breakpointhook.
+
+        The breakpoint function is a Python >= 3.7 feature.
+
+        This method was added in ops 1.0; before that, it was done as
+        part of the Framework's __init__.
+        """
+        old_breakpointhook = getattr(sys, 'breakpointhook', None)
+        if old_breakpointhook is not None:
+            # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do
+            # breakpoint()
+            sys.breakpointhook = self.breakpoint
+        return old_breakpointhook
+
+    def close(self):
+        """Close the underlying backends."""
+        self._storage.close()
+
+    def _track(self, obj):
+        """Track object and ensure it is the only object created using its handle path."""
+        if obj is self:
+            # Framework objects don't track themselves
+            return
+        if obj.handle.path in self.framework._objects:
+            raise RuntimeError(
+                'two objects claiming to be {} have been created'.format(obj.handle.path))
+        self._objects[obj.handle.path] = obj
+
+    def _forget(self, obj):
+        """Stop tracking the given object. See also _track."""
+        self._objects.pop(obj.handle.path, None)
+
+    def commit(self):
+        """Save changes to the underlying backends."""
+        # Give a chance for objects to persist data they want to before a commit is made.
+        self.on.pre_commit.emit()
+        # Make sure snapshots are saved by instances of StoredStateData. Any possible state
+        # modifications in on_commit handlers of instances of other classes will not be persisted.
+        self.on.commit.emit()
+        # Save our event count after all events have been emitted.
+        self.save_snapshot(self._stored)
+        self._storage.commit()
+
+    def register_type(self, cls, parent, kind=None):
+        """Register a type to a handle."""
+        if parent and not isinstance(parent, Handle):
+            parent = parent.handle
+        if parent:
+            parent_path = parent.path
+        else:
+            parent_path = None
+        if not kind:
+            kind = cls.handle_kind
+        self._type_registry[(parent_path, kind)] = cls
+        self._type_known.add(cls)
+
+    def save_snapshot(self, value):
+        """Save a persistent snapshot of the provided value.
+
+        The provided value must implement the following interface:
+
+        value.handle = Handle(...)
+        value.snapshot() => {...}  # Simple builtin types only.
+        value.restore(snapshot)    # Restore custom state from prior snapshot.
+        """
+        if type(value) not in self._type_known:
+            raise RuntimeError(
+                'cannot save {} values before registering that type'.format(type(value).__name__))
+        data = value.snapshot()
+
+        # Use marshal as a validator, enforcing the use of simple types, because
+        # later the information is really pickled, which is too error prone for
+        # future evolution of the stored data (e.g. if the developer stores a
+        # custom object and later changes its class name; when unpickling the
+        # original class will not be there and event data loading will fail).
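+        # Editor's note, a hypothetical illustration of what this check admits:
+        # {'event_count': 1, 'names': ['a', 'b']} marshals fine, while a value
+        # holding e.g. a datetime or a custom class instance raises the
+        # ValueError below.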
+        try:
+            marshal.dumps(data)
+        except ValueError:
+            msg = "unable to save the data for {}, it must contain only simple types: {!r}"
+            raise ValueError(msg.format(value.__class__.__name__, data))
+
+        self._storage.save_snapshot(value.handle.path, data)
+
+    def load_snapshot(self, handle):
+        """Load a persistent snapshot."""
+        parent_path = None
+        if handle.parent:
+            parent_path = handle.parent.path
+        cls = self._type_registry.get((parent_path, handle.kind))
+        if not cls:
+            raise NoTypeError(handle.path)
+        data = self._storage.load_snapshot(handle.path)
+        obj = cls.__new__(cls)
+        obj.framework = self
+        obj.handle = handle
+        obj.restore(data)
+        self._track(obj)
+        return obj
+
+    def drop_snapshot(self, handle):
+        """Discard a persistent snapshot."""
+        self._storage.drop_snapshot(handle.path)
+
+    def observe(self, bound_event: BoundEvent, observer: types.MethodType):
+        """Register observer to be called when bound_event is emitted.
+
+        The bound_event is generally provided as an attribute of the object that emits
+        the event, and is created in this style::
+
+            class SomeObject:
+                something_happened = EventSource(SomethingHappened)
+
+        That event may be observed as::
+
+            framework.observe(someobj.something_happened, self._on_something_happened)
+
+        Raises:
+            RuntimeError: if bound_event or observer are the wrong type.
+        """
+        if not isinstance(bound_event, BoundEvent):
+            raise RuntimeError(
+                'Framework.observe requires a BoundEvent as second parameter, got {}'.format(
+                    bound_event))
+        if not isinstance(observer, types.MethodType):
+            # help users of older versions of the framework
+            if isinstance(observer, charm.CharmBase):
+                raise TypeError(
+                    'observer methods must now be explicitly provided;'
+                    ' please replace observe(self.on.{0}, self)'
+                    ' with e.g. observe(self.on.{0}, self._on_{0})'.format(
+                        bound_event.event_kind))
+            raise RuntimeError(
+                'Framework.observe requires a method as third parameter, got {}'.format(observer))
+
+        event_type = bound_event.event_type
+        event_kind = bound_event.event_kind
+        emitter = bound_event.emitter
+
+        self.register_type(event_type, emitter, event_kind)
+
+        if hasattr(emitter, "handle"):
+            emitter_path = emitter.handle.path
+        else:
+            raise RuntimeError(
+                'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__))
+
+        # Validate that the method has an acceptable call signature.
+        sig = inspect.signature(observer)
+        # Self isn't included in the params list, so the first arg will be the event.
+        extra_params = list(sig.parameters.values())[1:]
+
+        method_name = observer.__name__
+        observer = observer.__self__
+        if not sig.parameters:
+            raise TypeError(
+                '{}.{} must accept event parameter'.format(type(observer).__name__, method_name))
+        elif any(param.default is inspect.Parameter.empty for param in extra_params):
+            # Allow for additional optional params, since there's no reason to exclude them, but
+            # required params will break.
+            raise TypeError(
+                '{}.{} has extra required parameter'.format(type(observer).__name__, method_name))
+
+        # TODO Prevent the exact same parameters from being registered more than once.
+
+        self._observer[observer.handle.path] = observer
+        self._observers.append((observer.handle.path, method_name, emitter_path, event_kind))
+
+    def _next_event_key(self):
+        """Return the next event key that should be used, incrementing the internal counter."""
+        # Increment the count first; this means the keys will start at 1, and 0
+        # means no events have been emitted.
+ self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + if single_event_path is None: + logger.debug("Re-emitting %s.", event) + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. 
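+
+        A hedged example (an editor's addition, not upstream documentation)::
+
+            self.framework.breakpoint('my-check')
+
+        With ``JUJU_DEBUG_AT=my-check`` (or ``JUJU_DEBUG_AT=all``) set for the
+        unit, this call drops into pdb; otherwise it is a no-op.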
+ + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + """Manager of the stored data.""" + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + """Return the current state.""" + return self._cache + + def restore(self, snapshot): + """Restore current state to the given snapshot.""" + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + """Save changes to the storage backend.""" + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + """Stored state data bound to a specific Object.""" + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. + self.__dict__["_data"] = data + self.__dict__["_attr_name"] = attr_name + + parent.framework.observe(parent.framework.on.commit, self._data.on_commit) + + def __getattr__(self, key): + # "on" is the only reserved key that can't be used in the data map. 
+ if key == "on": + return self._data.on + if key not in self._data: + raise AttributeError("attribute '{}' is not stored".format(key)) + return _wrap_stored(self._data, self._data[key]) + + def __setattr__(self, key, value): + if key == "on": + raise AttributeError("attribute 'on' is reserved and cannot be set") + + value = _unwrap_stored(self._data, value) + + if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)): + raise AttributeError( + 'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format( + key, type(value).__name__)) + + self._data[key] = _unwrap_stored(self._data, value) + + def set_default(self, **kwargs): + """Set the value of any given key if it has not already been set.""" + for k, v in kwargs.items(): + if k not in self._data: + self._data[k] = v + + +class StoredState: + """A class used to store data the charm needs persisted across invocations. + + Example:: + + class MyClass(Object): + _stored = StoredState() + + Instances of `MyClass` can transparently save state between invocations by + setting attributes on `_stored`. Initial state should be set with + `set_default` on the bound object, that is:: + + class MyClass(Object): + _stored = StoredState() + + def __init__(self, parent, key): + super().__init__(parent, key) + self._stored.set_default(seen=set()) + self.framework.observe(self.on.seen, self._on_seen) + + def _on_seen(self, event): + self._stored.seen.add(event.uuid) + + """ + + def __init__(self): + self.parent_type = None + self.attr_name = None + + def __get__(self, parent, parent_type=None): + if self.parent_type is not None and self.parent_type not in parent_type.mro(): + # the StoredState instance is being shared between two unrelated classes + # -> unclear what is exepcted of us -> bail out + raise RuntimeError( + 'StoredState shared by {} and {}'.format( + self.parent_type.__name__, parent_type.__name__)) + + if parent is None: + # accessing via the class directly (e.g. MyClass.stored) + return self + + bound = None + if self.attr_name is not None: + bound = parent.__dict__.get(self.attr_name) + if bound is not None: + # we already have the thing from a previous pass, huzzah + return bound + + # need to find ourselves amongst the parent's bases + for cls in parent_type.mro(): + for attr_name, attr_value in cls.__dict__.items(): + if attr_value is not self: + continue + # we've found ourselves! is it the first time? 
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +def _wrapped_repr(obj): + t = type(obj) + if obj._under: + return "{}.{}({!r})".format(t.__module__, t.__name__, obj._under) + else: + return "{}.{}()".format(t.__module__, t.__name__) + + +class StoredDict(collections.abc.MutableMapping): + """A dict-like object that uses the StoredState as backend.""" + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + __repr__ = _wrapped_repr + + +class StoredList(collections.abc.MutableSequence): + """A list-like object that uses the StoredState as backend.""" + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + """Insert value before index.""" + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + """Append value to the end of the list.""" + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + 
return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + __repr__ = _wrapped_repr + + +class StoredSet(collections.abc.MutableSet): + """A set-like object that uses the StoredState as backend.""" + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + """Add a key to a set. + + This has no effect if the key is already present. + """ + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + """Remove a key from a set if it is a member. + + If the key is not a member, do nothing. + """ + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented + + __repr__ = _wrapped_repr diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/jujuversion.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/jujuversion.py new file mode 100644 index 0000000000000000000000000000000000000000..61d420d369d9b0e75b9c2c242574ddcd4b89be51 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/jujuversion.py @@ -0,0 +1,114 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A helper to work with the Juju version."""
+
+import os
+import re
+from functools import total_ordering
+
+
+@total_ordering
+class JujuVersion:
+    """Helper to work with the Juju version.
+
+    It knows how to parse the ``JUJU_VERSION`` environment variable, and exposes different
+    capabilities according to the specific version, allowing also to compare with other
+    versions.
+    """
+
+    PATTERN = r'''^
+    (?P<major>\d{1,9})\.(?P<minor>\d{1,9})       # <major> and <minor> numbers are always there
+    ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch>
+    (\.(?P<build>\d{1,9}))?$                     # and sometimes with a <build> number.
+    '''
+
+    def __init__(self, version):
+        m = re.match(self.PATTERN, version, re.VERBOSE)
+        if not m:
+            raise RuntimeError('"{}" is not a valid Juju version string'.format(version))
+
+        d = m.groupdict()
+        self.major = int(m.group('major'))
+        self.minor = int(m.group('minor'))
+        self.tag = d['tag'] or ''
+        self.patch = int(d['patch'] or 0)
+        self.build = int(d['build'] or 0)
+
+    def __repr__(self):
+        if self.tag:
+            s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch)
+        else:
+            s = '{}.{}.{}'.format(self.major, self.minor, self.patch)
+        if self.build > 0:
+            s += '.{}'.format(self.build)
+        return s
+
+    def __eq__(self, other):
+        if self is other:
+            return True
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+        return (
+            self.major == other.major
+            and self.minor == other.minor
+            and self.tag == other.tag
+            and self.build == other.build
+            and self.patch == other.patch)
+
+    def __lt__(self, other):
+        if self is other:
+            return False
+        if isinstance(other, str):
+            other = type(self)(other)
+        elif not isinstance(other, JujuVersion):
+            raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other))
+
+        if self.major != other.major:
+            return self.major < other.major
+        elif self.minor != other.minor:
+            return self.minor < other.minor
+        elif self.tag != other.tag:
+            if not self.tag:
+                return False
+            elif not other.tag:
+                return True
+            return self.tag < other.tag
+        elif self.patch != other.patch:
+            return self.patch < other.patch
+        elif self.build != other.build:
+            return self.build < other.build
+        return False
+
+    @classmethod
+    def from_environ(cls) -> 'JujuVersion':
+        """Build a JujuVersion from JUJU_VERSION."""
+        v = os.environ.get('JUJU_VERSION')
+        if v is None:
+            v = '0.0.0'
+        return cls(v)
+
+    def has_app_data(self) -> bool:
+        """Determine whether this juju version knows about app data."""
+        return (self.major, self.minor, self.patch) >= (2, 7, 0)
+
+    def is_dispatch_aware(self) -> bool:
+        """Determine whether this juju version knows about dispatch."""
+        return (self.major, self.minor, self.patch) >= (2, 8, 0)
+
+    def has_controller_storage(self) -> bool:
+        """Determine whether this juju version supports controller-side storage."""
+        return (self.major, self.minor, self.patch) >= (2, 8, 0)
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/lib/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..22b5a84e517df8a061b7ca2742678536a481b616
--- /dev/null
+++
b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/lib/__init__.py @@ -0,0 +1,264 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Infrastructure for the opslib functionality.""" + +import logging +import os +import re +import sys + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType +from typing import List + +__all__ = ('use', 'autoimport') + +logger = logging.getLogger(__name__) + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. +_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''') + + +def use(name: str, api: int, author: str) -> ModuleType: + """Use a library from the ops libraries. + + Args: + name: the name of the library requested. + api: the API version of the library. + author: the author of the library. If not given, requests the + one in the standard library. + + Raises: + ImportError: if the library cannot be found. + TypeError: if the name, api, or author are the wrong type. + ValueError: if the name, api, or author are invalid. + """ + if not isinstance(name, str): + raise TypeError("invalid library name: {!r} (must be a str)".format(name)) + if not isinstance(author, str): + raise TypeError("invalid library author: {!r} (must be a str)".format(author)) + if not isinstance(api, int): + raise TypeError("invalid library API: {!r} (must be an int)".format(api)) + if api < 0: + raise ValueError('invalid library api: {} (must be ≥0)'.format(api)) + if not _libname_re.match(name): + raise ValueError("invalid library name: {!r} (chars and digits only)".format(name)) + if not _libauthor_re.match(author): + raise ValueError("invalid library author email: {!r}".format(author)) + + if _libraries is None: + autoimport() + + versions = _libraries.get((name, author), ()) + for lib in versions: + if lib.api == api: + return lib.import_module() + + others = ', '.join(str(lib.api) for lib in versions) + if others: + msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format( + name, author, api, others) + else: + msg = 'cannot find library "{}" from "{}"'.format(name, author) + + raise ImportError(msg, name=name) + + +def autoimport(): + """Find all libs in the path and enable use of them. + + You only need to call this if you've installed a package or + otherwise changed sys.path in the current run, and need to see the + changes. Otherwise libraries are found on first call of `use`. 
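+
+    A hedged sketch (an editor's addition; ``somepkg`` is a hypothetical
+    distribution that ships an ``opslib`` subpackage)::
+
+        import subprocess, sys
+        subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'somepkg'])
+
+        import ops.lib
+        ops.lib.autoimport()  # rescan sys.path so the new lib is visible to use()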
+ """ + global _libraries + _libraries = {} + for spec in _find_all_specs(sys.path): + lib = _parse_lib(spec) + if lib is None: + continue + + versions = _libraries.setdefault((lib.name, lib.author), []) + versions.append(lib) + versions.sort(reverse=True) + + +def _find_all_specs(path): + for sys_dir in path: + if sys_dir == "": + sys_dir = "." + try: + top_dirs = os.listdir(sys_dir) + except (FileNotFoundError, NotADirectoryError): + continue + except OSError as e: + logger.debug("Tried to look for ops.lib packages under '%s': %s", sys_dir, e) + continue + logger.debug("Looking for ops.lib packages under '%s'", sys_dir) + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except (FileNotFoundError, NotADirectoryError): + continue + except OSError as e: + logger.debug(" Tried '%s': %s", opslib, e) # *lots* of things checked here + continue + else: + logger.debug(" Trying '%s'", opslib) + finder = get_importer(opslib) + if finder is None: + logger.debug(" Finder for '%s' is None", opslib) + continue + if not hasattr(finder, 'find_spec'): + logger.debug(" Finder for '%s' has no find_spec", opslib) + continue + for lib_dir in lib_dirs: + spec_name = "{}.opslib.{}".format(top_dir, lib_dir) + spec = finder.find_spec(spec_name) + if spec is None: + logger.debug(" No spec for %r", spec_name) + continue + if spec.loader is None: + # a namespace package; not supported + logger.debug(" No loader for %r (probably a namespace package)", spec_name) + continue + + logger.debug(" Found %r", spec_name) + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 +# these keys, with these types, are needed to have an opslib +_NEEDED_KEYS = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + +def _join_and(keys: List[str]) -> str: + if len(keys) == 0: + return "" + if len(keys) == 1: + return keys[0] + return ", ".join(keys[:-1]) + ", and " + keys[-1] + + +class _Missing: + """Helper to get the difference between what was found and what was needed when logging.""" + + def __init__(self, found): + self._found = found + + def __str__(self): + exp = set(_NEEDED_KEYS) + got = set(self._found) + if len(got) == 0: + return "missing {}".format(_join_and(sorted(exp))) + return "got {}, but missing {}".format( + _join_and(sorted(got)), + _join_and(sorted(exp - got))) + + +def _parse_lib(spec): + if spec.origin is None: + # "can't happen" + logger.warning("No origin for %r (no idea why; please report)", spec.name) + return None + + logger.debug(" Parsing %r", spec.name) + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_NEEDED_KEYS): + break + if n > _MAX_LIB_LINES: + logger.debug( + " Missing opslib metadata after reading to line %d: %s", + _MAX_LIB_LINES, _Missing(libinfo)) + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _NEEDED_KEYS: + value = literal_eval(value) + if not isinstance(value, _NEEDED_KEYS[key]): + logger.debug( + " Bad type for %s: expected %s, got %s", + key, _NEEDED_KEYS[key].__name__, type(value).__name__) + return None + libinfo[key] = value + else: + if len(libinfo) != len(_NEEDED_KEYS): + logger.debug( + " Missing opslib metadata after reading to end of file: %s", + _Missing(libinfo)) + return None + except Exception as e: + logger.debug(" Failed: %s", e) + return None + + lib = _Lib(spec, libinfo['NAME'], 
libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + logger.debug(" Success: found library %s", lib) + + return lib + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {}>".format(self) + + def __str__(self): + return "{0.name} by {0.author}, API {0.api}, patch {0.patch}".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/log.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/log.py new file mode 100644 index 0000000000000000000000000000000000000000..b47013dd597c5500ea763d9e4beada10e6f2ca87 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/log.py @@ -0,0 +1,58 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Interface to emit messages to the Juju logging system.""" + +import sys +import logging + + +class JujuLogHandler(logging.Handler): + """A handler for sending logs to Juju via juju-log.""" + + def __init__(self, model_backend, level=logging.DEBUG): + super().__init__(level) + self.model_backend = model_backend + + def emit(self, record): + """Send the specified logging record to the Juju backend. + + This method is not used directly by the Operator Framework code, but by + :class:`logging.Handler` itself as part of the logging machinery. + """ + self.model_backend.juju_log(record.levelname, self.format(record)) + + +def setup_root_logging(model_backend, debug=False): + """Setup python logging to forward messages to juju-log. + + By default, logging is set to DEBUG level, and messages will be filtered by Juju. + Charmers can also set their own default log level with:: + + logging.getLogger().setLevel(logging.INFO) + + model_backend -- a ModelBackend to use for juju-log + debug -- if True, write logs to stderr as well as to juju-log. 
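+
+    This is normally called for you by ``ops.main.main``; as a sketch of the
+    equivalent wiring::
+
+        import os
+        import ops.model
+
+        backend = ops.model._ModelBackend()
+        setup_root_logging(backend, debug='JUJU_DEBUG' in os.environ)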
+ """ + logger = logging.getLogger() + logger.setLevel(logging.DEBUG) + logger.addHandler(JujuLogHandler(model_backend)) + if debug: + handler = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + sys.excepthook = lambda etype, value, tb: logger.error( + "Uncaught exception while in charm code:", exc_info=(etype, value, tb)) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/main.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f18f88ae0eff807e065e5df778a16cd83ca560eb --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/main.py @@ -0,0 +1,406 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Main entry point to the Operator Framework.""" + +import inspect +import logging +import os +import shutil +import subprocess +import sys +import typing +import warnings +from pathlib import Path + +import yaml + +import ops.charm +import ops.framework +import ops.model +import ops.storage + +from ops.log import setup_root_logging +from ops.jujuversion import JujuVersion + +CHARM_STATE_FILE = '.unit-state.db' + + +logger = logging.getLogger() + + +def _exe_path(path: Path) -> typing.Optional[Path]: + """Find and return the full path to the given binary. + + Here path is the absolute path to a binary, but might be missing an extension. + """ + p = shutil.which(path.name, mode=os.F_OK, path=str(path.parent)) + if p is None: + return None + return Path(p) + + +def _get_charm_dir(): + charm_dir = os.environ.get("JUJU_CHARM_DIR") + if charm_dir is None: + # Assume $JUJU_CHARM_DIR/lib/op/main.py structure. + charm_dir = Path('{}/../../..'.format(__file__)).resolve() + else: + charm_dir = Path(charm_dir).resolve() + return charm_dir + + +def _create_event_link(charm, bound_event, link_to): + """Create a symlink for a particular event. + + charm -- A charm object. + bound_event -- An event for which to create a symlink. + link_to -- What the event link should point to + """ + if issubclass(bound_event.event_type, ops.charm.HookEvent): + event_dir = charm.framework.charm_dir / 'hooks' + event_path = event_dir / bound_event.event_kind.replace('_', '-') + elif issubclass(bound_event.event_type, ops.charm.ActionEvent): + if not bound_event.event_kind.endswith("_action"): + raise RuntimeError( + 'action event name {} needs _action suffix'.format(bound_event.event_kind)) + event_dir = charm.framework.charm_dir / 'actions' + # The event_kind is suffixed with "_action" while the executable is not. 
+ event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-') + else: + raise RuntimeError( + 'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type)) + + event_dir.mkdir(exist_ok=True) + if not event_path.exists(): + target_path = os.path.relpath(link_to, str(event_dir)) + + # Ignore the non-symlink files or directories + # assuming the charm author knows what they are doing. + logger.debug( + 'Creating a new relative symlink at %s pointing to %s', + event_path, target_path) + event_path.symlink_to(target_path) + + +def _setup_event_links(charm_dir, charm): + """Set up links for supported events that originate from Juju. + + Whether a charm can handle an event or not can be determined by + introspecting which events are defined on it. + + Hooks or actions are created as symlinks to the charm code file + which is determined by inspecting symlinks provided by the charm + author at hooks/install or hooks/start. + + charm_dir -- A root directory of the charm. + charm -- An instance of the Charm class. + + """ + # XXX: on windows this function does not accomplish what it wants to: + # it creates symlinks with no extension pointing to a .py + # and juju only knows how to handle .exe, .bat, .cmd, and .ps1 + # so it does its job, but does not accomplish anything as the + # hooks aren't 'callable'. + link_to = os.path.realpath(os.environ.get("JUJU_DISPATCH_PATH", sys.argv[0])) + for bound_event in charm.on.events().values(): + # Only events that originate from Juju need symlinks. + if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)): + _create_event_link(charm, bound_event, link_to) + + +def _emit_charm_event(charm, event_name): + """Emits a charm event based on a Juju event name. + + charm -- A charm instance to emit an event from. + event_name -- A Juju event name to emit on a charm. + """ + event_to_emit = None + try: + event_to_emit = getattr(charm.on, event_name) + except AttributeError: + logger.debug("Event %s not defined for %s.", event_name, charm) + + # If the event is not supported by the charm implementation, do + # not error out or try to emit it. This is to support rollbacks. + if event_to_emit is not None: + args, kwargs = _get_event_args(charm, event_to_emit) + logger.debug('Emitting Juju event %s.', event_name) + event_to_emit.emit(*args, **kwargs) + + +def _get_event_args(charm, bound_event): + event_type = bound_event.event_type + model = charm.framework.model + + if issubclass(event_type, ops.charm.RelationEvent): + relation_name = os.environ['JUJU_RELATION'] + relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1]) + relation = model.get_relation(relation_name, relation_id) + else: + relation = None + + remote_app_name = os.environ.get('JUJU_REMOTE_APP', '') + remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '') + if remote_app_name or remote_unit_name: + if not remote_app_name: + if '/' not in remote_unit_name: + raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name)) + remote_app_name = remote_unit_name.split('/')[0] + args = [relation, model.get_app(remote_app_name)] + if remote_unit_name: + args.append(model.get_unit(remote_unit_name)) + return args, {} + elif relation: + return [relation], {} + return [], {} + + +class _Dispatcher: + """Encapsulate how to figure out what event Juju wants us to run. + + Also knows how to run “legacy” hooks when Juju called us via a top-level + ``dispatch`` binary. 
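+
+    For example, a dispatch-aware Juju runs the ``dispatch`` binary with
+    ``JUJU_DISPATCH_PATH`` set to something like ``hooks/install``, while an
+    older Juju invokes the per-event symlink (``hooks/install``, or
+    ``actions/do-backup`` for a hypothetical action) directly.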
+ + Args: + charm_dir: the toplevel directory of the charm + + Attributes: + event_name: the name of the event to run + is_dispatch_aware: are we running under a Juju that knows about the + dispatch binary, and is that binary present? + + """ + + def __init__(self, charm_dir: Path): + self._charm_dir = charm_dir + self._exec_path = Path(os.environ.get('JUJU_DISPATCH_PATH', sys.argv[0])) + + dispatch = charm_dir / 'dispatch' + if JujuVersion.from_environ().is_dispatch_aware() and _exe_path(dispatch) is not None: + self._init_dispatch() + else: + self._init_legacy() + + def ensure_event_links(self, charm): + """Make sure necessary symlinks are present on disk.""" + if self.is_dispatch_aware: + # links aren't needed + return + + # When a charm is force-upgraded and a unit is in an error state Juju + # does not run upgrade-charm and instead runs the failed hook followed + # by config-changed. Given the nature of force-upgrading the hook setup + # code is not triggered on config-changed. + # + # 'start' event is included as Juju does not fire the install event for + # K8s charms (see LP: #1854635). + if (self.event_name in ('install', 'start', 'upgrade_charm') + or self.event_name.endswith('_storage_attached')): + _setup_event_links(self._charm_dir, charm) + + def run_any_legacy_hook(self): + """Run any extant legacy hook. + + If there is both a dispatch file and a legacy hook for the + current event, run the wanted legacy hook. + """ + if not self.is_dispatch_aware: + # we *are* the legacy hook + return + + dispatch_path = _exe_path(self._charm_dir / self._dispatch_path) + if dispatch_path is None: + logger.debug("Legacy %s does not exist.", self._dispatch_path) + return + + # super strange that there isn't an is_executable + if not os.access(str(dispatch_path), os.X_OK): + logger.warning("Legacy %s exists but is not executable.", self._dispatch_path) + return + + if dispatch_path.resolve() == Path(sys.argv[0]).resolve(): + logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path) + return + + argv = sys.argv.copy() + argv[0] = str(dispatch_path) + logger.info("Running legacy %s.", self._dispatch_path) + try: + subprocess.run(argv, check=True) + except subprocess.CalledProcessError as e: + logger.warning("Legacy %s exited with status %d.", self._dispatch_path, e.returncode) + sys.exit(e.returncode) + except OSError as e: + logger.warning("Unable to run legacy %s: %s", self._dispatch_path, e) + sys.exit(1) + else: + logger.debug("Legacy %s exited with status 0.", self._dispatch_path) + + def _set_name_from_path(self, path: Path): + """Sets the name attribute to that which can be inferred from the given path.""" + name = path.name.replace('-', '_') + if path.parent.name == 'actions': + name = '{}_action'.format(name) + self.event_name = name + + def _init_legacy(self): + """Set up the 'legacy' dispatcher. + + The current Juju doesn't know about 'dispatch' and calls hooks + explicitly. + """ + self.is_dispatch_aware = False + self._set_name_from_path(self._exec_path) + + def _init_dispatch(self): + """Set up the new 'dispatch' dispatcher. + + The current Juju will run 'dispatch' if it exists, and otherwise fall + back to the old behaviour. + + JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install, + in both cases. 
+ """ + self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH']) + + if 'OPERATOR_DISPATCH' in os.environ: + logger.debug("Charm called itself via %s.", self._dispatch_path) + sys.exit(0) + os.environ['OPERATOR_DISPATCH'] = '1' + + self.is_dispatch_aware = True + self._set_name_from_path(self._dispatch_path) + + def is_restricted_context(self): + """Return True if we are running in a restricted Juju context. + + When in a restricted context, most commands (relation-get, config-get, + state-get) are not available. As such, we change how we interact with + Juju. + """ + return self.event_name in ('collect_metrics',) + + +def _should_use_controller_storage(db_path: Path, meta: ops.charm.CharmMeta) -> bool: + """Figure out whether we want to use controller storage or not.""" + # if you've previously used local state, carry on using that + if db_path.exists(): + logger.debug("Using local storage: %s already exists", db_path) + return False + + # if you're not in k8s you don't need controller storage + if 'kubernetes' not in meta.series: + logger.debug("Using local storage: not a kubernetes charm") + return False + + # are we in a new enough Juju? + cur_version = JujuVersion.from_environ() + + if cur_version.has_controller_storage(): + logger.debug("Using controller storage: JUJU_VERSION=%s", cur_version) + return True + else: + logger.debug("Using local storage: JUJU_VERSION=%s", cur_version) + return False + + +def main(charm_class: ops.charm.CharmBase, use_juju_for_storage: bool = None): + """Setup the charm and dispatch the observed event. + + The event name is based on the way this executable was called (argv[0]). + + Args: + charm_class: your charm class. + use_juju_for_storage: whether to use controller-side storage. If not specified + then kubernetes charms that haven't previously used local storage and that + are running on a new enough Juju default to controller-side storage, + otherwise local storage is used. + """ + charm_dir = _get_charm_dir() + + model_backend = ops.model._ModelBackend() + debug = ('JUJU_DEBUG' in os.environ) + setup_root_logging(model_backend, debug=debug) + logger.debug("Operator Framework %s up and running.", ops.__version__) + + dispatcher = _Dispatcher(charm_dir) + dispatcher.run_any_legacy_hook() + + metadata = (charm_dir / 'metadata.yaml').read_text() + actions_meta = charm_dir / 'actions.yaml' + if actions_meta.exists(): + actions_metadata = actions_meta.read_text() + else: + actions_metadata = None + + if not yaml.__with_libyaml__: + logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader') + meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata) + model = ops.model.Model(meta, model_backend) + + charm_state_path = charm_dir / CHARM_STATE_FILE + + if use_juju_for_storage and not ops.storage.juju_backend_available(): + # raise an exception; the charm is broken and needs fixing. + msg = 'charm set use_juju_for_storage=True, but Juju version {} does not support it' + raise RuntimeError(msg.format(JujuVersion.from_environ())) + + if use_juju_for_storage is None: + use_juju_for_storage = _should_use_controller_storage(charm_state_path, meta) + + if use_juju_for_storage: + if dispatcher.is_restricted_context(): + # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event + # Though we eventually expect that juju will run collect-metrics in a + # non-restricted context. 
Once we can determine that we are running collect-metrics + # in a non-restricted context, we should fire the event as normal. + logger.debug('"%s" is not supported when using Juju for storage\n' + 'see: https://github.com/canonical/operator/issues/348', + dispatcher.event_name) + # Note that we don't exit nonzero, because that would cause Juju to rerun the hook + return + store = ops.storage.JujuStorage() + else: + store = ops.storage.SQLiteStorage(charm_state_path) + framework = ops.framework.Framework(store, charm_dir, meta, model) + framework.set_breakpointhook() + try: + sig = inspect.signature(charm_class) + try: + sig.bind(framework) + except TypeError: + msg = ( + "the second argument, 'key', has been deprecated and will be " + "removed after the 0.7 release") + warnings.warn(msg, DeprecationWarning) + charm = charm_class(framework, None) + else: + charm = charm_class(framework) + dispatcher.ensure_event_links(charm) + + # TODO: Remove the collect_metrics check below as soon as the relevant + # Juju changes are made. Also adjust the docstring on + # EventBase.defer(). + # + # Skip reemission of deferred events for collect-metrics events because + # they do not have the full access to all hook tools. + if not dispatcher.is_restricted_context(): + framework.reemit() + + _emit_charm_event(charm, dispatcher.event_name) + + framework.commit() + finally: + framework.close() diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/model.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d446d63647807db570192c690a6d245244b8f19d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/model.py @@ -0,0 +1,1314 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Representations of Juju's model, application, unit, and other entities.""" + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError +import yaml + +import ops +from ops.jujuversion import JujuVersion + + +if yaml.__with_libyaml__: + _DefaultDumper = yaml.CSafeDumper +else: + _DefaultDumper = yaml.SafeDumper + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. 
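+
+    Example (``hostname`` is a hypothetical config option)::
+
+        self.model.unit.status = ActiveStatus()
+        hostname = self.model.config.get('hostname')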
+ """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self._unit = self.get_unit(self._backend.unit_name) + self._relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self._config = ConfigData(self._backend) + self._resources = Resources(list(meta.resources), self._backend) + self._pod = Pod(self._backend) + self._storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def unit(self) -> 'Unit': + """A :class:`Unit` that represents the unit that is running this code (eg yourself).""" + return self._unit + + @property + def app(self): + """A :class:`Application` that represents the application this unit is a part of.""" + return self._unit.app + + @property + def relations(self) -> 'RelationMapping': + """Mapping of endpoint to list of :class:`Relation`. + + Answers the question "what am I currently related to". + See also :meth:`.get_relation`. + """ + return self._relations + + @property + def config(self) -> 'ConfigData': + """Return a mapping of config for the current application.""" + return self._config + + @property + def resources(self) -> 'Resources': + """Access to resources for this charm. + + Use ``model.resources.fetch(resource_name)`` to get the path on disk + where the resource can be found. + """ + return self._resources + + @property + def storages(self) -> 'StorageMapping': + """Mapping of storage_name to :class:`Storage` as defined in metadata.yaml.""" + return self._storages + + @property + def pod(self) -> 'Pod': + """Use ``model.pod.set_spec`` to set the container specification for Kubernetes charms.""" + return self._pod + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. + + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. 
+ + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. + """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. + InvalidStatusError: if you try to set the status to something that is not a + :class:`StatusBase` + + Example:: + + self.model.app.status = BlockedStatus('I need a human to come help me') + """ + if not self._is_our_app: + return UnknownStatus() + + if not self._backend.is_leader(): + raise RuntimeError('cannot get application status as a non-leader unit') + + if self._status: + return self._status + + s = self._backend.status_get(is_app=True) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for application {} status: {}'.format(self, value) + ) + + if not self._is_our_app: + raise RuntimeError('cannot to set status for a remote application {}'.format(self)) + + if not self._backend.is_leader(): + raise RuntimeError('cannot set application status as a non-leader unit') + + self._backend.status_set(value.name, value.message, is_app=True) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + +class Unit: + """Represents a named unit in the model. + + This might be your unit, another unit of your application, or a unit of another application + that you are related to. + + Attributes: + name: The name of the unit (eg, 'mysql/0') + app: The Application the unit is a part of. 
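+
+    Example (the version string is illustrative)::
+
+        self.model.unit.set_workload_version('1.2.3')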
+ """ + + def __init__(self, name, backend, cache): + self.name = name + + app_name = name.split('/')[0] + self.app = cache.get(Application, app_name) + + self._backend = backend + self._cache = cache + self._is_our_unit = self.name == self._backend.unit_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of a specific unit. + + The status of any unit other than yourself is always Unknown. + + Raises: + RuntimeError: if you try to set the status of a unit other than yourself. + InvalidStatusError: if you try to set the status to something other than + a :class:`StatusBase` + Example:: + + self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators') + """ + if not self._is_our_unit: + return UnknownStatus() + + if self._status: + return self._status + + s = self._backend.status_get(is_app=False) + self._status = StatusBase.from_name(s['status'], s['message']) + return self._status + + @status.setter + def status(self, value: 'StatusBase'): + if not isinstance(value, StatusBase): + raise InvalidStatusError( + 'invalid value provided for unit {} status: {}'.format(self, value) + ) + + if not self._is_our_unit: + raise RuntimeError('cannot set status for a remote unit {}'.format(self)) + + self._backend.status_set(value.name, value.message, is_app=False) + self._status = value + + def __repr__(self): + return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name) + + def is_leader(self) -> bool: + """Return whether this unit is the leader of its application. + + This can only be called for your own unit. + + Returns: + True if you are the leader, False otherwise + Raises: + RuntimeError: if called for a unit that is not yourself + """ + if self._is_our_unit: + # This value is not cached as it is not guaranteed to persist for the whole duration + # of a hook execution. + return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. 
+ """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + def __repr__(self): + return repr(self._data) + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. + raise TooManyRelatedAppsError(relation_name, num_related, 1) + + +class BindingMapping: + """Mapping of endpoints to network bindings. + + Charm authors should not instantiate this directly, but access it via + :meth:`Model.get_binding` + """ + + def __init__(self, backend): + self._backend = backend + self._data = {} + + def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a specific Binding for an endpoint/relation. + + Not used directly by Charm authors. 
See :meth:`Model.get_binding` + """ + if isinstance(binding_key, Relation): + binding_name = binding_key.name + relation_id = binding_key.id + elif isinstance(binding_key, str): + binding_name = binding_key + relation_id = None + else: + raise ModelError('binding key must be str or relation instance, not {}' + ''.format(type(binding_key).__name__)) + binding = self._data.get(binding_key) + if binding is None: + binding = Binding(binding_name, relation_id, self._backend) + self._data[binding_key] = binding + return binding + + +class Binding: + """Binding to a network space. + + Attributes: + name: The name of the endpoint this binding represents (eg, 'db') + """ + + def __init__(self, name, relation_id, backend): + self.name = name + self._relation_id = relation_id + self._backend = backend + self._network = None + + @property + def network(self) -> 'Network': + """The network information for this binding.""" + if self._network is None: + try: + self._network = Network(self._backend.network_get(self.name, self._relation_id)) + except RelationNotFoundError: + if self._relation_id is None: + raise + # If a relation is dead, we can still get network info associated with an + # endpoint itself + self._network = Network(self._backend.network_get(self.name)) + return self._network + + +class Network: + """Network space details. + + Charm authors should not instantiate this directly, but should get access to the Network + definition from :meth:`Model.get_binding` and its ``network`` attribute. + + Attributes: + interfaces: A list of :class:`NetworkInterface` details. This includes the + information about how your application should be configured (eg, what + IP addresses should you bind to.) + Note that multiple addresses for a single interface are represented as multiple + interfaces. (eg, ``[NetworkInfo('ens1', '10.1.1.1/32'), + NetworkInfo('ens1', '10.1.2.1/32'])``) + ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP + addresses that other units should use to get in touch with you. + egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that + other units will see you connecting from. Due to things like NAT it isn't always + possible to narrow it down to a single address, but when it is clear, the CIDRs + will be constrained to a single address. (eg, 10.0.0.1/32) + Args: + network_info: A dict of network information as returned by ``network-get``. + """ + + def __init__(self, network_info: dict): + self.interfaces = [] + # Treat multiple addresses on an interface as multiple logical + # interfaces with the same name. + for interface_info in network_info.get('bind-addresses', []): + interface_name = interface_info.get('interface-name') + for address_info in interface_info.get('addresses', []): + self.interfaces.append(NetworkInterface(interface_name, address_info)) + self.ingress_addresses = [] + for address in network_info.get('ingress-addresses', []): + self.ingress_addresses.append(ipaddress.ip_address(address)) + self.egress_subnets = [] + for subnet in network_info.get('egress-subnets', []): + self.egress_subnets.append(ipaddress.ip_network(subnet)) + + @property + def bind_address(self): + """A single address that your application should bind() to. + + For the common case where there is a single answer. This represents a single + address from :attr:`.interfaces` that can be used to configure where your + application should bind() and listen(). 
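+
+        Example (``db`` is a hypothetical endpoint name)::
+
+            binding = self.model.get_binding('db')
+            address = binding.network.bind_address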
+ """ + if self.interfaces: + return self.interfaces[0].address + else: + return None + + @property + def ingress_address(self): + """The address other applications should use to connect to your unit. + + Due to things like public/private addresses, NAT and tunneling, the address you bind() + to is not always the address other people can use to connect() to you. + This is just the first address from :attr:`.ingress_addresses`. + """ + if self.ingress_addresses: + return self.ingress_addresses[0] + else: + return None + + +class NetworkInterface: + """Represents a single network interface that the charm needs to know about. + + Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding` + to get the network information for a given endpoint. + + Attributes: + name: The name of the interface (eg. 'eth0', or 'ens1') + subnet: An :class:`ipaddress.ip_network` representation of the IP for the network + interface. This may be a single address (eg '10.0.1.2/32') + """ + + def __init__(self, name: str, address_info: dict): + self.name = name + # TODO: expose a hardware address here, see LP: #1864070. + address = address_info.get('value') + # The value field may be empty. + if address: + self.address = ipaddress.ip_address(address) + else: + self.address = None + cidr = address_info.get('cidr') + # The cidr field may be empty, see LP: #1864102. + if cidr: + self.subnet = ipaddress.ip_network(cidr) + elif address: + # If we have an address, convert it to a /32 or /128 IP network. + self.subnet = ipaddress.ip_network(address) + else: + self.subnet = None + # TODO: expose a hostname/canonical name for the address here, see LP: #1864086. + + +class Relation: + """Represents an established relation between this application and another application. + + This class should not be instantiated directly, instead use :meth:`Model.get_relation` + or :attr:`ops.charm.RelationEvent.relation`. + + Attributes: + name: The name of the local endpoint of the relation (eg 'db') + id: The identifier for a particular relation (integer) + app: An :class:`Application` representing the remote application of this relation. + For peer relations this will be the local application. + units: A set of :class:`Unit` for units that have started and joined this relation. + data: A :class:`RelationData` holding the data buckets for each entity + of a relation. Accessed via eg Relation.data[unit]['foo'] + """ + + def __init__( + self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit, + backend: '_ModelBackend', cache: '_ModelCache'): + self.name = relation_name + self.id = relation_id + self.app = None + self.units = set() + + # For peer relations, both the remote and the local app are the same. + if is_peer: + self.app = our_unit.app + try: + for unit_name in backend.relation_list(self.id): + unit = cache.get(Unit, unit_name) + self.units.add(unit) + if self.app is None: + self.app = unit.app + except RelationNotFoundError: + # If the relation is dead, just treat it as if it has no remote units. + pass + self.data = RelationData(self, our_unit, backend) + + def __repr__(self): + return '<{}.{} {}:{}>'.format(type(self).__module__, + type(self).__name__, + self.name, + self.id) + + +class RelationData(Mapping): + """Represents the various data buckets of a given relation. + + Each unit and application involved in a relation has their own data bucket. + Eg: ``{entity: RelationDataContent}`` + where entity can be either a :class:`Unit` or a :class:`Application`. 
+ + Units can read and write their own data, and if they are the leader, + they can read and write their application data. They are allowed to read + remote unit and application data. + + This class should not be created directly. It should be accessed via + :attr:`Relation.data` + """ + + def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'): + self.relation = weakref.proxy(relation) + self._data = { + our_unit: RelationDataContent(self.relation, our_unit, backend), + our_unit.app: RelationDataContent(self.relation, our_unit.app, backend), + } + self._data.update({ + unit: RelationDataContent(self.relation, unit, backend) + for unit in self.relation.units}) + # The relation might be dead so avoid a None key here. + if self.relation.app is not None: + self._data.update({ + self.relation.app: RelationDataContent(self.relation, self.relation.app, backend), + }) + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + def __repr__(self): + return repr(self._data) + + +# We mix in MutableMapping here to get some convenience implementations, but whether it's actually +# mutable or not is controlled by the flag. +class RelationDataContent(LazyMapping, MutableMapping): + """Data content of a unit or application in a relation.""" + + def __init__(self, relation, entity, backend): + self.relation = relation + self._entity = entity + self._backend = backend + self._is_app = isinstance(entity, Application) + + def _load(self): + """Load the data from the current entity / relation.""" + try: + return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app) + except RelationNotFoundError: + # Dead relations tell no tales (and have no data). + return {} + + def _is_mutable(self): + """Return if the data content can be modified.""" + if self._is_app: + is_our_app = self._backend.app_name == self._entity.name + if not is_our_app: + return False + # Whether the application data bag is mutable or not depends on + # whether this unit is a leader or not, but this is not guaranteed + # to be always true during the same hook execution. + return self._backend.is_leader() + else: + is_our_unit = self._backend.unit_name == self._entity.name + if is_our_unit: + return True + return False + + def __setitem__(self, key, value): + if not self._is_mutable(): + raise RelationDataError('cannot set relation data for {}'.format(self._entity.name)) + if not isinstance(value, str): + raise RelationDataError('relation data values must be strings') + + self._backend.relation_set(self.relation.id, key, value, self._is_app) + + # Don't load data unnecessarily if we're only updating. + if self._lazy_data is not None: + if value == '': + # Match the behavior of Juju, which is that setting the value to an + # empty string will remove the key entirely from the relation data. + self._data.pop(key, None) + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + """Configuration data. + + This class should not be created directly. It should be accessed via :attr:`Model.config`. 
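+
+    Example (``port`` is a hypothetical config option)::
+
+        port = self.model.config['port']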
+ """ + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. + """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + """Forbid the usage of StatusBase directly.""" + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + """Get the specific Status for the name (or UnknownStatus if not registered).""" + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + """Register a Status for the child's name.""" + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm.""" + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
+ """ + if name not in self._paths: + raise RuntimeError('invalid resource name: {}'.format(name)) + if self._paths[name] is None: + self._paths[name] = Path(self._backend.resource_get(name)) + return self._paths[name] + + +class Pod: + """Represents the definition of a pod spec in Kubernetes models. + + Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`. + """ + + def __init__(self, backend: '_ModelBackend'): + self._backend = backend + + def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None): + """Set the specification for pods that Juju should start in kubernetes. + + See `juju help-tool pod-spec-set` for details of what should be passed. + + Args: + spec: The mapping defining the pod specification + k8s_resources: Additional kubernetes specific specification. + + Returns: + None + """ + if not self._backend.is_leader(): + raise ModelError('cannot set a pod spec as this unit is not a leader') + self._backend.pod_spec_set(spec, k8s_resources) + + +class StorageMapping(Mapping): + """Map of storage names to lists of Storage instances.""" + + def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._storage_map = {storage_name: None for storage_name in storage_names} + + def __contains__(self, key: str): + return key in self._storage_map + + def __len__(self): + return len(self._storage_map) + + def __iter__(self): + return iter(self._storage_map) + + def __getitem__(self, storage_name: str) -> typing.List['Storage']: + storage_list = self._storage_map[storage_name] + if storage_list is None: + storage_list = self._storage_map[storage_name] = [] + for storage_id in self._backend.storage_list(storage_name): + storage_list.append(Storage(storage_name, storage_id, self._backend)) + return storage_list + + def request(self, storage_name: str, count: int = 1): + """Requests new storage instances of a given name. + + Uses storage-add tool to request additional storage. Juju will notify the unit + via -storage-attached events when it becomes available. + """ + if storage_name not in self._storage_map: + raise ModelError(('cannot add storage {!r}:' + ' it is not present in the charm metadata').format(storage_name)) + self._backend.storage_add(storage_name, count) + + +class Storage: + """Represents a storage as defined in metadata.yaml. 
+ + Attributes: + name: Simple string name of the storage + id: The provider id for storage + """ + + def __init__(self, storage_name, storage_id, backend): + self.name = storage_name + self.id = storage_id + self._backend = backend + self._location = None + + @property + def location(self): + """Return the location of the storage.""" + if self._location is None: + raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location") + self._location = Path(raw) + return self._location + + +class ModelError(Exception): + """Base class for exceptions raised when interacting with the Model.""" + pass + + +class TooManyRelatedAppsError(ModelError): + """Raised by :meth:`Model.get_relation` if there is more than one related application.""" + + def __init__(self, relation_name, num_related, max_supported): + super().__init__('Too many remote applications on {} ({} > {})'.format( + relation_name, num_related, max_supported)) + self.relation_name = relation_name + self.num_related = num_related + self.max_supported = max_supported + + +class RelationDataError(ModelError): + """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid. + + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. + + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. 
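+
+    Each method here shells out to the corresponding Juju hook tool
+    (``relation-get``, ``status-set``, ``juju-log``, and so on) via
+    :meth:`_run`.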
+ """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE, check=True) + args = (shutil.which(args[0]),) + args[1:] + if use_json: + args += ('--format=json',) + try: + result = run(args, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm code is executing on. + + The value is cached for the duration of a lease which is 30s in Juju. + """ + now = time.monotonic() + if self._leader_check_time is None: + check = True + else: + time_since_check = datetime.timedelta(seconds=now - self._leader_check_time) + check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None) + if check: + # Current time MUST be saved before running is-leader to ensure the cache + # is only used inside the window that is-leader itself asserts. 
+ self._leader_check_time = now + self._is_leader = self._run('is-leader', return_output=True, use_json=True) + + return self._is_leader + + def resource_get(self, resource_name): + return self._run('resource-get', resource_name, return_output=True).strip() + + def pod_spec_set(self, spec, k8s_resources): + tmpdir = Path(tempfile.mkdtemp('-pod-spec-set')) + try: + spec_path = tmpdir / 'spec.yaml' + with spec_path.open("wt", encoding="utf8") as f: + yaml.dump(spec, stream=f, Dumper=_DefaultDumper) + args = ['--file', str(spec_path)] + if k8s_resources: + k8s_res_path = tmpdir / 'k8s-resources.yaml' + with k8s_res_path.open("wt", encoding="utf8") as f: + yaml.dump(k8s_resources, stream=f, Dumper=_DefaultDumper) + args.extend(['--k8s-resources', str(k8s_res_path)]) + self._run('pod-spec-set', *args) + finally: + shutil.rmtree(str(tmpdir)) + + def status_get(self, *, is_app=False): + """Get a status of a unit or an application. + + Args: + is_app: A boolean indicating whether the status should be retrieved for a unit + or an application. + """ + content = self._run( + 'status-get', '--include-data', '--application={}'.format(is_app), + use_json=True, + return_output=True) + # Unit status looks like (in YAML): + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # Application status looks like (in YAML): + # application-status: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + # units: + # uo/0: + # message: 'load: 0.28 0.26 0.26' + # status: active + # status-data: {} + + if is_app: + return {'status': content['application-status']['status'], + 'message': content['application-status']['message']} + else: + return content + + def status_set(self, status, message='', *, is_app=False): + """Set a status of a unit or an application. + + Args: + status: The status to set. + message: The message to set in the status. + is_app: A boolean indicating whether the status should be set for a unit or an + application. + """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, "--", message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). 
+ relation_id: An optional relation id to get network info for. + """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + return str(decimal_value) + + @classmethod + def validate_label_value(cls, label, value): + # Label values cannot be empty, contain commas or equal signs as those are + # used by add-metric as separators. + if not value: + raise ModelError( + 'metric label {} has an empty value, which is not allowed'.format(label)) + v = str(value) + if re.search('[,=]', v) is not None: + raise ModelError( + 'metric label values must not contain "," or "=": {}={!r}'.format(label, value)) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/storage.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/storage.py new file mode 100644 index 0000000000000000000000000000000000000000..562cde770bcc3b5961aa6086372f0a2529bbd317 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/storage.py @@ -0,0 +1,374 @@ +# Copyright 2019-2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Structures to offer storage to the charm (through Juju or locally).""" + +from datetime import timedelta +import pickle +import shutil +import subprocess +import sqlite3 +import typing + +import yaml + + +def _run(args, **kw): + cmd = shutil.which(args[0]) + if cmd is None: + raise FileNotFoundError(args[0]) + return subprocess.run([cmd, *args[1:]], **kw) + + +class SQLiteStorage: + """Storage using SQLite backend.""" + + DB_LOCK_TIMEOUT = timedelta(hours=1) + + def __init__(self, filename): + # The isolation_level argument is set to None such that the implicit + # transaction management behavior of the sqlite3 module is disabled. + self._db = sqlite3.connect(str(filename), + isolation_level=None, + timeout=self.DB_LOCK_TIMEOUT.total_seconds()) + self._setup() + + def _setup(self): + """Make the database ready to be used as storage.""" + # Make sure that the database is locked until the connection is closed, + # not until the transaction ends. + self._db.execute("PRAGMA locking_mode=EXCLUSIVE") + c = self._db.execute("BEGIN") + c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'") + if c.fetchone()[0] == 0: + # Keep in mind what might happen if the process dies somewhere below. + # The system must not be rendered permanently broken by that. + self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)") + self._db.execute(''' + CREATE TABLE notice ( + sequence INTEGER PRIMARY KEY AUTOINCREMENT, + event_path TEXT, + observer_path TEXT, + method_name TEXT) + ''') + self._db.commit() + + def close(self): + """Part of the Storage API, close the storage backend.""" + self._db.close() + + def commit(self): + """Part of the Storage API, commit latest changes in the storage backend.""" + self._db.commit() + + # There's commit but no rollback. For abort to be supported, we'll need logic that + # can rollback decisions made by third-party code in terms of the internal state + # of objects that have been snapshotted, and hooks to let them know about it and + # take the needed actions to undo their logic until the last snapshot. + # This is doable but will increase significantly the chances for mistakes. + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. + """ + # Use pickle for serialization, so the value remains portable. + raw_data = pickle.dumps(snapshot_data) + self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data)) + + def load_snapshot(self, handle_path: str) -> typing.Any: + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + c = self._db.cursor() + c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,)) + row = c.fetchone() + if row: + return pickle.loads(row[0]) + raise NoSnapshotError(handle_path) + + def drop_snapshot(self, handle_path: str): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. 
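+
+ Args:
+ handle_path: The string identifying the snapshot.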
+ """ + self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,)) + + def list_snapshots(self) -> typing.Generator[str, None, None]: + """Return the name of all snapshots that are currently saved.""" + c = self._db.cursor() + c.execute("SELECT handle FROM snapshot") + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield row[0] + + def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, record an notice (event and observer).""" + self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)', + (event_path, observer_path, method_name)) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None: + """Part of the Storage API, remove a notice that was previously recorded.""" + self._db.execute(''' + DELETE FROM notice + WHERE event_path=? + AND observer_path=? + AND method_name=? + ''', (event_path, observer_path, method_name)) + + def notices(self, event_path: str = None) ->\ + typing.Generator[typing.Tuple[str, str, str], None, None]: + """Part of the Storage API, return all notices that begin with event_path. + + Args: + event_path: If supplied, will only yield events that match event_path. If not + supplied (or None/'') will return all events. + + Returns: + Iterable of (event_path, observer_path, method_name) tuples + """ + if event_path: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + WHERE event_path=? + ORDER BY sequence + ''', (event_path,)) + else: + c = self._db.execute(''' + SELECT event_path, observer_path, method_name + FROM notice + ORDER BY sequence + ''') + while True: + rows = c.fetchmany() + if not rows: + break + for row in rows: + yield tuple(row) + + +class JujuStorage: + """Storing the content tracked by the Framework in Juju. + + This uses :class:`_JujuStorageBackend` to interact with state-get/state-set + as the way to store state for the framework and for components. + """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + """Part of the Storage API, close the storage backend. + + Nothing to be done for Juju backend, as it's transactional. + """ + + def commit(self): + """Part of the Storage API, commit latest changes in the storage backend. + + Nothing to be done for Juju backend, as it's transactional. + """ + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + """Part of the Storage API, persist a snapshot data under the given handle. + + Args: + handle_path: The string identifying the snapshot. + snapshot_data: The data to be persisted. (as returned by Object.snapshot()). This + might be a dict/tuple/int, but must only contain 'simple' python types. + """ + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + """Part of the Storage API, retrieve a snapshot that was previously saved. + + Args: + handle_path: The string identifying the snapshot. + + Raises: + NoSnapshotError: if there is no snapshot for the given handle_path. + """ + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + """Part of the Storage API, remove a snapshot that was previously saved. + + Dropping a snapshot that doesn't exist is treated as a no-op. 
+ """ + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + """Part of the Storage API, record an notice (event and observer).""" + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + """Part of the Storage API, remove a notice that was previously recorded.""" + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str = None): + """Part of the Storage API, return all notices that begin with event_path. + + Args: + event_path: If supplied, will only yield events that match event_path. If not + supplied (or None/'') will return all events. + + Returns: + Iterable of (event_path, observer_path, method_name) tuples + """ + notice_list = self._load_notice_list() + for row in notice_list: + if event_path and row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + """Load a notice list from current key. + + Returns: + List of (event_path, observer_path, method_name) tuples; empty if no key or is None. + """ + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + """Save a notice list under current key. + + Args: + notices: List of (event_path, observer_path, method_name) tuples. + """ + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal'. + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +def juju_backend_available() -> bool: + """Check if Juju state storage is available.""" + p = shutil.which('state-get') + return p is not None + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). 
Not all versions of PyYAML + # have the same default style. + encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None) + content = yaml.dump( + {key: encoded_value}, encoding='utf8', default_style='|', + default_flow_style=False, + Dumper=_SimpleDumper) + _run(["state-set", "--file", "-"], input=content, check=True) + + def get(self, key: str) -> typing.Any: + """Get the bytes value associated with a given key. + + Args: + key: The string key that will be used to find the value + Raises: + CalledProcessError: if 'state-get' returns an error code. + """ + # We don't capture stderr here so it can end up in debug logs. + p = _run(["state-get", key], stdout=subprocess.PIPE, check=True, universal_newlines=True) + if p.stdout == '' or p.stdout == '\n': + raise KeyError(key) + return yaml.load(p.stdout, Loader=_SimpleLoader) + + def delete(self, key: str) -> None: + """Remove a key from being tracked. + + Args: + key: The key to stop storing + Raises: + CalledProcessError: if 'state-delete' returns an error code. + """ + _run(["state-delete", key], check=True) + + +class NoSnapshotError(Exception): + """Exception to flag that there is no snapshot for the given handle_path.""" + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return 'no snapshot data found for {} object'.format(self.handle_path) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/testing.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/testing.py new file mode 100644 index 0000000000000000000000000000000000000000..e70bc98ff661b51cf45c3085ce6a29809a7b3110 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/testing.py @@ -0,0 +1,826 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Infrastructure to build unittests for Charms using the Operator Framework.""" + +import inspect +import pathlib +import random +import tempfile +import typing +import yaml +from contextlib import contextmanager +from textwrap import dedent + +from ops import ( + charm, + framework, + model, + storage, +) + + +# OptionalYAML is something like metadata.yaml or actions.yaml. You can +# pass in a file-like object or the string directly. +OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]] + + +# noinspection PyProtectedMember +class Harness: + """This class represents a way to build up the model that will drive a test suite. + + The model that is created is from the viewpoint of the charm that you are testing. 
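+ Everything runs against an in-memory testing backend, so no Juju
+ controller is required.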
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ # Now instantiate the charm to see events as the model changes
+ harness.begin()
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ # Check that charm has properly handled the relation_joined event for postgresql/0
+ self.assertEqual(harness.charm. ...)
+
+ Args:
+ charm_cls: The Charm class that you'll be testing.
+ meta: A string or file-like object containing the contents of
+ metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+ parent directory of the Charm, and if not found fall back to a trivial
+ 'name: test-charm' metadata.
+ actions: A string or file-like object containing the contents of
+ actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+ parent directory of the Charm.
+ config: A string or file-like object containing the contents of
+ config.yaml. If not supplied, we will look for a 'config.yaml' file in the
+ parent directory of the Charm.
+ """
+
+ def __init__(
+ self,
+ charm_cls: typing.Type[charm.CharmBase],
+ *,
+ meta: OptionalYAML = None,
+ actions: OptionalYAML = None,
+ config: OptionalYAML = None):
+ self._charm_cls = charm_cls
+ self._charm = None
+ self._charm_dir = 'no-disk-path' # this may be updated by _create_meta
+ self._meta = self._create_meta(meta, actions)
+ self._unit_name = self._meta.name + '/0'
+ self._framework = None
+ self._hooks_enabled = True
+ self._relation_id_counter = 0
+ self._backend = _TestingModelBackend(self._unit_name, self._meta)
+ self._model = model.Model(self._meta, self._backend)
+ self._storage = storage.SQLiteStorage(':memory:')
+ self._oci_resources = {}
+ self._framework = framework.Framework(
+ self._storage, self._charm_dir, self._meta, self._model)
+ self._update_config(key_values=self._load_config_defaults(config))
+
+ @property
+ def charm(self) -> charm.CharmBase:
+ """Return the instance of the charm class that was passed to __init__.
+
+ Note that the Charm is not instantiated until you have called
+ :meth:`.begin()`.
+ """
+ return self._charm
+
+ @property
+ def model(self) -> model.Model:
+ """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+ return self._model
+
+ @property
+ def framework(self) -> framework.Framework:
+ """Return the Framework that is being driven by this Harness."""
+ return self._framework
+
+ def begin(self) -> None:
+ """Instantiate the Charm and start handling events.
+
+ Before calling :meth:`begin`, there is no Charm instance, so changes to the Model won't
+ emit events. You must call :meth:`.begin` before :attr:`.charm` is valid.
+ """
+ if self._charm is not None:
+ raise RuntimeError('cannot call the begin method on the harness more than once')
+
+ # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+ # the original class against multiple Frameworks. So create a locally defined class
+ # and register it.
+ # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+ # class attributes, which should clean up this ugliness.
The API can stay the same.
+ class TestEvents(self._charm_cls.on.__class__):
+ pass
+
+ TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+ class TestCharm(self._charm_cls):
+ on = TestEvents()
+
+ # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+ # rather than TestCharm has no attribute foo.
+ TestCharm.__name__ = self._charm_cls.__name__
+ self._charm = TestCharm(self._framework)
+
+ def begin_with_initial_hooks(self) -> None:
+ """Fire the same hooks that Juju would fire at startup.
+
+ This triggers install, relation-created, config-changed, start, and any relation-joined
+ hooks, based on what relations have been defined before you called begin().
+ Note that all of these are fired before returning control to the test suite, so if you
+ want to introspect what happens at each step, you need to fire them directly
+ (eg Charm.on.install.emit()).
+
+ To use this with all the normal hooks, you should instantiate the harness, set up any
+ relations that you want active when the charm starts, and then call this method.
+
+ Example::
+
+ harness = Harness(MyCharm)
+ # Do initial setup here
+ relation_id = harness.add_relation('db', 'postgresql')
+ harness.add_relation_unit(relation_id, 'postgresql/0')
+ harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+ harness.set_leader(True)
+ harness.update_config({'initial': 'config'})
+ harness.begin_with_initial_hooks()
+ # This will cause
+ # install, db-relation-created('postgresql'), leader-elected, config-changed, start
+ # db-relation-joined('postgresql/0'), db-relation-changed('postgresql/0')
+ # To be fired.
+ """
+ self.begin()
+ # TODO: jam 2020-08-03 This should also handle storage-attached hooks once we have support
+ # for dealing with storage.
+ self._charm.on.install.emit()
+ # Juju itself iterates what relation to fire based on a map[int]relation, so it doesn't
+ # guarantee a stable ordering between relation events. It *does* give a stable ordering
+ # of joined units for a given relation.
+ items = list(self._meta.relations.items())
+ random.shuffle(items)
+ this_app_name = self._meta.name
+ for relname, rel_meta in items:
+ if rel_meta.role == charm.RelationRole.peer:
+ # If the user has directly added a relation, leave it be, but otherwise ensure
+ # that peer relations are always established before leader-elected.
+ rel_ids = self._backend._relation_ids_map.get(relname)
+ if rel_ids is None:
+ self.add_relation(relname, self._meta.name)
+ else:
+ random.shuffle(rel_ids)
+ for rel_id in rel_ids:
+ self._emit_relation_created(relname, rel_id, this_app_name)
+ else:
+ rel_ids = self._backend._relation_ids_map.get(relname, [])
+ random.shuffle(rel_ids)
+ for rel_id in rel_ids:
+ app_name = self._backend._relation_app_and_units[rel_id]["app"]
+ self._emit_relation_created(relname, rel_id, app_name)
+ if self._backend._is_leader:
+ self._charm.on.leader_elected.emit()
+ else:
+ self._charm.on.leader_settings_changed.emit()
+ self._charm.on.config_changed.emit()
+ self._charm.on.start.emit()
+ all_ids = list(self._backend._relation_names.items())
+ random.shuffle(all_ids)
+ for rel_id, rel_name in all_ids:
+ rel_app_and_units = self._backend._relation_app_and_units[rel_id]
+ app_name = rel_app_and_units["app"]
+ # Note: Juju *does* fire relation events for a given relation in the sorted order of
+ # the unit names. It also always fires relation-changed immediately after
+ # relation-joined for the same unit.
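+ # The loop below reproduces both behaviors: units are iterated in sorted
+ # order, with relation-changed emitted right after relation-joined.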
+ # Juju only fires relation-changed (app) if there is data for the related application + relation = self._model.get_relation(rel_name, rel_id) + if self._backend._relation_data[rel_id].get(app_name): + app = self._model.get_app(app_name) + self._charm.on[rel_name].relation_changed.emit( + relation, app, None) + for unit_name in sorted(rel_app_and_units["units"]): + remote_unit = self._model.get_unit(unit_name) + self._charm.on[rel_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + self._charm.on[rel_name].relation_changed.emit( + relation, remote_unit.app, remote_unit) + + def cleanup(self) -> None: + """Called by your test infrastructure to cleanup any temporary directories/files/etc. + + Currently this only needs to be called if you test with resources. But it is reasonable + to always include a `testcase.addCleanup(harness.cleanup)` just in case. + """ + self._backend._cleanup() + + def _create_meta(self, charm_metadata, action_metadata): + """Create a CharmMeta object. + + Handle the cases where a user doesn't supply explicit metadata snippets. + """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def _load_config_defaults(self, charm_config): + """Load default values from config.yaml. + + Handle the case where a user doesn't supply explicit config snippets. + """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_config is None: + config_path = charm_dir / 'config.yaml' + if config_path.is_file(): + charm_config = config_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of config that the framework can support + charm_config = '{}' + elif isinstance(charm_config, str): + charm_config = dedent(charm_config) + charm_config = yaml.load(charm_config, Loader=yaml.SafeLoader) + charm_config = charm_config.get('options', {}) + return {key: value['default'] for key, value in charm_config.items() + if 'default' in value} + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. 
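+
+ Example (assuming metadata.yaml defines an oci-image resource named
+ 'grafana-image'; the values shown are illustrative)::
+
+ harness.add_oci_resource('grafana-image', {
+ 'registrypath': 'registry.example.com/grafana',
+ 'username': 'admin',
+ 'password': 'secret',
+ })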
+ """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + + as_yaml = yaml.dump(contents, Dumper=yaml.SafeDumper) + self._backend._resources_map[resource_name] = ('contents.yaml', as_yaml) + + def add_resource(self, resource_name: str, content: typing.AnyStr) -> None: + """Add content for a resource to the backend. + + This will register the content, so that a call to `Model.resources.fetch(resource_name)` + will return a path to a file containing that content. + + Args: + resource_name: The name of the resource being added + content: Either string or bytes content, which will be the content of the filename + returned by resource-get. If contents is a string, it will be encoded in utf-8 + """ + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + record = self._meta.resources[resource_name] + if record.type != "file": + raise RuntimeError( + 'Resource {} is not a file, but actually {}'.format(resource_name, record.type)) + filename = record.filename + if filename is None: + filename = resource_name + + self._backend._resources_map[resource_name] = (filename, content) + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + @contextmanager + def hooks_disabled(self): + """A context manager to run code with hooks disabled. + + Example:: + + with harness.hooks_disabled(): + # things in here don't fire events + harness.set_leader(True) + harness.update_config(unset=['foo', 'bar']) + # things here will again fire events + """ + if self._hooks_enabled: + self.disable_hooks() + try: + yield None + finally: + self.enable_hooks() + else: + yield None + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + self._backend._relation_app_and_units[rel_id] = { + "app": remote_app, + "units": [], + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + self._emit_relation_created(relation_name, rel_id, remote_app) + return rel_id + + def _emit_relation_created(self, relation_name: str, relation_id: int, + remote_app: str) -> None: + """Trigger relation-created for a given relation with a given remote application.""" + if self._charm is None or not self._hooks_enabled: + return + if self._charm is None or not self._hooks_enabled: + return + relation = self._model.get_relation(relation_name, relation_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event. This would naturally be + followed by a `relation_changed` event, which you can trigger with + :meth:`.update_relation_data`. This separation is artificial in the + sense that Juju will always fire the two, but is intended to make + testing relations and their data bags slightly more natural. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + # TODO: jam 2020-08-03 This is where we could assert that the unit name matches the + # application name (eg you don't have a relation to 'foo' but add units of 'bar/0' + self._backend._relation_app_and_units[relation_id]["units"].append(remote_unit_name) + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. 
+ app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_pod_spec(self) -> (typing.Mapping, typing.Mapping): + """Return the content of the pod spec as last set by the charm. + + This returns both the pod spec and any k8s_resources that were supplied. + See the signature of Model.pod.set_spec + """ + return self._backend._pod_spec + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. + + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def _update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. 
+ + This will *not* trigger a `config_changed` event, and is intended for internal use. + + Note that the `key_values` mapping will only add or update configuration items. + To remove existing ones, see the `unset` parameter. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Note that the `key_values` mapping will only add or update configuration items. + To remove existing ones, see the `unset` parameter. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + self._update_config(key_values, unset) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. + """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. 
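+
+ For example (illustrative): calling `backend.relation_ids('db')` appends
+ the entry `('relation_ids', 'db')` to `backend._calls`.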
+ """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +class _ResourceEntry: + """Tracks the contents of a Resource.""" + + def __init__(self, resource_name): + self.name = resource_name + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] + self._relation_data = {} # {relation_id: {name: data}} + # {relation_id: {"app": app_name, "units": ["app/0",...]} + self._relation_app_and_units = {} + self._config = {} + self._is_leader = False + self._resources_map = {} # {resource_name: resource_content} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + self._resource_dir = None + + def _cleanup(self): + if self._resource_dir is not None: + self._resource_dir.cleanup() + self._resource_dir = None + + def _get_resource_dir(self) -> pathlib.Path: + if self._resource_dir is None: + # In actual Juju, the resource path for a charm's resource is + # $AGENT_DIR/resources/$RESOURCE_NAME/$RESOURCE_FILENAME + # However, charms shouldn't depend on this. 
+ self._resource_dir = tempfile.TemporaryDirectory(prefix='tmp-ops-test-resource-') + return pathlib.Path(self._resource_dir.name) + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + if resource_name not in self._resources_map: + raise model.ModelError( + "ERROR could not download resource: HTTP request failed: " + "Get https://.../units/unit-{}/resources/{}: resource#{}/{} not found".format( + self.unit_name.replace('/', '-'), resource_name, self.app_name, resource_name + )) + filename, contents = self._resources_map[resource_name] + resource_dir = self._get_resource_dir() + resource_filename = resource_dir / resource_name / filename + if not resource_filename.exists(): + if isinstance(contents, bytes): + mode = 'wb' + else: + mode = 'wt' + resource_filename.parent.mkdir(exist_ok=True) + with resource_filename.open(mode=mode) as resource_file: + resource_file.write(contents) + return resource_filename + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/version.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/version.py new file mode 100644 index 
0000000000000000000000000000000000000000..db9e98175100d8045815d0ee215d2f5f76ad468f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/ops/version.py @@ -0,0 +1,3 @@ +# this is a generated file + +version = '1.1.0' diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..86d07b5525d10bf1d543be0e1f5d01af897a4b49 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/__init__.py @@ -0,0 +1,427 @@ + +from .error import * + +from .tokens import * +from .events import * +from .nodes import * + +from .loader import * +from .dumper import * + +__version__ = '5.4.1' +try: + from .cyaml import * + __with_libyaml__ = True +except ImportError: + __with_libyaml__ = False + +import io + +#------------------------------------------------------------------------------ +# Warnings control +#------------------------------------------------------------------------------ + +# 'Global' warnings state: +_warnings_enabled = { + 'YAMLLoadWarning': True, +} + +# Get or set global warnings' state +def warnings(settings=None): + if settings is None: + return _warnings_enabled + + if type(settings) is dict: + for key in settings: + if key in _warnings_enabled: + _warnings_enabled[key] = settings[key] + +# Warn when load() is called without Loader=... +class YAMLLoadWarning(RuntimeWarning): + pass + +def load_warning(method): + if _warnings_enabled['YAMLLoadWarning'] is False: + return + + import warnings + + message = ( + "calling yaml.%s() without Loader=... is deprecated, as the " + "default Loader is unsafe. Please read " + "https://msg.pyyaml.org/load for full details." + ) % method + + warnings.warn(message, YAMLLoadWarning, stacklevel=3) + +#------------------------------------------------------------------------------ +def scan(stream, Loader=Loader): + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.check_token(): + yield loader.get_token() + finally: + loader.dispose() + +def parse(stream, Loader=Loader): + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader.check_event(): + yield loader.get_event() + finally: + loader.dispose() + +def compose(stream, Loader=Loader): + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + +def compose_all(stream, Loader=Loader): + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader.get_node() + finally: + loader.dispose() + +def load(stream, Loader=None): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + if Loader is None: + load_warning('load') + Loader = FullLoader + + loader = Loader(stream) + try: + return loader.get_single_data() + finally: + loader.dispose() + +def load_all(stream, Loader=None): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. 
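+
+ For example (illustrative):
+
+ for document in load_all(stream, Loader=SafeLoader):
+ print(document)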
+ """ + if Loader is None: + load_warning('load_all') + Loader = FullLoader + + loader = Loader(stream) + try: + while loader.check_data(): + yield loader.get_data() + finally: + loader.dispose() + +def full_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load(stream, FullLoader) + +def full_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags except those known to be + unsafe on untrusted input. + """ + return load_all(stream, FullLoader) + +def safe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. + """ + return load(stream, SafeLoader) + +def safe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve only basic YAML tags. This is known + to be safe for untrusted input. + """ + return load_all(stream, SafeLoader) + +def unsafe_load(stream): + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load(stream, UnsafeLoader) + +def unsafe_load_all(stream): + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + + Resolve all tags, even those known to be + unsafe on untrusted input. + """ + return load_all(stream, UnsafeLoader) + +def emit(events, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + stream = io.StringIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + try: + for event in events: + dumper.emit(event) + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize_all(nodes, stream=None, Dumper=Dumper, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None): + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end) + try: + dumper.open() + for node in nodes: + dumper.serialize(node) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + +def dump_all(documents, stream=None, Dumper=Dumper, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = io.StringIO() + else: + stream = io.BytesIO() + getvalue = stream.getvalue + dumper = Dumper(stream, default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break, + encoding=encoding, version=version, tags=tags, + explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys) + try: + dumper.open() + for data in documents: + dumper.represent(data) + dumper.close() + finally: + dumper.dispose() + if getvalue: + return getvalue() + +def dump(data, stream=None, Dumper=Dumper, **kwds): + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=Dumper, **kwds) + +def safe_dump_all(documents, stream=None, **kwds): + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + +def safe_dump(data, stream=None, **kwds): + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + +def add_implicit_resolver(tag, regexp, first=None, + Loader=None, Dumper=Dumper): + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + if Loader is None: + loader.Loader.add_implicit_resolver(tag, regexp, first) + loader.FullLoader.add_implicit_resolver(tag, regexp, first) + loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first) + else: + Loader.add_implicit_resolver(tag, regexp, first) + Dumper.add_implicit_resolver(tag, regexp, first) + +def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper): + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. + """ + if Loader is None: + loader.Loader.add_path_resolver(tag, path, kind) + loader.FullLoader.add_path_resolver(tag, path, kind) + loader.UnsafeLoader.add_path_resolver(tag, path, kind) + else: + Loader.add_path_resolver(tag, path, kind) + Dumper.add_path_resolver(tag, path, kind) + +def add_constructor(tag, constructor, Loader=None): + """ + Add a constructor for the given tag. + Constructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. 
+ """ + if Loader is None: + loader.Loader.add_constructor(tag, constructor) + loader.FullLoader.add_constructor(tag, constructor) + loader.UnsafeLoader.add_constructor(tag, constructor) + else: + Loader.add_constructor(tag, constructor) + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=None): + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + if Loader is None: + loader.Loader.add_multi_constructor(tag_prefix, multi_constructor) + loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor) + loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor) + else: + Loader.add_multi_constructor(tag_prefix, multi_constructor) + +def add_representer(data_type, representer, Dumper=Dumper): + """ + Add a representer for the given type. + Representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. + """ + Dumper.add_representer(data_type, representer) + +def add_multi_representer(data_type, multi_representer, Dumper=Dumper): + """ + Add a representer for the given type. + Multi-representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + Dumper.add_multi_representer(data_type, multi_representer) + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + def __init__(cls, name, bases, kwds): + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + if isinstance(cls.yaml_loader, list): + for loader in cls.yaml_loader: + loader.add_constructor(cls.yaml_tag, cls.from_yaml) + else: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + + cls.yaml_dumper.add_representer(cls, cls.to_yaml) + +class YAMLObject(metaclass=YAMLObjectMetaclass): + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_loader = [Loader, FullLoader, UnsafeLoader] + yaml_dumper = Dumper + + yaml_tag = None + yaml_flow_style = None + + @classmethod + def from_yaml(cls, loader, node): + """ + Convert a representation node to a Python object. + """ + return loader.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, dumper, data): + """ + Convert a Python object to a representation node. 
+ """ + return dumper.represent_yaml_object(cls.yaml_tag, data, cls, + flow_style=cls.yaml_flow_style) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so new file mode 100755 index 0000000000000000000000000000000000000000..801c0e2a72a862a33d640a576ca969b684dc8e86 Binary files /dev/null and b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/_yaml.cpython-38-x86_64-linux-gnu.so differ diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/composer.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/composer.py new file mode 100644 index 0000000000000000000000000000000000000000..6d15cb40e3b4198819c91c6f8d8b32807fcf53b2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/composer.py @@ -0,0 +1,139 @@ + +__all__ = ['Composer', 'ComposerError'] + +from .error import MarkedYAMLError +from .events import * +from .nodes import * + +class ComposerError(MarkedYAMLError): + pass + +class Composer: + + def __init__(self): + self.anchors = {} + + def check_node(self): + # Drop the STREAM-START event. + if self.check_event(StreamStartEvent): + self.get_event() + + # If there are more documents available? + return not self.check_event(StreamEndEvent) + + def get_node(self): + # Get the root node of the next document. + if not self.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # Drop the STREAM-START event. + self.get_event() + + # Compose a document if the stream is not empty. + document = None + if not self.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.check_event(StreamEndEvent): + event = self.get_event() + raise ComposerError("expected a single document in the stream", + document.start_mark, "but found another document", + event.start_mark) + + # Drop the STREAM-END event. + self.get_event() + + return document + + def compose_document(self): + # Drop the DOCUMENT-START event. + self.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + if self.check_event(AliasEvent): + event = self.get_event() + anchor = event.anchor + if anchor not in self.anchors: + raise ComposerError(None, None, "found undefined alias %r" + % anchor, event.start_mark) + return self.anchors[anchor] + event = self.peek_event() + anchor = event.anchor + if anchor is not None: + if anchor in self.anchors: + raise ComposerError("found duplicate anchor %r; first occurrence" + % anchor, self.anchors[anchor].start_mark, + "second occurrence", event.start_mark) + self.descend_resolver(parent, index) + if self.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + event = self.get_event() + tag = event.tag + if tag is None or tag == '!': + tag = self.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode(tag, event.value, + event.start_mark, event.end_mark, style=event.style) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + + def compose_mapping_node(self, anchor): + start_event = self.get_event() + tag = start_event.tag + if tag is None or tag == '!': + tag = self.resolve(MappingNode, None, start_event.implicit) + node = MappingNode(tag, [], + start_event.start_mark, None, + flow_style=start_event.flow_style) + if anchor is not None: + self.anchors[anchor] = node + while not self.check_event(MappingEndEvent): + #key_event = self.peek_event() + item_key = self.compose_node(node, None) + #if item_key in node.value: + # raise ComposerError("while composing a mapping", start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + #node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = self.get_event() + node.end_mark = end_event.end_mark + return node + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/constructor.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..619acd3070a4845c653fcf22a626e05158035bc2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/constructor.py @@ -0,0 +1,748 @@ + +__all__ = [ + 'BaseConstructor', + 'SafeConstructor', + 'FullConstructor', + 'UnsafeConstructor', + 'Constructor', + 'ConstructorError' +] + +from .error import * +from .nodes import * + +import collections.abc, datetime, base64, binascii, re, sys, types + +class ConstructorError(MarkedYAMLError): + pass + +class BaseConstructor: + + 
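+    # Editor's note (illustrative sketch, not part of upstream PyYAML): the
+    # class-level registries below map YAML tags to constructor functions,
+    # and add_constructor() further down copies them per subclass. A minimal
+    # example of registering a hypothetical '!point' tag on SafeLoader via
+    # the module-level helper:
+    #
+    #     import yaml
+    #
+    #     def construct_point(loader, node):
+    #         x, y = loader.construct_sequence(node)
+    #         return (x, y)
+    #
+    #     yaml.add_constructor('!point', construct_point, Loader=yaml.SafeLoader)
+    #     yaml.safe_load('!point [1, 2]')   # -> (1, 2)
+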
yaml_constructors = {} + yaml_multi_constructors = {} + + def __init__(self): + self.constructed_objects = {} + self.recursive_objects = {} + self.state_generators = [] + self.deep_construct = False + + def check_data(self): + # If there are more documents available? + return self.check_node() + + def check_state_key(self, key): + """Block special attributes/methods from being set in a newly created + object, to prevent user-controlled methods from being called during + deserialization""" + if self.get_state_keys_blacklist_regexp().match(key): + raise ConstructorError(None, None, + "blacklisted key '%s' in instance state found" % (key,), None) + + def get_data(self): + # Construct and return the next document. + if self.check_node(): + return self.construct_document(self.get_node()) + + def get_single_data(self): + # Ensure that the stream contains a single document and construct it. + node = self.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + data = self.construct_object(node) + while self.state_generators: + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + raise ConstructorError(None, None, + "found unconstructable recursive node", node.start_mark) + self.recursive_objects[node] = None + constructor = None + tag_suffix = None + if node.tag in self.yaml_constructors: + constructor = self.yaml_constructors[node.tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag_prefix is not None and node.tag.startswith(tag_prefix): + tag_suffix = node.tag[len(tag_prefix):] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = node.tag + constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for dummy in generator: + pass + else: + self.state_generators.append(generator) + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_scalar(self, node): + if not isinstance(node, ScalarNode): + raise ConstructorError(None, None, + "expected a scalar node, but found %s" % node.id, + node.start_mark) + return node.value + + def construct_sequence(self, node, deep=False): + if not isinstance(node, SequenceNode): + raise ConstructorError(None, None, + "expected a sequence node, but found %s" % node.id, + node.start_mark) + return [self.construct_object(child, deep=deep) + for child in node.value] + + def construct_mapping(self, node, deep=False): + if not 
isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + mapping = {} + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + if not isinstance(key, collections.abc.Hashable): + raise ConstructorError("while constructing a mapping", node.start_mark, + "found unhashable key", key_node.start_mark) + value = self.construct_object(value_node, deep=deep) + mapping[key] = value + return mapping + + def construct_pairs(self, node, deep=False): + if not isinstance(node, MappingNode): + raise ConstructorError(None, None, + "expected a mapping node, but found %s" % node.id, + node.start_mark) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + if not 'yaml_constructors' in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + if not 'yaml_multi_constructors' in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + +class SafeConstructor(BaseConstructor): + + def construct_scalar(self, node): + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == 'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return super().construct_scalar(node) + + def flatten_mapping(self, node): + merge = [] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == 'tag:yaml.org,2002:merge': + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing a mapping", + node.start_mark, + "expected a mapping for merging, but found %s" + % subnode.id, subnode.start_mark) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError("while constructing a mapping", node.start_mark, + "expected a mapping or list of mappings for merging, but found %s" + % value_node.id, value_node.start_mark) + elif key_node.tag == 'tag:yaml.org,2002:value': + key_node.tag = 'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if merge: + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return super().construct_mapping(node, deep=deep) + + def construct_yaml_null(self, node): + self.construct_scalar(node) + return None + + bool_values = { + 'yes': True, + 'no': False, + 'true': True, + 'false': False, + 'on': True, + 'off': False, + } + + def construct_yaml_bool(self, node): + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + value = self.construct_scalar(node) + value = value.replace('_', '') + sign = +1 + if value[0] == '-': + sign = -1 + if value[0] in '+-': + value = value[1:] + if value == '0': + return 0 + elif value.startswith('0b'): + 
+                return sign*int(value[2:], 2)
+            elif value.startswith('0x'):
+                return sign*int(value[2:], 16)
+            elif value[0] == '0':
+                return sign*int(value, 8)
+            elif ':' in value:
+                digits = [int(part) for part in value.split(':')]
+                digits.reverse()
+                base = 1
+                value = 0
+                for digit in digits:
+                    value += digit*base
+                    base *= 60
+                return sign*value
+            else:
+                return sign*int(value)
+
+    inf_value = 1e300
+    while inf_value != inf_value*inf_value:
+        inf_value *= inf_value
+    nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+    def construct_yaml_float(self, node):
+        value = self.construct_scalar(node)
+        value = value.replace('_', '').lower()
+        sign = +1
+        if value[0] == '-':
+            sign = -1
+        if value[0] in '+-':
+            value = value[1:]
+        if value == '.inf':
+            return sign*self.inf_value
+        elif value == '.nan':
+            return self.nan_value
+        elif ':' in value:
+            digits = [float(part) for part in value.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit*base
+                base *= 60
+            return sign*value
+        else:
+            return sign*float(value)
+
+    def construct_yaml_binary(self, node):
+        try:
+            value = self.construct_scalar(node).encode('ascii')
+        except UnicodeEncodeError as exc:
+            raise ConstructorError(None, None,
+                    "failed to convert base64 data into ascii: %s" % exc,
+                    node.start_mark)
+        try:
+            if hasattr(base64, 'decodebytes'):
+                return base64.decodebytes(value)
+            else:
+                return base64.decodestring(value)
+        except binascii.Error as exc:
+            raise ConstructorError(None, None,
+                    "failed to decode base64 data: %s" % exc, node.start_mark)
+
+    timestamp_regexp = re.compile(
+            r'''^(?P<year>[0-9][0-9][0-9][0-9])
+                -(?P<month>[0-9][0-9]?)
+                -(?P<day>[0-9][0-9]?)
+                (?:(?:[Tt]|[ \t]+)
+                (?P<hour>[0-9][0-9]?)
+                :(?P<minute>[0-9][0-9])
+                :(?P<second>[0-9][0-9])
+                (?:\.(?P<fraction>[0-9]*))?
+                (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+                (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+    def construct_yaml_timestamp(self, node):
+        value = self.construct_scalar(node)
+        match = self.timestamp_regexp.match(node.value)
+        values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        tzinfo = None
+        if values['fraction']:
+            fraction = values['fraction'][:6]
+            while len(fraction) < 6:
+                fraction += '0'
+            fraction = int(fraction)
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            tz_minute = int(values['tz_minute'] or 0)
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+            tzinfo = datetime.timezone(delta)
+        elif values['tz']:
+            tzinfo = datetime.timezone.utc
+        return datetime.datetime(year, month, day, hour, minute, second, fraction,
+                                 tzinfo=tzinfo)
+
+    def construct_yaml_omap(self, node):
+        # Note: we do not check for duplicate keys, because it's too
+        # CPU-expensive.
+ omap = [] + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing an ordered map", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + omap.append((key, value)) + + def construct_yaml_pairs(self, node): + # Note: the same code as `construct_yaml_omap`. + pairs = [] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a sequence, but found %s" % node.id, node.start_mark) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a mapping of length 1, but found %s" % subnode.id, + subnode.start_mark) + if len(subnode.value) != 1: + raise ConstructorError("while constructing pairs", node.start_mark, + "expected a single mapping item, but found %d items" % len(subnode.value), + subnode.start_mark) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + data = set() + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + return self.construct_scalar(node) + + def construct_yaml_seq(self, node): + data = [] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + data = {} + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + raise ConstructorError(None, None, + "could not determine a constructor for the tag %r" % node.tag, + node.start_mark) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:null', + SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:bool', + SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:int', + SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:float', + SafeConstructor.construct_yaml_float) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:binary', + SafeConstructor.construct_yaml_binary) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:timestamp', + SafeConstructor.construct_yaml_timestamp) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:omap', + SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:pairs', + SafeConstructor.construct_yaml_pairs) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:set', + SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor( + 
'tag:yaml.org,2002:str', + SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:seq', + SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor( + 'tag:yaml.org,2002:map', + SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, + SafeConstructor.construct_undefined) + +class FullConstructor(SafeConstructor): + # 'extend' is blacklisted because it is used by + # construct_python_object_apply to add `listitems` to a newly generate + # python instance + def get_state_keys_blacklist(self): + return ['^extend$', '^__.*__$'] + + def get_state_keys_blacklist_regexp(self): + if not hasattr(self, 'state_keys_blacklist_regexp'): + self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')') + return self.state_keys_blacklist_regexp + + def construct_python_str(self, node): + return self.construct_scalar(node) + + def construct_python_unicode(self, node): + return self.construct_scalar(node) + + def construct_python_bytes(self, node): + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError(None, None, + "failed to convert base64 data into ascii: %s" % exc, + node.start_mark) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError(None, None, + "failed to decode base64 data: %s" % exc, node.start_mark) + + def construct_python_long(self, node): + return self.construct_yaml_int(node) + + def construct_python_complex(self, node): + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python module", mark, + "expected non-empty name appended to the tag", mark) + if unsafe: + try: + __import__(name) + except ImportError as exc: + raise ConstructorError("while constructing a Python module", mark, + "cannot find module %r (%s)" % (name, exc), mark) + if name not in sys.modules: + raise ConstructorError("while constructing a Python module", mark, + "module %r is not imported" % name, mark) + return sys.modules[name] + + def find_python_name(self, name, mark, unsafe=False): + if not name: + raise ConstructorError("while constructing a Python object", mark, + "expected non-empty name appended to the tag", mark) + if '.' 
in name: + module_name, object_name = name.rsplit('.', 1) + else: + module_name = 'builtins' + object_name = name + if unsafe: + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError("while constructing a Python object", mark, + "cannot find module %r (%s)" % (module_name, exc), mark) + if module_name not in sys.modules: + raise ConstructorError("while constructing a Python object", mark, + "module %r is not imported" % module_name, mark) + module = sys.modules[module_name] + if not hasattr(module, object_name): + raise ConstructorError("while constructing a Python object", mark, + "cannot find %r in the module %r" + % (object_name, module.__name__), mark) + return getattr(module, object_name) + + def construct_python_name(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python name", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + value = self.construct_scalar(node) + if value: + raise ConstructorError("while constructing a Python module", node.start_mark, + "expected the empty value, but found %r" % value, node.start_mark) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, + args=None, kwds=None, newobj=False, unsafe=False): + if not args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if not (unsafe or isinstance(cls, type)): + raise ConstructorError("while constructing a Python instance", node.start_mark, + "expected a class, but found %r" % type(cls), + node.start_mark) + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state, unsafe=False): + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + if not unsafe and state: + for key in state.keys(): + self.check_state_key(key) + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + if not unsafe: + self.check_state_key(key) + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
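+        # Editor's note (illustrative): with the short format above, a document
+        # such as
+        #     !!python/object/apply:datetime.timedelta [0, 3600]
+        # loaded through an unsafe loader (e.g. yaml.unsafe_load) calls
+        # datetime.timedelta(0, 3600) and yields a one-hour timedelta.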
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} + state = {} + listitems = [] + dictitems = {} + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if state: + self.set_python_instance_state(instance, state) + if listitems: + instance.extend(listitems) + if dictitems: + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + return self.construct_python_object_apply(suffix, node, newobj=True) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/none', + FullConstructor.construct_yaml_null) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/bool', + FullConstructor.construct_yaml_bool) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/str', + FullConstructor.construct_python_str) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/unicode', + FullConstructor.construct_python_unicode) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/bytes', + FullConstructor.construct_python_bytes) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/int', + FullConstructor.construct_yaml_int) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/long', + FullConstructor.construct_python_long) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/float', + FullConstructor.construct_yaml_float) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/complex', + FullConstructor.construct_python_complex) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/list', + FullConstructor.construct_yaml_seq) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/tuple', + FullConstructor.construct_python_tuple) + +FullConstructor.add_constructor( + 'tag:yaml.org,2002:python/dict', + FullConstructor.construct_yaml_map) + +FullConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/name:', + FullConstructor.construct_python_name) + +class UnsafeConstructor(FullConstructor): + + def find_python_module(self, name, mark): + return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True) + + def find_python_name(self, name, mark): + return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + return super(UnsafeConstructor, self).make_python_instance( + suffix, node, args, kwds, newobj, unsafe=True) + + def set_python_instance_state(self, instance, state): + return super(UnsafeConstructor, self).set_python_instance_state( + instance, state, unsafe=True) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/module:', + UnsafeConstructor.construct_python_module) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object:', + UnsafeConstructor.construct_python_object) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/new:', + UnsafeConstructor.construct_python_object_new) + +UnsafeConstructor.add_multi_constructor( + 'tag:yaml.org,2002:python/object/apply:', + UnsafeConstructor.construct_python_object_apply) + +# Constructor is same as UnsafeConstructor. Need to leave this in place in case +# people have extended it directly. 
+class Constructor(UnsafeConstructor): + pass diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/cyaml.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/cyaml.py new file mode 100644 index 0000000000000000000000000000000000000000..0c21345879b298bb8668201bebe7d289586b17f9 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/cyaml.py @@ -0,0 +1,101 @@ + +__all__ = [ + 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader', + 'CBaseDumper', 'CSafeDumper', 'CDumper' +] + +from yaml._yaml import CParser, CEmitter + +from .constructor import * + +from .serializer import * +from .representer import * + +from .resolver import * + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class CSafeLoader(CParser, SafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class CFullLoader(CParser, FullConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + FullConstructor.__init__(self) + Resolver.__init__(self) + +class CUnsafeLoader(CParser, UnsafeConstructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + UnsafeConstructor.__init__(self) + Resolver.__init__(self) + +class CLoader(CParser, Constructor, Resolver): + + def __init__(self, stream): + CParser.__init__(self, stream) + Constructor.__init__(self) + Resolver.__init__(self) + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class CDumper(CEmitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + CEmitter.__init__(self, 
stream, canonical=canonical, + indent=indent, width=width, encoding=encoding, + allow_unicode=allow_unicode, line_break=line_break, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/dumper.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/dumper.py new file mode 100644 index 0000000000000000000000000000000000000000..6aadba551f3836b02f4752277f4b3027073defad --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/dumper.py @@ -0,0 +1,62 @@ + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper'] + +from .emitter import * +from .serializer import * +from .representer import * +from .resolver import * + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + SafeRepresenter.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + +class Dumper(Emitter, Serializer, Representer, Resolver): + + def __init__(self, stream, + default_style=None, default_flow_style=False, + canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None, + encoding=None, explicit_start=None, explicit_end=None, + version=None, tags=None, sort_keys=True): + Emitter.__init__(self, stream, canonical=canonical, + indent=indent, width=width, + allow_unicode=allow_unicode, line_break=line_break) + Serializer.__init__(self, encoding=encoding, + explicit_start=explicit_start, explicit_end=explicit_end, + version=version, tags=tags) + Representer.__init__(self, default_style=default_style, + default_flow_style=default_flow_style, sort_keys=sort_keys) + Resolver.__init__(self) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/emitter.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/emitter.py new file 
mode 100644 index 0000000000000000000000000000000000000000..a664d011162af69184df2f8e59ab7feec818f7c7 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/emitter.py @@ -0,0 +1,1137 @@ + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +__all__ = ['Emitter', 'EmitterError'] + +from .error import YAMLError +from .events import * + +class EmitterError(YAMLError): + pass + +class ScalarAnalysis: + def __init__(self, scalar, empty, multiline, + allow_flow_plain, allow_block_plain, + allow_single_quoted, allow_double_quoted, + allow_block): + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + +class Emitter: + + DEFAULT_TAG_PREFIXES = { + '!' : '!', + 'tag:yaml.org,2002:' : '!!', + } + + def __init__(self, stream, canonical=None, indent=None, width=None, + allow_unicode=None, line_break=None): + + # The stream should have the methods `write` and possibly `flush`. + self.stream = stream + + # Encoding can be overridden by STREAM-START. + self.encoding = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] + self.state = self.expect_stream_start + + # Current event and the event queue. + self.events = [] + self.event = None + + # The current indentation level and the stack of previous indents. + self.indents = [] + self.indent = None + + # Flow level. + self.flow_level = 0 + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + + # Whether the document requires an explicit document indicator + self.open_ended = False + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + self.best_indent = 2 + if indent and 1 < indent < 10: + self.best_indent = indent + self.best_width = 80 + if width and width > self.best_indent*2: + self.best_width = width + self.best_line_break = '\n' + if line_break in ['\r', '\n', '\r\n']: + self.best_line_break = line_break + + # Tag prefixes. + self.tag_prefixes = None + + # Prepared anchor and tag. + self.prepared_anchor = None + self.prepared_tag = None + + # Scalar analysis and style. + self.analysis = None + self.style = None + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. 
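+    # Editor's note (illustrative): the lookahead buffering above is what lets
+    # the emitter special-case empty collections, e.g.:
+    #
+    #     import yaml
+    #     yaml.dump([])   # emits "[]\n" in flow style even though block
+    #                     # style is the default
+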
+ + def need_more_events(self): + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return (len(self.events) < count+1) + + def increase_indent(self, flow=False, indentless=False): + self.indents.append(self.indent) + if self.indent is None: + if flow: + self.indent = self.best_indent + else: + self.indent = 0 + elif not indentless: + self.indent += self.best_indent + + # States. + + # Stream handlers. + + def expect_stream_start(self): + if isinstance(self.event, StreamStartEvent): + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError("expected StreamStartEvent, but got %s" + % self.event) + + def expect_nothing(self): + raise EmitterError("expected nothing, but got %s" % self.event) + + # Document handlers. + + def expect_first_document_start(self): + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator('...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = (first and not self.event.explicit and not self.canonical + and not self.event.version and not self.event.tags + and not self.check_empty_document()) + if not implicit: + self.write_indent() + self.write_indicator('---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator('...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError("expected DocumentStartEvent, but got %s" + % self.event) + + def expect_document_end(self): + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator('...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError("expected DocumentEndEvent, but got %s" + % self.event) + + def expect_document_root(self): + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
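+    # Editor's note (illustrative): expect_node() below dispatches on the event
+    # type and chooses flow or block style for collections, e.g.:
+    #
+    #     import yaml
+    #     yaml.dump({'a': [1, 2]})                           # "a:\n- 1\n- 2\n"
+    #     yaml.dump({'a': [1, 2]}, default_flow_style=True)  # "{a: [1, 2]}\n"
+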
+ + def expect_node(self, root=False, sequence=False, mapping=False, + simple_key=False): + self.root_context = root + self.sequence_context = sequence + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + self.process_anchor('&') + self.process_tag() + if isinstance(self.event, ScalarEvent): + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_sequence(): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.flow_level or self.canonical or self.event.flow_style \ + or self.check_empty_mapping(): + self.expect_flow_mapping() + else: + self.expect_block_mapping() + else: + raise EmitterError("expected NodeEvent, but got %s" % self.event) + + def expect_alias(self): + if self.event.anchor is None: + raise EmitterError("anchor is not specified for alias") + self.process_anchor('*') + self.state = self.states.pop() + + def expect_scalar(self): + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. + + def expect_flow_sequence(self): + self.write_indicator('[', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator(']', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator(']', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. 
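+    # Editor's note (illustrative): in flow context a mapping is written as
+    # '{key: value, ...}', e.g.:
+    #
+    #     import yaml
+    #     yaml.dump({'a': 1}, default_flow_style=True)   # "{a: 1}\n"
+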
+ + def expect_flow_mapping(self): + self.write_indicator('{', True, whitespace=True) + self.flow_level += 1 + self.increase_indent(flow=True) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + self.write_indicator('}', False) + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.flow_level -= 1 + if self.canonical: + self.write_indicator(',', False) + self.write_indent() + self.write_indicator('}', False) + self.state = self.states.pop() + else: + self.write_indicator(',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(':', True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. + + def expect_block_sequence(self): + indentless = (self.mapping_context and not self.indention) + self.increase_indent(flow=False, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + if not first and isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + self.write_indicator('-', True, indention=True) + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. 
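+    # Editor's note (illustrative): in block context a simple key is written
+    # inline as 'key: value'; keys that are multiline or longer than 128
+    # characters (see check_simple_key below) use the explicit form
+    # '? key' followed by ': value'.
+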
+ + def expect_block_mapping(self): + self.increase_indent(flow=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + if not first and isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + self.write_indent() + if self.check_simple_key(): + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator('?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + self.write_indicator(':', False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + self.write_indent() + self.write_indicator(':', True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. + + def check_empty_sequence(self): + return (isinstance(self.event, SequenceStartEvent) and self.events + and isinstance(self.events[0], SequenceEndEvent)) + + def check_empty_mapping(self): + return (isinstance(self.event, MappingStartEvent) and self.events + and isinstance(self.events[0], MappingEndEvent)) + + def check_empty_document(self): + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return (isinstance(event, ScalarEvent) and event.anchor is None + and event.tag is None and event.implicit and event.value == '') + + def check_simple_key(self): + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \ + and self.event.tag is not None: + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return (length < 128 and (isinstance(self.event, AliasEvent) + or (isinstance(self.event, ScalarEvent) + and not self.analysis.empty and not self.analysis.multiline) + or self.check_empty_sequence() or self.check_empty_mapping())) + + # Anchor, Tag, and Scalar processors. + + def process_anchor(self, indicator): + if self.event.anchor is None: + self.prepared_anchor = None + return + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator+self.prepared_anchor, True) + self.prepared_anchor = None + + def process_tag(self): + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if ((not self.canonical or tag is None) and + ((self.style == '' and self.event.implicit[0]) + or (self.style != '' and self.event.implicit[1]))): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = '!' 
+            self.prepared_tag = None
+        else:
+            if (not self.canonical or tag is None) and self.event.implicit:
+                self.prepared_tag = None
+                return
+        if tag is None:
+            raise EmitterError("tag is not specified")
+        if self.prepared_tag is None:
+            self.prepared_tag = self.prepare_tag(tag)
+        if self.prepared_tag:
+            self.write_indicator(self.prepared_tag, True)
+        self.prepared_tag = None
+
+    def choose_scalar_style(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.event.style == '"' or self.canonical:
+            return '"'
+        if not self.event.style and self.event.implicit[0]:
+            if (not (self.simple_key_context and
+                    (self.analysis.empty or self.analysis.multiline))
+                and (self.flow_level and self.analysis.allow_flow_plain
+                    or (not self.flow_level and self.analysis.allow_block_plain))):
+                return ''
+        if self.event.style and self.event.style in '|>':
+            if (not self.flow_level and not self.simple_key_context
+                    and self.analysis.allow_block):
+                return self.event.style
+        if not self.event.style or self.event.style == '\'':
+            if (self.analysis.allow_single_quoted and
+                    not (self.simple_key_context and self.analysis.multiline)):
+                return '\''
+        return '"'
+
+    def process_scalar(self):
+        if self.analysis is None:
+            self.analysis = self.analyze_scalar(self.event.value)
+        if self.style is None:
+            self.style = self.choose_scalar_style()
+        split = (not self.simple_key_context)
+        #if self.analysis.multiline and split \
+        #        and (not self.style or self.style in '\'\"'):
+        #    self.write_indent()
+        if self.style == '"':
+            self.write_double_quoted(self.analysis.scalar, split)
+        elif self.style == '\'':
+            self.write_single_quoted(self.analysis.scalar, split)
+        elif self.style == '>':
+            self.write_folded(self.analysis.scalar)
+        elif self.style == '|':
+            self.write_literal(self.analysis.scalar)
+        else:
+            self.write_plain(self.analysis.scalar, split)
+        self.analysis = None
+        self.style = None
+
+    # Analyzers.
+
+    def prepare_version(self, version):
+        major, minor = version
+        if major != 1:
+            raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+        return '%d.%d' % (major, minor)
+
+    def prepare_tag_handle(self, handle):
+        if not handle:
+            raise EmitterError("tag handle must not be empty")
+        if handle[0] != '!' or handle[-1] != '!':
+            raise EmitterError("tag handle must start and end with '!': %r" % handle)
+        for ch in handle[1:-1]:
+            if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-_'):
+                raise EmitterError("invalid character %r in the tag handle: %r"
+                        % (ch, handle))
+        return handle
+
+    def prepare_tag_prefix(self, prefix):
+        if not prefix:
+            raise EmitterError("tag prefix must not be empty")
+        chunks = []
+        start = end = 0
+        if prefix[0] == '!':
+            end = 1
+        while end < len(prefix):
+            ch = prefix[end]
+            if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+                    or ch in '-;/?!:@&=+$,_.~*\'()[]':
+                end += 1
+            else:
+                if start < end:
+                    chunks.append(prefix[start:end])
+                start = end = end+1
+                data = ch.encode('utf-8')
+                for ch in data:
+                    # Iterating over bytes yields ints on Python 3, so format
+                    # the byte value directly (ord() would raise TypeError).
+                    chunks.append('%%%02X' % ch)
+        if start < end:
+            chunks.append(prefix[start:end])
+        return ''.join(chunks)
+
+    def prepare_tag(self, tag):
+        if not tag:
+            raise EmitterError("tag must not be empty")
+        if tag == '!':
+            return tag
+        handle = None
+        suffix = tag
+        prefixes = sorted(self.tag_prefixes.keys())
+        for prefix in prefixes:
+            if tag.startswith(prefix) \
+                    and (prefix == '!'
or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix):] + chunks = [] + start = end = 0 + while end < len(suffix): + ch = suffix[end] + if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.~*\'()[]' \ + or (ch == '!' and handle != '!'): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end+1 + data = ch.encode('utf-8') + for ch in data: + chunks.append('%%%02X' % ch) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = ''.join(chunks) + if handle: + return '%s%s' % (handle, suffix_text) + else: + return '!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + if not anchor: + raise EmitterError("anchor must not be empty") + for ch in anchor: + if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_'): + raise EmitterError("invalid character %r in the anchor: %r" + % (ch, anchor)) + return anchor + + def analyze_scalar(self, scalar): + + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis(scalar=scalar, empty=True, multiline=False, + allow_flow_plain=False, allow_block_plain=True, + allow_single_quoted=True, allow_double_quoted=True, + allow_block=False) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith('---') or scalar.startswith('...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceded_by_whitespace = True + + # Last character or followed by a whitespace. + followed_by_whitespace = (len(scalar) == 1 or + scalar[1] in '\0 \t\r\n\x85\u2028\u2029') + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. + if ch in '#,[]{}&*!|>\'\"%@`': + flow_indicators = True + block_indicators = True + if ch in '?:': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in ',?[]{}': + flow_indicators = True + if ch == ':': + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == '#' and preceded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in '\n\x85\u2028\u2029': + line_breaks = True + if not (ch == '\n' or '\x20' <= ch <= '\x7E'): + if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF' + or '\uE000' <= ch <= '\uFFFD' + or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF': + unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. 
+ if ch == ' ': + if index == 0: + leading_space = True + if index == len(scalar)-1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in '\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar)-1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029') + followed_by_whitespace = (index+1 >= len(scalar) or + scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029') + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if (leading_space or leading_break + or trailing_space or trailing_break): + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if space_break or special_characters: + allow_flow_plain = allow_block_plain = \ + allow_single_quoted = allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. + if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis(scalar=scalar, + empty=False, multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block) + + # Writers. + + def flush_stream(self): + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # Write BOM if needed. 
+ if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write('\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, + whitespace=False, indention=False): + if self.whitespace or not need_whitespace: + data = indicator + else: + data = ' '+indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + indent = self.indent or 0 + if not self.indention or self.column > indent \ + or (self.column == indent and not self.whitespace): + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = ' '*(indent-self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + data = '%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + data = '%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. + + def write_single_quoted(self, text, split=True): + self.write_indicator('\'', True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != ' ': + if start+1 == end and self.column > self.best_width and split \ + and start != 0 and end != len(text): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'': + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == '\'': + data = '\'\'' + self.column += 2 + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + self.write_indicator('\'', False) + + ESCAPE_REPLACEMENTS = { + '\0': '0', + '\x07': 'a', + '\x08': 'b', + '\x09': 't', + '\x0A': 'n', + '\x0B': 'v', + '\x0C': 'f', + '\x0D': 'r', + '\x1B': 'e', + '\"': '\"', + '\\': '\\', + '\x85': 'N', + '\xA0': '_', + '\u2028': 'L', + '\u2029': 'P', + } + + def write_double_quoted(self, text, split=True): + self.write_indicator('"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \ + or not ('\x20' <= ch <= '\x7E' + or (self.allow_unicode + and ('\xA0' <= ch <= '\uD7FF' + 
or '\uE000' <= ch <= '\uFFFD'))): + if start < end: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = '\\'+self.ESCAPE_REPLACEMENTS[ch] + elif ch <= '\xFF': + data = '\\x%02X' % ord(ch) + elif ch <= '\uFFFF': + data = '\\u%04X' % ord(ch) + else: + data = '\\U%08X' % ord(ch) + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end+1 + if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \ + and self.column+(end-start) > self.best_width and split: + data = text[start:end]+'\\' + if start < end: + start = end + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == ' ': + data = '\\' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator('"', False) + + def determine_block_hints(self, text): + hints = '' + if text: + if text[0] in ' \n\x85\u2028\u2029': + hints += str(self.best_indent) + if text[-1] not in '\n\x85\u2028\u2029': + hints += '-' + elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029': + hints += '+' + return hints + + def write_folded(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('>'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + if not leading_space and ch is not None and ch != ' ' \ + and text[start] == '\n': + self.write_line_break() + leading_space = (ch == ' ') + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + spaces = (ch == ' ') + end += 1 + + def write_literal(self, text): + hints = self.determine_block_hints(text) + self.write_indicator('|'+hints, True) + if hints[-1:] == '+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in '\n\x85\u2028\u2029': + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + else: + if ch is None or ch in '\n\x85\u2028\u2029': + data = text[start:end] + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 + 
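
The scalar writers above (double-quoted, single-quoted, folded, literal) and write_plain below are what ultimately render every scalar. A minimal sketch of how they surface through the package's public API, assuming this vendored copy imports as yaml; the comments show the output I would expect:

import yaml

print(yaml.dump({'msg': 'hello'}), end='')   # msg: hello   (plain style, write_plain)
print(yaml.dump({'msg': 'a: b'}), end='')    # msg: 'a: b'  (": " rules out plain style)
print(yaml.dump('line1\nline2\n', default_style='|'), end='')  # literal block, write_literal
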
+ def write_plain(self, text, split=True): + if self.root_context: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = ' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != ' ': + if start+1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in '\n\x85\u2028\u2029': + if text[start] == '\n': + self.write_line_break() + for br in text[start:end]: + if br == '\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in ' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + spaces = (ch == ' ') + breaks = (ch in '\n\x85\u2028\u2029') + end += 1 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/error.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/error.py new file mode 100644 index 0000000000000000000000000000000000000000..b796b4dc519512c4825ff539a2e6aa20f4d370d0 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/error.py @@ -0,0 +1,75 @@ + +__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError'] + +class Mark: + + def __init__(self, name, index, line, column, buffer, pointer): + self.name = name + self.index = index + self.line = line + self.column = column + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + if self.buffer is None: + return None + head = '' + start = self.pointer + while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer-start > max_length/2-1: + head = ' ... ' + start += 5 + break + tail = '' + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029': + end += 1 + if end-self.pointer > max_length/2-1: + tail = ' ... 
' + end -= 5 + break + snippet = self.buffer[start:end] + return ' '*indent + head + snippet + tail + '\n' \ + + ' '*(indent+self.pointer-start+len(head)) + '^' + + def __str__(self): + snippet = self.get_snippet() + where = " in \"%s\", line %d, column %d" \ + % (self.name, self.line+1, self.column+1) + if snippet is not None: + where += ":\n"+snippet + return where + +class YAMLError(Exception): + pass + +class MarkedYAMLError(YAMLError): + + def __init__(self, context=None, context_mark=None, + problem=None, problem_mark=None, note=None): + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + + def __str__(self): + lines = [] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None \ + and (self.problem is None or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None: + lines.append(self.note) + return '\n'.join(lines) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/events.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/events.py new file mode 100644 index 0000000000000000000000000000000000000000..f79ad389cb6c9517e391dcd25534866bc9ccd36a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/events.py @@ -0,0 +1,86 @@ + +# Abstract classes. + +class Event(object): + def __init__(self, start_mark=None, end_mark=None): + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + attributes = [key for key in ['anchor', 'tag', 'implicit', 'value'] + if hasattr(self, key)] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) + for key in attributes]) + return '%s(%s)' % (self.__class__.__name__, arguments) + +class NodeEvent(Event): + def __init__(self, anchor, start_mark=None, end_mark=None): + self.anchor = anchor + self.start_mark = start_mark + self.end_mark = end_mark + +class CollectionStartEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None, + flow_style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class CollectionEndEvent(Event): + pass + +# Implementations. 
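
The abstract classes above define the contract for the event stream; the concrete event types follow below. A small sketch of the stream produced for a one-pair mapping, using the package's public parse/emit helpers (assuming the vendored copy imports as yaml; the listed names are what I would expect):

import yaml

events = list(yaml.parse('a: 1'))
print([type(e).__name__ for e in events])
# StreamStartEvent, DocumentStartEvent, MappingStartEvent,
# ScalarEvent, ScalarEvent, MappingEndEvent, DocumentEndEvent, StreamEndEvent
print(yaml.emit(events), end='')   # round-trips back to: a: 1
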
+ +class StreamStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, encoding=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.encoding = encoding + +class StreamEndEvent(Event): + pass + +class DocumentStartEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None, version=None, tags=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + self.version = version + self.tags = tags + +class DocumentEndEvent(Event): + def __init__(self, start_mark=None, end_mark=None, + explicit=None): + self.start_mark = start_mark + self.end_mark = end_mark + self.explicit = explicit + +class AliasEvent(NodeEvent): + pass + +class ScalarEvent(NodeEvent): + def __init__(self, anchor, tag, implicit, value, + start_mark=None, end_mark=None, style=None): + self.anchor = anchor + self.tag = tag + self.implicit = implicit + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class SequenceStartEvent(CollectionStartEvent): + pass + +class SequenceEndEvent(CollectionEndEvent): + pass + +class MappingStartEvent(CollectionStartEvent): + pass + +class MappingEndEvent(CollectionEndEvent): + pass + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/loader.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..e90c11224c38e559cdf0cb205f0692ebd4fb8681 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/loader.py @@ -0,0 +1,63 @@ + +__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader'] + +from .reader import * +from .scanner import * +from .parser import * +from .composer import * +from .constructor import * +from .resolver import * + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + BaseConstructor.__init__(self) + BaseResolver.__init__(self) + +class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + FullConstructor.__init__(self) + Resolver.__init__(self) + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + SafeConstructor.__init__(self) + Resolver.__init__(self) + +class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) + +# UnsafeLoader is the same as Loader (which is and was always unsafe on +# untrusted input). Use of either Loader or UnsafeLoader should be rare, since +# FullLoad should be able to load almost all YAML safely. Loader is left intact +# to ensure backwards compatibility. 
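
A short sketch of how the loader stacks defined here differ in practice, assuming the vendored copy imports as yaml (the UnsafeLoader class the note above describes follows below):

import yaml

doc = 'retries: 3'
print(yaml.load(doc, Loader=yaml.SafeLoader))   # {'retries': 3}; standard YAML tags only
print(yaml.load(doc, Loader=yaml.FullLoader))   # same result; also resolves most Python tags
# yaml.load(doc, Loader=yaml.UnsafeLoader) can construct arbitrary Python
# objects, so keep it away from untrusted input.
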
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver): + + def __init__(self, stream): + Reader.__init__(self, stream) + Scanner.__init__(self) + Parser.__init__(self) + Composer.__init__(self) + Constructor.__init__(self) + Resolver.__init__(self) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/nodes.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/nodes.py new file mode 100644 index 0000000000000000000000000000000000000000..c4f070c41e1fb1bc01af27d69329e92dded38908 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/nodes.py @@ -0,0 +1,49 @@ + +class Node(object): + def __init__(self, tag, value, start_mark, end_mark): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + def __repr__(self): + value = self.value + #if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + #else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + +class ScalarNode(Node): + id = 'scalar' + def __init__(self, tag, value, + start_mark=None, end_mark=None, style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.style = style + +class CollectionNode(Node): + def __init__(self, tag, value, + start_mark=None, end_mark=None, flow_style=None): + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.flow_style = flow_style + +class SequenceNode(CollectionNode): + id = 'sequence' + +class MappingNode(CollectionNode): + id = 'mapping' + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/parser.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..13a5995d292045d0f865a99abf692bd35dc87814 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/parser.py @@ -0,0 +1,589 @@ + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? 
+# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY } + +__all__ = ['Parser', 'ParserError'] + +from .error import MarkedYAMLError +from .tokens import * +from .events import * +from .scanner import * + +class ParserError(MarkedYAMLError): + pass + +class Parser: + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = { + '!': '!', + '!!': 'tag:yaml.org,2002:', + } + + def __init__(self): + self.current_event = None + self.yaml_version = None + self.tag_handles = {} + self.states = [] + self.marks = [] + self.state = self.parse_stream_start + + def dispose(self): + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def check_event(self, *choices): + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # Get the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # Get the next event and proceed further. 
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream    ::= STREAM-START implicit_document? explicit_document* STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+
+        # Parse the stream start.
+        token = self.get_token()
+        event = StreamStartEvent(token.start_mark, token.end_mark,
+                encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+
+        # Parse an implicit document.
+        if not self.check_token(DirectiveToken, DocumentStartToken,
+                StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+
+        # Parse any extra document end indicators.
+        while self.check_token(DocumentEndToken):
+            self.get_token()
+
+        # Parse an explicit document.
+        if not self.check_token(StreamEndToken):
+            token = self.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.check_token(DocumentStartToken):
+                raise ParserError(None, None,
+                        "expected '<document start>', but found %r"
+                        % self.peek_token().id,
+                        self.peek_token().start_mark)
+            token = self.get_token()
+            end_mark = token.end_mark
+            event = DocumentStartEvent(start_mark, end_mark,
+                    explicit=True, version=version, tags=tags)
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+
+        # Parse the document end.
+        token = self.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.check_token(DocumentEndToken):
+            token = self.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark,
+                explicit=explicit)
+
+        # Prepare the next state.
+ self.state = self.parse_document_start + + return event + + def parse_document_content(self): + if self.check_token(DirectiveToken, + DocumentStartToken, DocumentEndToken, StreamEndToken): + event = self.process_empty_scalar(self.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + self.yaml_version = None + self.tag_handles = {} + while self.check_token(DirectiveToken): + token = self.get_token() + if token.name == 'YAML': + if self.yaml_version is not None: + raise ParserError(None, None, + "found duplicate YAML directive", token.start_mark) + major, minor = token.value + if major != 1: + raise ParserError(None, None, + "found incompatible YAML document (version 1.* is required)", + token.start_mark) + self.yaml_version = token.value + elif token.name == 'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError(None, None, + "duplicate tag handle %r" % handle, + token.start_mark) + self.tag_handles[handle] = prefix + if self.tag_handles: + value = self.yaml_version, self.tag_handles.copy() + else: + value = self.yaml_version, None + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? + # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + return self.parse_node(block=True) + + def parse_flow_node(self): + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + return self.parse_node(block=True, indentless_sequence=True) + + def parse_node(self, block=False, indentless_sequence=False): + if self.check_token(AliasToken): + token = self.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) + self.state = self.states.pop() + else: + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.check_token(AnchorToken): + token = self.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.check_token(TagToken): + token = self.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.check_token(TagToken): + token = self.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.check_token(AnchorToken): + token = self.get_token() + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError("while parsing a node", start_mark, + "found undefined tag handle %r" % handle, + tag_mark) + tag = self.tag_handles[handle]+suffix + else: + tag = suffix + #if tag == '!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.") + if start_mark is None: + start_mark = end_mark = 
self.peek_token().start_mark + event = None + implicit = (tag is None or tag == '!') + if indentless_sequence and self.check_token(BlockEntryToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark) + self.state = self.parse_indentless_sequence_entry + else: + if self.check_token(ScalarToken): + token = self.get_token() + end_mark = token.end_mark + if (token.plain and tag is None) or tag == '!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + event = ScalarEvent(anchor, tag, implicit, token.value, + start_mark, end_mark, style=token.style) + self.state = self.states.pop() + elif self.check_token(FlowSequenceStartToken): + end_mark = self.peek_token().end_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_sequence_first_entry + elif self.check_token(FlowMappingStartToken): + end_mark = self.peek_token().end_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=True) + self.state = self.parse_flow_mapping_first_key + elif block and self.check_token(BlockSequenceStartToken): + end_mark = self.peek_token().start_mark + event = SequenceStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_sequence_first_entry + elif block and self.check_token(BlockMappingStartToken): + end_mark = self.peek_token().start_mark + event = MappingStartEvent(anchor, tag, implicit, + start_mark, end_mark, flow_style=False) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
+                    event = ScalarEvent(anchor, tag, (implicit, False), '',
+                            start_mark, end_mark)
+                    self.state = self.states.pop()
+                else:
+                    if block:
+                        node = 'block'
+                    else:
+                        node = 'flow'
+                    token = self.peek_token()
+                    raise ParserError("while parsing a %s node" % node, start_mark,
+                            "expected the node content, but found %r" % token.id,
+                            token.start_mark)
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block collection", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = SequenceEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    def parse_indentless_sequence_entry(self):
+        if self.check_token(BlockEntryToken):
+            token = self.get_token()
+            if not self.check_token(BlockEntryToken,
+                    KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.peek_token()
+        event = SequenceEndEvent(token.start_mark, token.start_mark)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping     ::= BLOCK-MAPPING_START
+    #                       ((KEY block_node_or_indentless_sequence?)?
+    #                       (VALUE block_node_or_indentless_sequence?)?)*
+    #                       BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        if self.check_token(KeyToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if not self.check_token(BlockEndToken):
+            token = self.peek_token()
+            raise ParserError("while parsing a block mapping", self.marks[-1],
+                    "expected <block end>, but found %r" % token.id, token.start_mark)
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
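
The inline-mapping case described in the note above is easy to see from the public API; a minimal sketch, assuming the vendored copy imports as yaml:

import yaml

# A KEY inside a flow sequence produces a single-pair mapping entry:
print(yaml.safe_load('[a: 1, b]'))   # [{'a': 1}, 'b']
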
+ + def parse_flow_sequence_first_entry(self): + token = self.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + if not self.check_token(FlowSequenceEndToken): + if not first: + if self.check_token(FlowEntryToken): + self.get_token() + else: + token = self.peek_token() + raise ParserError("while parsing a flow sequence", self.marks[-1], + "expected ',' or ']', but got %r" % token.id, token.start_mark) + + if self.check_token(KeyToken): + token = self.peek_token() + event = MappingStartEvent(None, None, True, + token.start_mark, token.end_mark, + flow_style=True) + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + token = self.get_token() + if not self.check_token(ValueToken, + FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + if self.check_token(ValueToken): + token = self.get_token() + if not self.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + self.state = self.parse_flow_sequence_entry + token = self.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
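
Flow mappings also allow a key with no value; parse_flow_mapping_empty_value below fills the gap with an empty scalar, which resolves to null. A minimal sketch, assuming the vendored copy imports as yaml:

import yaml

print(yaml.safe_load('{a: 1, b}'))   # {'a': 1, 'b': None}
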
+
+    def parse_flow_mapping_first_key(self):
+        token = self.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_flow_mapping_key(first=True)
+
+    def parse_flow_mapping_key(self, first=False):
+        if not self.check_token(FlowMappingEndToken):
+            if not first:
+                if self.check_token(FlowEntryToken):
+                    self.get_token()
+                else:
+                    token = self.peek_token()
+                    raise ParserError("while parsing a flow mapping", self.marks[-1],
+                            "expected ',' or '}', but got %r" % token.id, token.start_mark)
+            if self.check_token(KeyToken):
+                token = self.get_token()
+                if not self.check_token(ValueToken,
+                        FlowEntryToken, FlowMappingEndToken):
+                    self.states.append(self.parse_flow_mapping_value)
+                    return self.parse_flow_node()
+                else:
+                    self.state = self.parse_flow_mapping_value
+                    return self.process_empty_scalar(token.end_mark)
+            elif not self.check_token(FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_empty_value)
+                return self.parse_flow_node()
+        token = self.get_token()
+        event = MappingEndEvent(token.start_mark, token.end_mark)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_flow_mapping_value(self):
+        if self.check_token(ValueToken):
+            token = self.get_token()
+            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+                self.states.append(self.parse_flow_mapping_key)
+                return self.parse_flow_node()
+            else:
+                self.state = self.parse_flow_mapping_key
+                return self.process_empty_scalar(token.end_mark)
+        else:
+            self.state = self.parse_flow_mapping_key
+            token = self.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    def parse_flow_mapping_empty_value(self):
+        self.state = self.parse_flow_mapping_key
+        return self.process_empty_scalar(self.peek_token().start_mark)
+
+    def process_empty_scalar(self, mark):
+        return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/reader.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..774b0219b5932a0ee1c27e637371de5ba8d9cb16
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/reader.py
@@ -0,0 +1,185 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+#   Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+#   Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position `length` characters forward.
+#   reader.index - the number of the current character.
+#   reader.line, reader.column - the line and the column of the current character.
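
A small interactive sketch of the Reader interface just described, assuming the vendored copy imports as yaml; the printed values are what I would expect:

from yaml.reader import Reader

r = Reader('key: value\n')
print(r.peek())                    # 'k'  (look ahead without consuming)
print(r.prefix(4))                 # 'key:'
r.forward(5)                       # consume 'key: '
print(r.index, r.line, r.column)   # 5 0 5
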
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+    def __init__(self, name, position, character, encoding, reason):
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        if isinstance(self.character, bytes):
+            return "'%s' codec can't decode byte #x%02x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (self.encoding, ord(self.character), self.reason,
+                            self.name, self.position)
+        else:
+            return "unacceptable character #x%04x: %s\n" \
+                    "  in \"%s\", position %d" \
+                    % (self.character, self.reason,
+                            self.name, self.position)
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to a unicode string,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `bytes` object,
+    #  - a `str` object,
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
+
+    def __init__(self, stream):
+        self.name = None
+        self.stream = None
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = ''
+        self.pointer = 0
+        self.raw_buffer = None
+        self.raw_decode = None
+        self.encoding = None
+        self.index = 0
+        self.line = 0
+        self.column = 0
+        if isinstance(stream, str):
+            self.name = "<unicode string>"
+            self.check_printable(stream)
+            self.buffer = stream+'\0'
+        elif isinstance(stream, bytes):
+            self.name = "<byte string>"
+            self.raw_buffer = stream
+            self.determine_encoding()
+        else:
+            self.stream = stream
+            self.name = getattr(stream, 'name', "<file>")
+            self.eof = False
+            self.raw_buffer = None
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        try:
+            return self.buffer[self.pointer+index]
+        except IndexError:
+            self.update(index+1)
+            return self.buffer[self.pointer+index]
+
+    def prefix(self, length=1):
+        if self.pointer+length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer:self.pointer+length]
+
+    def forward(self, length=1):
+        if self.pointer+length+1 >= len(self.buffer):
+            self.update(length+1)
+        while length:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in '\n\x85\u2028\u2029' \
+                    or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+                self.line += 1
+                self.column = 0
+            elif ch != '\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        if self.stream is None:
+            return Mark(self.name, self.index, self.line, self.column,
+                    self.buffer, self.pointer)
+        else:
+            return Mark(self.name, self.index, self.line, self.column,
+                    None, None)
+
+    def determine_encoding(self):
+        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+            self.update_raw()
+        if isinstance(self.raw_buffer, bytes):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode
+                self.encoding = 'utf-16-le'
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode
+                self.encoding = 'utf-16-be'
+            else:
+                self.raw_decode = codecs.utf_8_decode
+                self.encoding = 'utf-8'
+        self.update(1)
+
+    NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+    def check_printable(self, data):
+        match = self.NON_PRINTABLE.search(data)
+        if match:
+            character = match.group()
+            position = self.index+(len(self.buffer)-self.pointer)+match.start()
+            raise ReaderError(self.name, position, ord(character),
+                    
'unicode', "special characters are not allowed") + + def update(self, length): + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer:] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, + 'strict', self.eof) + except UnicodeDecodeError as exc: + character = self.raw_buffer[exc.start] + if self.stream is not None: + position = self.stream_pointer-len(self.raw_buffer)+exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, + exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=4096): + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/representer.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/representer.py new file mode 100644 index 0000000000000000000000000000000000000000..3b0b192ef32ed7f5b7015456fe883c3327bb841e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/representer.py @@ -0,0 +1,389 @@ + +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError'] + +from .error import * +from .nodes import * + +import datetime, copyreg, types, base64, collections + +class RepresenterError(YAMLError): + pass + +class BaseRepresenter: + + yaml_representers = {} + yaml_multi_representers = {} + + def __init__(self, default_style=None, default_flow_style=False, sort_keys=True): + self.default_style = default_style + self.sort_keys = sort_keys + self.default_flow_style = default_flow_style + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent(self, data): + node = self.represent_data(data) + self.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + #if node is None: + # raise RepresenterError("recursive objects are not allowed: %r" % data) + return node + #self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, str(data)) + #if alias_key is not None: + # self.represented_objects[alias_key] = node + return node + + @classmethod + def add_representer(cls, data_type, representer): + if not 
'yaml_representers' in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + @classmethod + def add_multi_representer(cls, data_type, representer): + if not 'yaml_multi_representers' in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None): + if style is None: + style = self.default_style + node = ScalarNode(tag, value, style=style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + value = [] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + value = [] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + if self.sort_keys: + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_data(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + return False + +class SafeRepresenter(BaseRepresenter): + + def ignore_aliases(self, data): + if data is None: + return True + if isinstance(data, tuple) and data == (): + return True + if isinstance(data, (str, bytes, bool, int, float)): + return True + + def represent_none(self, data): + return self.represent_scalar('tag:yaml.org,2002:null', 'null') + + def represent_str(self, data): + return self.represent_scalar('tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + data = base64.encodestring(data).decode('ascii') + return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|') + + def represent_bool(self, data): + if data: + value = 'true' + else: + value = 'false' + return self.represent_scalar('tag:yaml.org,2002:bool', value) + + def represent_int(self, data): + return self.represent_scalar('tag:yaml.org,2002:int', str(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value*inf_value): + inf_value *= inf_value + + def represent_float(self, data): + if data != data or (data == 0.0 and data == 1.0): + value = '.nan' + elif data == self.inf_value: + value = '.inf' + elif data == -self.inf_value: + value = '-.inf' + else: + value = repr(data).lower() + # Note that in some cases `repr(data)` represents a float number + # without the 
decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag. We fix this by adding + # '.0' before the 'e' symbol. + if '.' not in value and 'e' in value: + value = value.replace('e', '.0e', 1) + return self.represent_scalar('tag:yaml.org,2002:float', value) + + def represent_list(self, data): + #pairs = (len(data) > 0 and isinstance(data, list)) + #if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + #if not pairs: + return self.represent_sequence('tag:yaml.org,2002:seq', data) + #value = [] + #for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + #return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + return self.represent_mapping('tag:yaml.org,2002:map', data) + + def represent_set(self, data): + value = {} + for key in data: + value[key] = None + return self.represent_mapping('tag:yaml.org,2002:set', value) + + def represent_date(self, data): + value = data.isoformat() + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + value = data.isoformat(' ') + return self.represent_scalar('tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + raise RepresenterError("cannot represent an object", data) + +SafeRepresenter.add_representer(type(None), + SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, + SafeRepresenter.represent_str) + +SafeRepresenter.add_representer(bytes, + SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, + SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, + SafeRepresenter.represent_int) + +SafeRepresenter.add_representer(float, + SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, + SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, + SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, + SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(datetime.date, + SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, + SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, + SafeRepresenter.represent_undefined) + +class Representer(SafeRepresenter): + + def represent_complex(self, data): + if data.imag == 0.0: + data = '%r' % data.real + elif data.real == 0.0: + data = '%rj' % data.imag + elif data.imag > 0: + data = '%r+%rj' % (data.real, data.imag) + else: + data = '%r%rj' % (data.real, data.imag) + return self.represent_scalar('tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + return self.represent_sequence('tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + name = '%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '') + + def represent_module(self, data): + return self.represent_scalar( + 'tag:yaml.org,2002:python/module:'+data.__name__, '') + + def represent_object(self, data): + # We 
use __reduce__ API to save the data. data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: + reduce = copyreg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError("cannot represent an object", data) + reduce = (list(reduce)+[None]*5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = 'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = 'tag:yaml.org,2002:python/object/apply:' + newobj = False + function_name = '%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems \ + and isinstance(state, dict) and newobj: + return self.represent_mapping( + 'tag:yaml.org,2002:python/object:'+function_name, state) + if not listitems and not dictitems \ + and isinstance(state, dict) and not state: + return self.represent_sequence(tag+function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag+function_name, value) + + def represent_ordered_dict(self, data): + # Provide uniform representation across different Python versions. 
+ data_type = type(data) + tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \ + % (data_type.__module__, data_type.__name__) + items = [[key, value] for key, value in data.items()] + return self.represent_sequence(tag, [items]) + +Representer.add_representer(complex, + Representer.represent_complex) + +Representer.add_representer(tuple, + Representer.represent_tuple) + +Representer.add_representer(type, + Representer.represent_name) + +Representer.add_representer(collections.OrderedDict, + Representer.represent_ordered_dict) + +Representer.add_representer(types.FunctionType, + Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, + Representer.represent_name) + +Representer.add_representer(types.ModuleType, + Representer.represent_module) + +Representer.add_multi_representer(object, + Representer.represent_object) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/resolver.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/resolver.py new file mode 100644 index 0000000000000000000000000000000000000000..013896d2f10619e0e75d2579cd63220338a7fef1 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/resolver.py @@ -0,0 +1,227 @@ + +__all__ = ['BaseResolver', 'Resolver'] + +from .error import * +from .nodes import * + +import re + +class ResolverError(YAMLError): + pass + +class BaseResolver: + + DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} + yaml_path_resolvers = {} + + def __init__(self): + self.resolver_exact_paths = [] + self.resolver_prefix_paths = [] + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + if not 'yaml_implicit_resolvers' in cls.__dict__: + implicit_resolvers = {} + for key in cls.yaml_implicit_resolvers: + implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:] + cls.yaml_implicit_resolvers = implicit_resolvers + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. 
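+        # A minimal sketch of a registration, assuming a Resolver subclass
+        # MyResolver and a hypothetical '!server' tag:
+        #   MyResolver.add_path_resolver('!server', ['servers', None], dict)
+        # This tags each mapping found one level below a top-level 'servers'
+        # key: the string element matches that key, and None matches any
+        # value under it.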
+ if not 'yaml_path_resolvers' in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError("Invalid path element: %s" % element) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif node_check not in [ScalarNode, SequenceNode, MappingNode] \ + and not isinstance(node_check, str) \ + and node_check is not None: + raise ResolverError("Invalid node checker: %s" % node_check) + if not isinstance(index_check, (str, int)) \ + and index_check is not None: + raise ResolverError("Invalid index checker: %s" % index_check) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] \ + and kind is not None: + raise ResolverError("Invalid node kind: %s" % kind) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, + current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, + current_node, current_index): + node_check, index_check = path[depth-1] + if isinstance(node_check, str): + if current_node.tag != node_check: + return + elif node_check is not None: + if not isinstance(current_node, node_check): + return + if index_check is True and current_index is not None: + return + if (index_check is False or index_check is None) \ + and current_index is None: + return + if isinstance(index_check, str): + if not (isinstance(current_index, ScalarNode) + and index_check == current_index.value): + return + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return + return True + + def resolve(self, kind, value, implicit): + if kind is ScalarNode and implicit[0]: + if value == '': + resolvers = self.yaml_implicit_resolvers.get('', []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + wildcard_resolvers = self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers + wildcard_resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if self.yaml_path_resolvers: + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return 
self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + +class Resolver(BaseResolver): + pass + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:bool', + re.compile(r'''^(?:yes|Yes|YES|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list('yYnNtTfFoO')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:float', + re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)? + |\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]* + |[-+]?\.(?:inf|Inf|INF) + |\.(?:nan|NaN|NAN))$''', re.X), + list('-+0123456789.')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:int', + re.compile(r'''^(?:[-+]?0b[0-1_]+ + |[-+]?0[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), + list('-+0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:merge', + re.compile(r'^(?:<<)$'), + ['<']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:null', + re.compile(r'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + ['~', 'n', 'N', '']) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:timestamp', + re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)? + (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list('0123456789')) + +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:value', + re.compile(r'^(?:=)$'), + ['=']) + +# The following resolver is only for documentation purposes. It cannot work +# because plain scalars cannot start with '!', '&', or '*'. +Resolver.add_implicit_resolver( + 'tag:yaml.org,2002:yaml', + re.compile(r'^(?:!|&|\*)$'), + list('!&*')) + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/scanner.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/scanner.py new file mode 100644 index 0000000000000000000000000000000000000000..7437ede1c608266aaca481955f438844479cab4f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/scanner.py @@ -0,0 +1,1435 @@ + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# Read comments in the Scanner code for more details. +# + +__all__ = ['Scanner', 'ScannerError'] + +from .error import MarkedYAMLError +from .tokens import * + +class ScannerError(MarkedYAMLError): + pass + +class SimpleKey: + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + +class Scanner: + + def __init__(self): + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. 
+        #
+        # Reader supports the following methods
+        #   self.peek(i=0)    # peek the next i-th character
+        #   self.prefix(l=1)  # peek the next l characters
+        #   self.forward(l=1) # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+        self.done = False
+
+        # The number of unclosed '{' and '['. `flow_level == 0` means block
+        # context.
+        self.flow_level = 0
+
+        # List of processed tokens that are not yet emitted.
+        self.tokens = []
+
+        # Add the STREAM-START token.
+        self.fetch_stream_start()
+
+        # Number of tokens that were emitted through the `get_token` method.
+        self.tokens_taken = 0
+
+        # The current indentation level.
+        self.indent = -1
+
+        # Past indentation levels.
+        self.indents = []
+
+        # Variables related to simple keys treatment.
+
+        # A simple key is a key that is not denoted by the '?' indicator.
+        # Example of simple keys:
+        #   ---
+        #   block simple key: value
+        #   ? not a simple key:
+        #   : { flow simple key: value }
+        # We emit the KEY token before all keys, so when we find a potential
+        # simple key, we try to locate the corresponding ':' indicator.
+        # Simple keys should be limited to a single line and 1024 characters.
+
+        # Can a simple key start at the current position? A simple key may
+        # start:
+        # - at the beginning of the line, not counting indentation spaces
+        #       (in block context),
+        # - after '{', '[', ',' (in the flow context),
+        # - after '?', ':', '-' (in the block context).
+        # In the block context, this flag also signifies if a block collection
+        # may start at the current position.
+        self.allow_simple_key = True
+
+        # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+        # for each level. The value is a SimpleKey record:
+        #   (token_number, required, index, line, column, mark)
+        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+        # '[', or '{' tokens.
+        self.possible_simple_keys = {}
+
+    # Public methods.
+
+    def check_token(self, *choices):
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # Return the next token, but do not delete it from the queue.
+        # Return None if no more tokens.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            return self.tokens[0]
+        else:
+            return None
+
+    def get_token(self):
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        if self.tokens:
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+
+    # Private methods.
+
+    def need_more_tokens(self):
+        if self.done:
+            return False
+        if not self.tokens:
+            return True
+        # The current token may be a potential simple key, so we
+        # need to look further.
+        self.stale_possible_simple_keys()
+        if self.next_possible_simple_key() == self.tokens_taken:
+            return True
+
+    def fetch_more_tokens(self):
+
+        # Eat whitespaces and comments until we reach the next token.
+        self.scan_to_next_token()
+
+        # Remove obsolete possible simple keys.
+        self.stale_possible_simple_keys()
+
+        # Compare the current indentation and column. It may add some tokens
+        # and decrease the current indentation level.
+        self.unwind_indent(self.column)
+
+        # Peek the next character.
+        ch = self.peek()
+
+        # Is it the end of stream?
+ if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? + if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + #if ch == '\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == '\'': + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '\"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError("while scanning for the next token", None, + "found character %r that cannot start any token" % ch, + self.get_mark()) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). 
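+        # Illustrative example: the two-line document
+        #   first
+        #   second: value
+        # is rejected with "mapping values are not allowed here", because the
+        # key candidate saved on the first line has gone stale by the time
+        # the ':' is reached on the second line.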
+        for level in list(self.possible_simple_keys):
+            key = self.possible_simple_keys[level]
+            if key.line != self.line  \
+                    or self.index-key.index > 1024:
+                if key.required:
+                    raise ScannerError("while scanning a simple key", key.mark,
+                            "could not find expected ':'", self.get_mark())
+                del self.possible_simple_keys[level]
+
+    def save_possible_simple_key(self):
+        # The next token may start a simple key. We check if it's possible
+        # and save its position. This function is called for
+        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+        # Check if a simple key is required at the current position.
+        required = not self.flow_level and self.indent == self.column
+
+        # The next token might be a simple key. Let's save its number and
+        # position.
+        if self.allow_simple_key:
+            self.remove_possible_simple_key()
+            token_number = self.tokens_taken+len(self.tokens)
+            key = SimpleKey(token_number, required,
+                    self.index, self.line, self.column, self.get_mark())
+            self.possible_simple_keys[self.flow_level] = key
+
+    def remove_possible_simple_key(self):
+        # Remove the saved possible key position at the current flow level.
+        if self.flow_level in self.possible_simple_keys:
+            key = self.possible_simple_keys[self.flow_level]
+
+            if key.required:
+                raise ScannerError("while scanning a simple key", key.mark,
+                        "could not find expected ':'", self.get_mark())
+
+            del self.possible_simple_keys[self.flow_level]
+
+    # Indentation functions.
+
+    def unwind_indent(self, column):
+
+        ## In flow context, tokens should respect indentation.
+        ## Actually the condition should be `self.indent >= column` according to
+        ## the spec. But this condition will prohibit intuitively correct
+        ## constructions such as
+        ## key : {
+        ## }
+        #if self.flow_level and self.indent > column:
+        #    raise ScannerError(None, None,
+        #            "invalid indentation or unclosed '[' or '{'",
+        #            self.get_mark())
+
+        # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+        if self.flow_level:
+            return
+
+        # In block context, we may need to issue the BLOCK-END tokens.
+        while self.indent > column:
+            mark = self.get_mark()
+            self.indent = self.indents.pop()
+            self.tokens.append(BlockEndToken(mark, mark))
+
+    def add_indent(self, column):
+        # Check if we need to increase indentation.
+        if self.indent < column:
+            self.indents.append(self.indent)
+            self.indent = column
+            return True
+        return False
+
+    # Fetchers.
+
+    def fetch_stream_start(self):
+        # We always add STREAM-START as the first token and STREAM-END as the
+        # last token.
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-START.
+        self.tokens.append(StreamStartToken(mark, mark,
+            encoding=self.encoding))
+
+
+    def fetch_stream_end(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+        self.possible_simple_keys = {}
+
+        # Read the token.
+        mark = self.get_mark()
+
+        # Add STREAM-END.
+        self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+        self.done = True
+
+    def fetch_directive(self):
+
+        # Set the current indentation to -1.
+        self.unwind_indent(-1)
+
+        # Reset simple keys.
+        self.remove_possible_simple_key()
+        self.allow_simple_key = False
+
+        # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.get_mark() + self.forward(3) + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + self.fetch_flow_collection_start(FlowSequenceStartToken) + + def fetch_flow_mapping_start(self): + self.fetch_flow_collection_start(FlowMappingStartToken) + + def fetch_flow_collection_start(self, TokenClass): + + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + + # Increase the flow level. + self.flow_level += 1 + + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Decrease the flow level. + self.flow_level -= 1 + + # No simple keys after ']' or '}'. + self.allow_simple_key = False + + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + + # Simple keys are allowed after ','. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add FLOW-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError(None, None, + "sequence entries are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + + # Simple keys are allowed after '-'. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not necessary a simple)? + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping keys are not allowed here", + self.get_mark()) + + # We may need to add BLOCK-MAPPING-START. 
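+            # Illustrative example: in an explicit-key document such as
+            #   ? complex key
+            #   : value
+            # the '?' opens a new block mapping, so BLOCK-MAPPING-START is
+            # emitted here before the KEY token.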
+ if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert(key.token_number-self.tokens_taken, + KeyToken(key.mark, key.mark)) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert(key.token_number-self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark)) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError(None, None, + "mapping values are not allowed here", + self.get_mark()) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.column): + mark = self.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.get_mark() + self.forward() + end_mark = self.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + + # ALIAS could be a simple key. + self.save_possible_simple_key() + + # No simple keys after ALIAS. + self.allow_simple_key = False + + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + + # ANCHOR could start a simple key. + self.save_possible_simple_key() + + # No simple keys after ANCHOR. + self.allow_simple_key = False + + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + + # TAG could start a simple key. + self.save_possible_simple_key() + + # No simple keys after TAG. + self.allow_simple_key = False + + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + + # A simple key may follow a block scalar. + self.allow_simple_key = True + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + self.fetch_flow_scalar(style='\'') + + def fetch_double(self): + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + + # A flow scalar could be a simple key. 
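+        # (Illustrative: in '"quoted key": value' the quoted scalar itself is
+        # the simple key, so its position is recorded before it is scanned.)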
+ self.save_possible_simple_key() + + # No simple keys after flow scalars. + self.allow_simple_key = False + + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + + # A plain scalar could be a simple key. + self.save_possible_simple_key() + + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.column == 0: + return True + + def check_document_start(self): + + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.column == 0: + if self.prefix(3) == '---' \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return True + + def check_document_end(self): + + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.column == 0: + if self.prefix(3) == '...' \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return True + + def check_block_entry(self): + + # BLOCK-ENTRY: '-' (' '|'\n') + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_key(self): + + # KEY(flow context): '?' + if self.flow_level: + return True + + # KEY(block context): '?' (' '|'\n') + else: + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_value(self): + + # VALUE(flow context): ':' + if self.flow_level: + return True + + # VALUE(block context): ':' (' '|'\n') + else: + return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029' + + def check_plain(self): + + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. + # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + ch = self.peek() + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \ + or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029' + and (ch == '-' or (not self.flow_level and ch in '?:'))) + + # Scanners. + + def scan_to_next_token(self): + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. 
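+        #
+        # A minimal sketch of the net effect on the token stream; the leading
+        # spaces and the comment are consumed silently:
+        #   >>> import yaml
+        #   >>> [t.__class__.__name__ for t in yaml.scan("  # comment\na: 1")]
+        #   ['StreamStartToken', 'BlockMappingStartToken', 'KeyToken',
+        #    'ScalarToken', 'ValueToken', 'ScalarToken', 'BlockEndToken',
+        #    'StreamEndToken']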
+ + if self.index == 0 and self.peek() == '\uFEFF': + self.forward() + found = False + while not found: + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + + def scan_directive(self): + # See the specification for details. + start_mark = self.get_mark() + self.forward() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.get_mark() + else: + end_mark = self.get_mark() + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # See the specification for details. + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + return value + + def scan_yaml_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + major = self.scan_yaml_directive_number(start_mark) + if self.peek() != '.': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or '.', but found %r" % self.peek(), + self.get_mark()) + self.forward() + minor = self.scan_yaml_directive_number(start_mark) + if self.peek() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a digit or ' ', but found %r" % self.peek(), + self.get_mark()) + return (major, minor) + + def scan_yaml_directive_number(self, start_mark): + # See the specification for details. + ch = self.peek() + if not ('0' <= ch <= '9'): + raise ScannerError("while scanning a directive", start_mark, + "expected a digit, but found %r" % ch, self.get_mark()) + length = 0 + while '0' <= self.peek(length) <= '9': + length += 1 + value = int(self.prefix(length)) + self.forward(length) + return value + + def scan_tag_directive_value(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + handle = self.scan_tag_directive_handle(start_mark) + while self.peek() == ' ': + self.forward() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.peek() + if ch != ' ': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_tag_directive_prefix(self, start_mark): + # See the specification for details. 
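+        # (Illustrative: for the directive "%TAG !e! tag:example.com,2000:app/"
+        # the handle scanned above is '!e!' and the prefix scanned here is
+        # 'tag:example.com,2000:app/'.)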
+ value = self.scan_tag_uri('directive', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + return value + + def scan_directive_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a directive", start_mark, + "expected a comment or a line break, but found %r" + % ch, self.get_mark()) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpreted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + start_mark = self.get_mark() + indicator = self.peek() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.forward() + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if not length: + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + value = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`': + raise ScannerError("while scanning an %s" % name, start_mark, + "expected alphabetic or numeric character, but found %r" + % ch, self.get_mark()) + end_mark = self.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # See the specification for details. + start_mark = self.get_mark() + ch = self.peek(1) + if ch == '<': + handle = None + self.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if self.peek() != '>': + raise ScannerError("while parsing a tag", start_mark, + "expected '>', but found %r" % self.peek(), + self.get_mark()) + self.forward() + elif ch in '\0 \t\r\n\x85\u2028\u2029': + handle = None + suffix = '!' + self.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = self.peek(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a tag", start_mark, + "expected ' ', but found %r" % ch, self.get_mark()) + value = (handle, suffix) + end_mark = self.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style): + # See the specification for details. + + if style == '>': + folded = True + else: + folded = False + + chunks = [] + start_mark = self.get_mark() + + # Scan the header. + self.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. 
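+        # (Illustrative: "key: |2" fixes the content indentation through the
+        # explicit increment, while a bare "key: |" auto-detects it from the
+        # indentation of the first non-empty line.)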
+ min_indent = self.indent+1 + if min_indent < 1: + min_indent = 1 + if increment is None: + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + indent = min_indent+increment-1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = '' + + # Scan the inner part of the block scalar. + while self.column == indent and self.peek() != '\0': + chunks.extend(breaks) + leading_non_space = self.peek() not in ' \t' + length = 0 + while self.peek(length) not in '\0\r\n\x85\u2028\u2029': + length += 1 + chunks.append(self.prefix(length)) + self.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if self.column == indent and self.peek() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if folded and line_break == '\n' \ + and leading_non_space and self.peek() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + #if folded and line_break == '\n': + # if not breaks: + # if self.peek() not in ' \t': + # chunks.append(' ') + # else: + # chunks.append(line_break) + #else: + # chunks.append(line_break) + else: + break + + # Chomp the tail. + if chomping is not False: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + + # We are done. + return ScalarToken(''.join(chunks), False, start_mark, end_mark, + style) + + def scan_block_scalar_indicators(self, start_mark): + # See the specification for details. + chomping = None + increment = None + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError("while scanning a block scalar", start_mark, + "expected indentation indicator in the range 1-9, but found 0", + self.get_mark()) + self.forward() + ch = self.peek() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.forward() + ch = self.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected chomping or indentation indicators, but found %r" + % ch, self.get_mark()) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # See the specification for details. + while self.peek() == ' ': + self.forward() + if self.peek() == '#': + while self.peek() not in '\0\r\n\x85\u2028\u2029': + self.forward() + ch = self.peek() + if ch not in '\0\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a block scalar", start_mark, + "expected a comment or a line break, but found %r" % ch, + self.get_mark()) + self.scan_line_break() + + def scan_block_scalar_indentation(self): + # See the specification for details. 
+        chunks = []
+        max_indent = 0
+        end_mark = self.get_mark()
+        while self.peek() in ' \r\n\x85\u2028\u2029':
+            if self.peek() != ' ':
+                chunks.append(self.scan_line_break())
+                end_mark = self.get_mark()
+            else:
+                self.forward()
+                if self.column > max_indent:
+                    max_indent = self.column
+        return chunks, max_indent, end_mark
+
+    def scan_block_scalar_breaks(self, indent):
+        # See the specification for details.
+        chunks = []
+        end_mark = self.get_mark()
+        while self.column < indent and self.peek() == ' ':
+            self.forward()
+        while self.peek() in '\r\n\x85\u2028\u2029':
+            chunks.append(self.scan_line_break())
+            end_mark = self.get_mark()
+            while self.column < indent and self.peek() == ' ':
+                self.forward()
+        return chunks, end_mark
+
+    def scan_flow_scalar(self, style):
+        # See the specification for details.
+        # Note that we loosen the indentation rules for quoted scalars. Quoted
+        # scalars don't need to adhere to indentation because " and ' clearly
+        # mark the beginning and the end of them. Therefore we are less
+        # restrictive than the specification requires. We only need to check
+        # that document separators are not included in scalars.
+        if style == '"':
+            double = True
+        else:
+            double = False
+        chunks = []
+        start_mark = self.get_mark()
+        quote = self.peek()
+        self.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while self.peek() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.forward()
+        end_mark = self.get_mark()
+        return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+                style)
+
+    ESCAPE_REPLACEMENTS = {
+        '0':    '\0',
+        'a':    '\x07',
+        'b':    '\x08',
+        't':    '\x09',
+        '\t':   '\x09',
+        'n':    '\x0A',
+        'v':    '\x0B',
+        'f':    '\x0C',
+        'r':    '\x0D',
+        'e':    '\x1B',
+        ' ':    '\x20',
+        '\"':   '\"',
+        '\\':   '\\',
+        '/':    '/',
+        'N':    '\x85',
+        '_':    '\xA0',
+        'L':    '\u2028',
+        'P':    '\u2029',
+    }
+
+    ESCAPE_CODES = {
+        'x':    2,
+        'u':    4,
+        'U':    8,
+    }
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # See the specification for details.
+        chunks = []
+        while True:
+            length = 0
+            while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+                length += 1
+            if length:
+                chunks.append(self.prefix(length))
+                self.forward(length)
+            ch = self.peek()
+            if not double and ch == '\'' and self.peek(1) == '\'':
+                chunks.append('\'')
+                self.forward(2)
+            elif (double and ch == '\'') or (not double and ch in '\"\\'):
+                chunks.append(ch)
+                self.forward()
+            elif double and ch == '\\':
+                self.forward()
+                ch = self.peek()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    self.forward()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    self.forward()
+                    for k in range(length):
+                        if self.peek(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                    "expected escape sequence of %d hexadecimal numbers, but found %r" %
+                                        (length, self.peek(k)), self.get_mark())
+                    code = int(self.prefix(length), 16)
+                    chunks.append(chr(code))
+                    self.forward(length)
+                elif ch in '\r\n\x85\u2028\u2029':
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                            "found unknown escape character %r" % ch, self.get_mark())
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # See the specification for details.
+ chunks = [] + length = 0 + while self.peek(length) in ' \t': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch == '\0': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", self.get_mark()) + elif ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # See the specification for details. + chunks = [] + while True: + # Instead of checking indentation, we check for document + # separators. + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + raise ScannerError("while scanning a quoted scalar", start_mark, + "found unexpected document separator", self.get_mark()) + while self.peek() in ' \t': + self.forward() + if self.peek() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',' or '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + chunks = [] + start_mark = self.get_mark() + end_mark = start_mark + indent = self.indent+1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + #if indent == 0: + # indent = 1 + spaces = [] + while True: + length = 0 + if self.peek() == '#': + break + while True: + ch = self.peek(length) + if ch in '\0 \t\r\n\x85\u2028\u2029' \ + or (ch == ':' and + self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029' + + (u',[]{}' if self.flow_level else u''))\ + or (self.flow_level and ch in ',?[]{}'): + break + length += 1 + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.prefix(length)) + self.forward(length) + end_mark = self.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if not spaces or self.peek() == '#' \ + or (not self.flow_level and self.column < indent): + break + return ScalarToken(''.join(chunks), True, start_mark, end_mark) + + def scan_plain_spaces(self, indent, start_mark): + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
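+        # (Illustrative: "key:\n\tvalue" is therefore rejected later with
+        # "found character '\t' that cannot start any token", since only
+        # spaces are consumed here.)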
+ chunks = [] + length = 0 + while self.peek(length) in ' ': + length += 1 + whitespaces = self.prefix(length) + self.forward(length) + ch = self.peek() + if ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + breaks = [] + while self.peek() in ' \r\n\x85\u2028\u2029': + if self.peek() == ' ': + self.forward() + else: + breaks.append(self.scan_line_break()) + prefix = self.prefix(3) + if (prefix == '---' or prefix == '...') \ + and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029': + return + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + ch = self.peek() + if ch != '!': + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length = 1 + ch = self.peek(length) + if ch != ' ': + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-_': + length += 1 + ch = self.peek(length) + if ch != '!': + self.forward(length) + raise ScannerError("while scanning a %s" % name, start_mark, + "expected '!', but found %r" % ch, self.get_mark()) + length += 1 + value = self.prefix(length) + self.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # See the specification for details. + # Note: we do not check if URI is well-formed. + chunks = [] + length = 0 + ch = self.peek(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \ + or ch in '-;/?:@&=+$,_.!~*\'()[]%': + if ch == '%': + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = self.peek(length) + if length: + chunks.append(self.prefix(length)) + self.forward(length) + length = 0 + if not chunks: + raise ScannerError("while parsing a %s" % name, start_mark, + "expected URI, but found %r" % ch, self.get_mark()) + return ''.join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # See the specification for details. 
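+        # (Illustrative: a '%21' escape inside a tag URI is collected here
+        # and decoded through UTF-8 to '!'.)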
+        codes = []
+        mark = self.get_mark()
+        while self.peek() == '%':
+            self.forward()
+            for k in range(2):
+                if self.peek(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError("while scanning a %s" % name, start_mark,
+                            "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
+                            % self.peek(k), self.get_mark())
+            codes.append(int(self.prefix(2), 16))
+            self.forward(2)
+        try:
+            value = bytes(codes).decode('utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+        return value
+
+    def scan_line_break(self):
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.peek()
+        if ch in '\r\n\x85':
+            if self.prefix(2) == '\r\n':
+                self.forward(2)
+            else:
+                self.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.forward()
+            return ch
+        return ''
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/serializer.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe911e67ae7a739abb491fbbc6834b9c37bbda4b
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+    pass
+
+class Serializer:
+
+    ANCHOR_TEMPLATE = 'id%03d'
+
+    def __init__(self, encoding=None,
+            explicit_start=None, explicit_end=None, version=None, tags=None):
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        self.use_version = version
+        self.use_tags = tags
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+        self.closed = None
+
+    def open(self):
+        if self.closed is None:
+            self.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        else:
+            raise SerializerError("serializer is already opened")
+
+    def close(self):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif not self.closed:
+            self.emit(StreamEndEvent())
+            self.closed = True
+
+    #def __del__(self):
+    #    self.close()
+
+    def serialize(self, node):
+        if self.closed is None:
+            raise SerializerError("serializer is not opened")
+        elif self.closed:
+            raise SerializerError("serializer is closed")
+        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+            version=self.use_version, tags=self.use_tags))
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            self.anchors[node] = None
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        self.last_anchor_id += 1
+        return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+    def serialize_node(self, node, parent, index):
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolve(ScalarNode, node.value, (False, True))
+                implicit = (node.tag == detected_tag), (node.tag == default_tag)
+                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+                    style=node.style))
+            elif isinstance(node, SequenceNode):
+                implicit = (node.tag
+                            == self.resolve(SequenceNode, node.value, True))
+                self.emit(SequenceStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emit(SequenceEndEvent())
+            elif isinstance(node, MappingNode):
+                implicit = (node.tag
+                            == self.resolve(MappingNode, node.value, True))
+                self.emit(MappingStartEvent(alias, node.tag, implicit,
+                    flow_style=node.flow_style))
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emit(MappingEndEvent())
+            self.ascend_resolver()
+
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/tokens.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/tokens.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d0b48a394ac8c019b401516a12f688df361cf90
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/grafana-operator/venv/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+    def __init__(self, start_mark, end_mark):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+    def __repr__(self):
+        attributes = [key for key in self.__dict__
+                if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+                for key in attributes])
+        return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+#    id = '<byte order mark>'
+
+class DirectiveToken(Token):
+    id = '<directive>'
+    def __init__(self, name, value, start_mark, end_mark):
+        self.name = name
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+    id = '<document start>'
+
+class DocumentEndToken(Token):
+    id = '<document end>'
+
+class StreamStartToken(Token):
+    id = '<stream start>'
+    def __init__(self, start_mark=None, end_mark=None,
+            encoding=None):
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.encoding = encoding
+
+class StreamEndToken(Token):
+    id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+    id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+    id = '<block mapping start>'
+
+class BlockEndToken(Token):
+    id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+    id = '['
+
+class FlowMappingStartToken(Token):
+    id = '{'
+
+class FlowSequenceEndToken(Token):
+    id = ']'
+
+class FlowMappingEndToken(Token):
+    id = '}'
+
+class KeyToken(Token):
+    id = '?'
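+
+# The `id` strings on these token classes appear verbatim in parser error
+# messages, e.g. "expected <block end>, but found '?'".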
+
+class ValueToken(Token):
+    id = ':'
+
+class BlockEntryToken(Token):
+    id = '-'
+
+class FlowEntryToken(Token):
+    id = ','
+
+class AliasToken(Token):
+    id = '<alias>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class AnchorToken(Token):
+    id = '<anchor>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class TagToken(Token):
+    id = '<tag>'
+    def __init__(self, value, start_mark, end_mark):
+        self.value = value
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+class ScalarToken(Token):
+    id = '<scalar>'
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        self.value = value
+        self.plain = plain
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+        self.style = style
+
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.flake8 b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.flake8
new file mode 100644
index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.flake8
@@ -0,0 +1,9 @@
+[flake8]
+max-line-length = 99
+select: E,W,F,C,N
+exclude:
+    venv
+    .git
+    build
+    dist
+    *.egg_info
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.gitignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..7d315ecbda5024f3f81756c91caa6d7256970db0
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.gitignore
@@ -0,0 +1,4 @@
+build
+*.charm
+.idea
+__pycache__
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.jujuignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.jujuignore
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd559eabeae93e4d23215fa450130fa9b37ace
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/.jujuignore
@@ -0,0 +1,3 @@
+/venv
+*.py[cod]
+*.charm
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.
By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. 
+ + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. 
Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. 
+Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6f3abb7fe9ce429ce54cc9009e93e1efede56fec
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/README.md
@@ -0,0 +1,64 @@
+# Grafana Charm
+
+## Description
+
+This is the Grafana charm for Kubernetes using the Operator Framework.
+
+## Usage
+
+Initial setup (ensure microk8s is a clean slate with `microk8s.reset` or a fresh install with `snap install microk8s --classic`):
+```bash
+microk8s.enable dns storage registry dashboard
+juju bootstrap microk8s mk8s
+juju add-model lma
+juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+```
+
+Deploy Grafana on its own:
+```bash
+git clone git@github.com:canonical/grafana-operator.git
+cd grafana-operator
+charmcraft build
+juju deploy ./grafana.charm --resource grafana-image=grafana/grafana:7.2.1
+```
+
+View the dashboard in a browser:
+1. `juju status` to check the IP of the running Grafana application
+2. Navigate to `http://IP_ADDRESS:3000`
+3. Log in with the default credentials (username=admin, password=admin).
+
+Add Prometheus as a datasource:
+```bash
+git clone git@github.com:canonical/prometheus-operator.git
+cd prometheus-operator
+charmcraft build
+juju deploy ./prometheus.charm
+juju add-relation grafana prometheus
+watch -c juju status --color  # wait for things to settle down
+```
+> Once the deployed charm and relation settle, you should be able to see Prometheus data propagating to the Grafana dashboard.
+
+### High Availability Grafana
+
+This charm is written to support a high-availability Grafana cluster, but a database relation (MySQL or PostgreSQL) is required.
+
+If HA is not required, there is no need to add a database relation.
+
+> NOTE: HA should not be considered for production use.
+
+...
+
+## Developing
+
+Create and activate a virtualenv,
+and install the development requirements:
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/config.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c91c65a3567d2fba3572c126f52f9f626c2ef05f
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/config.yaml
@@ -0,0 +1,11 @@
+options:
+  port:
+    description: The port grafana will be listening on
+    type: int
+    default: 3000
+  grafana_log_level:
+    type: string
+    description: |
+      Logging level for Grafana. Options are "debug", "info",
+      "warn", "error", and "critical".
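+    # Hypothetical usage example: with the charm deployed as 'grafana',
+    # the level can be changed at runtime with:
+    #   juju config grafana grafana_log_level=debug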
+ default: info \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/icon.svg b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/icon.svg new file mode 100644 index 0000000000000000000000000000000000000000..2ad84eebbd3188fa28bb7f2379b78ce1a0a1933f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/icon.svg @@ -0,0 +1,12 @@ + + + + + + + + + + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1979c9470f38862d1253b9a6ba62a169cfc48022 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/metadata.yaml @@ -0,0 +1,34 @@ +name: grafana +summary: Data visualization and observability with Grafana +maintainers: + - Justin Clark +description: | + Grafana provides dashboards for monitoring data and this + charm is written to allow for HA on Kubernetes and can take + multiple data sources (for example, Prometheus). +tags: + - lma + - grafana + - prometheus + - monitoring + - observability +series: + - kubernetes +provides: + grafana-source: + interface: grafana-datasource + grafana-dashboard: + interface: grafana-dash +requires: + database: + interface: db + limit: 1 +peers: + grafana: + interface: grafana-peers +storage: + sqlitedb: + type: filesystem + location: /var/lib/grafana +deployment: + service: loadbalancer diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements-dev.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..eded44146a5877d5d81b343988b516c4acaa4573 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements-dev.txt @@ -0,0 +1,2 @@ +-r requirements.txt +flake8 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca625b4c913fa655ee7beb6ab2769131f7b5a21c --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/requirements.txt @@ -0,0 +1,2 @@ +ops +git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/run_tests b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/run_tests new file mode 100755 index 0000000000000000000000000000000000000000..14bb4f4e1b3a9a8ffef0da6da128bbddb8861ce5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/run_tests @@ -0,0 +1,16 @@ +#!/bin/sh -e +# Copyright 2020 Justin +# See LICENSE file for licensing details. 
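+# Lints with flake8, then runs the unit tests under src/ with unittest.
+# If a venv/ directory exists and no virtualenv is already active, it is
+# activated first so the pinned development dependencies are used.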
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+flake8
+python3 -m unittest -v "$@"
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..1053f8f871535a9eaec0f1f0712ebddd2218f16d
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/src/charm.py
@@ -0,0 +1,494 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+import logging
+import hashlib
+import textwrap
+
+from oci_image import OCIImageResource, OCIImageResourceError
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+log = logging.getLogger()
+
+
+# These are the required and optional relation data fields
+# In other words, when relating to this charm, these are the fields
+# that will be processed by this charm.
+REQUIRED_DATASOURCE_FIELDS = {
+    'private-address',  # the hostname/IP of the data source server
+    'port',             # the port of the data source server
+    'source-type',      # the data source type (e.g. prometheus)
+}
+
+OPTIONAL_DATASOURCE_FIELDS = {
+    'source-name',  # a human-readable name of the source
+}
+
+# https://grafana.com/docs/grafana/latest/administration/configuration/#database
+REQUIRED_DATABASE_FIELDS = {
+    'type',  # mysql, postgres or sqlite3 (sqlite3 doesn't work for HA)
+    'host',  # in the form '<host>:<port>', e.g. 127.0.0.1:3306
+    'name',
+    'user',
+    'password',
+}
+
+# verify with Grafana documentation to ensure fields have valid values
+# as this charm will not directly handle these cases
+# TODO: fill with optional fields
+OPTIONAL_DATABASE_FIELDS = set()
+
+VALID_DATABASE_TYPES = {'mysql', 'postgres', 'sqlite3'}
+
+
+def get_container(pod_spec, container_name):
+    """Find and return the first container in pod_spec whose name is
+    container_name; raise a ValueError if no such container exists."""
+    for container in pod_spec['containers']:
+        if container['name'] == container_name:
+            return container
+    raise ValueError("Unable to find container named '{}' in pod spec".format(
+        container_name))
+
+
+class GrafanaK8s(CharmBase):
+    """Charm to run Grafana on Kubernetes.
+
+    This charm allows for high-availability
+    (as long as a non-sqlite database relation is present).
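+
+    An illustrative (not prescriptive) way to enable HA, assuming a deployed
+    database charm named 'mysql' that provides the 'db' interface declared
+    in metadata.yaml:
+
+        juju deploy mysql
+        juju add-relation grafana mysql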
+
+    Developers of this charm should be aware of the Grafana provisioning docs:
+    https://grafana.com/docs/grafana/latest/administration/provisioning/
+    """
+
+    datastore = StoredState()
+
+    def __init__(self, *args):
+        log.debug('Initializing charm.')
+        super().__init__(*args)
+
+        # -- get image information
+        self.image = OCIImageResource(self, 'grafana-image')
+
+        # -- standard hooks
+        self.framework.observe(self.on.config_changed, self.on_config_changed)
+        self.framework.observe(self.on.update_status, self.on_update_status)
+        self.framework.observe(self.on.stop, self._on_stop)
+
+        # -- grafana-source relation observations
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self.on_grafana_source_changed)
+        self.framework.observe(self.on['grafana-source'].relation_broken,
+                               self.on_grafana_source_broken)
+
+        # -- grafana (peer) relation observations
+        self.framework.observe(self.on['grafana'].relation_changed,
+                               self.on_peer_changed)
+        # self.framework.observe(self.on['grafana'].relation_departed,
+        #                        self.on_peer_departed)
+
+        # -- database relation observations
+        self.framework.observe(self.on['database'].relation_changed,
+                               self.on_database_changed)
+        self.framework.observe(self.on['database'].relation_broken,
+                               self.on_database_broken)
+
+        # -- initialize states --
+        self.datastore.set_default(sources=dict())  # available data sources
+        self.datastore.set_default(source_names=set())  # unique source names
+        self.datastore.set_default(sources_to_delete=set())
+        self.datastore.set_default(database=dict())  # db configuration
+
+    @property
+    def has_peer(self) -> bool:
+        rel = self.model.get_relation('grafana')
+        return len(rel.units) > 0 if rel is not None else False
+
+    @property
+    def has_db(self) -> bool:
+        """Only consider a DB connection if we have config info."""
+        return len(self.datastore.database) > 0
+
+    def _on_stop(self, _):
+        """Go into maintenance state if the unit is stopped."""
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def on_config_changed(self, _):
+        self.configure_pod()
+
+    def on_update_status(self, _):
+        """Various health checks of the charm."""
+        self._check_high_availability()
+
+    def on_grafana_source_changed(self, event):
+        """Get relation data for Grafana source and set k8s pod spec.
+
+        This event handler (if the unit is the leader) will get data for
+        an incoming grafana-source relation and make sure the relation data
+        is available in the app's datastore object (StoredState).
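+
+        A related unit is expected to publish at least the fields listed in
+        REQUIRED_DATASOURCE_FIELDS, e.g. (illustrative values only):
+            {'private-address': '192.0.2.1', 'port': 9090,
+             'source-type': 'prometheus'}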
+ """ + + # if this unit is the leader, set the required data + # of the grafana-source in this charm's datastore + if not self.unit.is_leader(): + return + + # if there is no available unit, remove data-source info if it exists + if event.unit is None: + log.warning("event unit can't be None when setting data sources.") + return + + # dictionary of all the required/optional datasource field values + # using this as a more generic way of getting data source fields + datasource_fields = \ + {field: event.relation.data[event.unit].get(field) for field in + REQUIRED_DATASOURCE_FIELDS | OPTIONAL_DATASOURCE_FIELDS} + + missing_fields = [field for field + in REQUIRED_DATASOURCE_FIELDS + if datasource_fields.get(field) is None] + # check the relation data for missing required fields + if len(missing_fields) > 0: + log.error("Missing required data fields for grafana-source " + "relation: {}".format(missing_fields)) + self._remove_source_from_datastore(event.relation.id) + return + + # specifically handle optional fields if necessary + # check if source-name was not passed or if we have already saved the provided name + if datasource_fields['source-name'] is None\ + or datasource_fields['source-name'] in self.datastore.source_names: + default_source_name = '{}_{}'.format( + event.app.name, + event.relation.id + ) + log.warning("No name 'grafana-source' or provided name is already in use. " + "Using safe default: {}.".format(default_source_name)) + datasource_fields['source-name'] = default_source_name + + self.datastore.source_names.add(datasource_fields['source-name']) + + # set the first grafana-source as the default (needed for pod config) + # if `self.datastore.sources` is currently empty, this is the first + datasource_fields['isDefault'] = 'false' + if not dict(self.datastore.sources): + datasource_fields['isDefault'] = 'true' + + # add unit name so the source can be removed might be a + # duplicate of 'source-name', but this will guarantee lookup + datasource_fields['unit_name'] = event.unit.name + + # add the new datasource relation data to the current state + new_source_data = { + field: value for field, value in datasource_fields.items() + if value is not None + } + self.datastore.sources.update({event.relation.id: new_source_data}) + self.configure_pod() + + def on_grafana_source_broken(self, event): + """When a grafana-source is removed, delete from the datastore.""" + if self.unit.is_leader(): + self._remove_source_from_datastore(event.relation.id) + self.configure_pod() + + def on_peer_changed(self, _): + # TODO: https://grafana.com/docs/grafana/latest/tutorials/ha_setup/ + # According to these docs ^, as long as we have a DB, HA should + # work out of the box if we are OK with "Sticky Sessions" + # but having "Stateless Sessions" could require more config + + # if the config changed, set a new pod spec + self.configure_pod() + + def on_peer_departed(self, _): + """Sets pod spec with new info.""" + # TODO: setting pod spec shouldn't do anything now, + # but if we ever need to change config based peer units, + # we will want to make sure configure_pod() is called + self.configure_pod() + + def on_database_changed(self, event): + """Sets configuration information for database connection.""" + if not self.unit.is_leader(): + return + + if event.unit is None: + log.warning("event unit can't be None when setting db config.") + return + + # save the necessary configuration of this database connection + database_fields = \ + {field: event.relation.data[event.unit].get(field) for field in + 
REQUIRED_DATABASE_FIELDS | OPTIONAL_DATABASE_FIELDS} + + # if any required fields are missing, warn the user and return + missing_fields = [field for field + in REQUIRED_DATABASE_FIELDS + if database_fields.get(field) is None] + if len(missing_fields) > 0: + log.error("Missing required data fields for related database " + "relation: {}".format(missing_fields)) + return + + # check if the passed database type is not in VALID_DATABASE_TYPES + if database_fields['type'] not in VALID_DATABASE_TYPES: + log.error('Grafana can only accept databases of the following ' + 'types: {}'.format(VALID_DATABASE_TYPES)) + return + + # add the new database relation data to the datastore + self.datastore.database.update({ + field: value for field, value in database_fields.items() + if value is not None + }) + self.configure_pod() + + def on_database_broken(self, _): + """Removes database connection info from datastore. + + We are guaranteed to only have one DB connection, so clearing + datastore.database is all we need for the change to be propagated + to the pod spec.""" + if not self.unit.is_leader(): + return + + # remove the existing database info from datastore + self.datastore.database = dict() + + # set pod spec because datastore config has changed + self.configure_pod() + + def _remove_source_from_datastore(self, rel_id): + """Remove the grafana-source from the datastore. + + Once removed from the datastore, this datasource will not + part of the next pod spec.""" + log.info('Removing all data for relation: {}'.format(rel_id)) + removed_source = self.datastore.sources.pop(rel_id, None) + if removed_source is None: + log.warning('Could not remove source for relation: {}'.format( + rel_id)) + else: + # free name from charm's set of source names + # and save to set which will be used in set_pod_spec + self.datastore.source_names.remove(removed_source['source-name']) + self.datastore.sources_to_delete.add(removed_source['source-name']) + + def _check_high_availability(self): + """Checks whether the configuration allows for HA.""" + if self.has_peer: + if self.has_db: + log.info('high availability possible.') + status = MaintenanceStatus('Grafana ready for HA.') + else: + log.warning('high availability not possible ' + 'with current configuration.') + status = BlockedStatus('Need database relation for HA.') + else: + log.info('running Grafana on single node.') + status = MaintenanceStatus('Grafana ready on single node.') + + # make sure we don't have a maintenance status overwrite + # a currently active status + if isinstance(status, MaintenanceStatus) \ + and isinstance(self.unit.status, ActiveStatus): + return status + + self.unit.status = status + return status + + def _make_delete_datasources_config_text(self) -> str: + """Generate text of data sources to delete.""" + if not self.datastore.sources_to_delete: + return "\n" + + delete_datasources_text = textwrap.dedent(""" + deleteDatasources:""") + for name in self.datastore.sources_to_delete: + delete_datasources_text += textwrap.dedent(""" + - name: {} + orgId: 1""".format(name)) + + # clear datastore.sources_to_delete and return text result + self.datastore.sources_to_delete.clear() + return delete_datasources_text + '\n\n' + + def _make_data_source_config_text(self) -> str: + """Build config based on Data Sources section of provisioning docs.""" + # get starting text for the config file and sources to delete + delete_text = self._make_delete_datasources_config_text() + config_text = textwrap.dedent(""" + apiVersion: 1 + """) + config_text += 
delete_text + if self.datastore.sources: + config_text += "datasources:" + for rel_id, source_info in self.datastore.sources.items(): + # TODO: handle more optional fields and verify that current + # defaults are what we want (e.g. "access") + config_text += textwrap.dedent(""" + - name: {0} + type: {1} + access: proxy + url: http://{2}:{3} + isDefault: {4} + editable: true + orgId: 1""").format( + source_info['source-name'], + source_info['source-type'], + source_info['private-address'], + source_info['port'], + source_info['isDefault'], + ) + + # check if there these are empty + return config_text + '\n' + + def _update_pod_data_source_config_file(self, pod_spec): + """Adds datasources to pod configuration.""" + file_text = self._make_data_source_config_text() + data_source_file_meta = { + 'name': 'grafana-datasources', + 'mountPath': '/etc/grafana/provisioning/datasources', + 'files': [{ + 'path': 'datasources.yaml', + 'content': file_text, + }] + } + container = get_container(pod_spec, self.app.name) + container['volumeConfig'].append(data_source_file_meta) + + # get hash string of the new file text and put into container config + # if this changes, it will trigger a pod restart + file_text_hash = hashlib.md5(file_text.encode()).hexdigest() + if 'DATASOURCES_YAML' in container['envConfig'] \ + and container['envConfig']['DATASOURCES_YAML'] != file_text_hash: + log.info('datasources.yaml hash has changed. ' + 'Triggering pod restart.') + container['envConfig']['DATASOURCES_YAML'] = file_text_hash + + def _make_config_ini_text(self): + """Create the text of the config.ini file. + + More information about this can be found in the Grafana docs: + https://grafana.com/docs/grafana/latest/administration/configuration/ + """ + + config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """.format( + self.model.config['grafana_log_level'], + )) + + # if there is a database available, add that information + if self.datastore.database: + db_config = self.datastore.database + config_text += textwrap.dedent(""" + [database] + type = {0} + host = {1} + name = {2} + user = {3} + password = {4} + url = {0}://{3}:{4}@{1}/{2}""".format( + db_config['type'], + db_config['host'], + db_config['name'], + db_config['user'], + db_config['password'], + )) + return config_text + + def _update_pod_config_ini_file(self, pod_spec): + file_text = self._make_config_ini_text() + config_ini_file_meta = { + 'name': 'grafana-config-ini', + 'mountPath': '/etc/grafana', + 'files': [{ + 'path': 'grafana.ini', + 'content': file_text + }] + } + container = get_container(pod_spec, self.app.name) + container['volumeConfig'].append(config_ini_file_meta) + + # get hash string of the new file text and put into container config + # if this changes, it will trigger a pod restart + file_text_hash = hashlib.md5(file_text.encode()).hexdigest() + if 'GRAFANA_INI' in container['envConfig'] \ + and container['envConfig']['GRAFANA_INI'] != file_text_hash: + log.info('grafana.ini hash has changed. 
Triggering pod restart.') + container['envConfig']['GRAFANA_INI'] = file_text_hash + + def _build_pod_spec(self): + """Builds the pod spec based on available info in datastore`.""" + + config = self.model.config + + spec = { + 'version': 3, + 'containers': [{ + 'name': self.app.name, + 'image': "ubuntu/grafana:latest", + 'ports': [{ + 'containerPort': config['port'], + 'protocol': 'TCP' + }], + 'volumeConfig': [], + 'envConfig': {}, # used to store hashes of config file text + 'kubernetes': { + 'readinessProbe': { + 'httpGet': { + 'path': '/api/health', + 'port': config['port'] + }, + 'initialDelaySeconds': 10, + 'timeoutSeconds': 30 + }, + }, + }] + } + + return spec + + def configure_pod(self): + """Set Juju / Kubernetes pod spec built from `_build_pod_spec()`.""" + + # check for valid high availability (or single node) configuration + self._check_high_availability() + + # in the case where we have peers but no DB connection, + # don't set the pod spec until it is resolved + if self.unit.status == BlockedStatus('Need database relation for HA.'): + log.error('Application is in a blocked state. ' + 'Please resolve before pod spec can be set.') + return + + if not self.unit.is_leader(): + self.unit.status = ActiveStatus() + return + + # general pod spec component updates + self.unit.status = MaintenanceStatus('Building pod spec.') + pod_spec = self._build_pod_spec() + if not pod_spec: + return + self._update_pod_data_source_config_file(pod_spec) + self._update_pod_config_ini_file(pod_spec) + + # set the pod spec with Juju + self.model.pod.set_spec(pod_spec) + self.unit.status = ActiveStatus() + + +if __name__ == '__main__': + main(GrafanaK8s) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/tests/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/tests/test_charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/tests/test_charm.py new file mode 100644 index 0000000000000000000000000000000000000000..e6b87e4151bf4ef5e87674bbd914adc12b49fd6a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/grafana-operator/tests/test_charm.py @@ -0,0 +1,490 @@ +import hashlib +import textwrap +import unittest + +from ops.testing import Harness +from ops.model import ( + TooManyRelatedAppsError, + ActiveStatus, +) +from charm import ( + GrafanaK8s, + MaintenanceStatus, + BlockedStatus, + get_container, +) + +BASE_CONFIG = { + 'port': 3000, + 'grafana_log_level': 'info', +} + + +class GrafanaCharmTest(unittest.TestCase): + + def setUp(self) -> None: + self.harness = Harness(GrafanaK8s) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + self.harness.add_oci_resource('grafana-image') + + def test__grafana_source_data(self): + + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + self.assertIsInstance(rel_id, int) + + # test that the unit data propagates the correct way + # which is through the triggering of 
on_relation_changed + self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-type': 'prometheus', + 'source-name': 'prometheus-app', + }) + + expected_first_source_data = { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-name': 'prometheus-app', + 'source-type': 'prometheus', + 'isDefault': 'true', + 'unit_name': 'prometheus/0' + } + self.assertEqual(expected_first_source_data, + dict(self.harness.charm.datastore.sources[rel_id])) + + # test that clearing the relation data leads to + # the datastore for this data source being cleared + self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': None, + 'port': None, + }) + self.assertEqual(None, self.harness.charm.datastore.sources.get(rel_id)) + + def test__ha_database_and_status_check(self): + """If there is a peer connection and no database (needed for HA), + the charm should put the application in a blocked state.""" + + # start charm with one peer and no database relation + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.unit.status, + ActiveStatus()) + + # ensure _check_high_availability() ends up with the correct status + status = self.harness.charm._check_high_availability() + self.assertEqual(status, MaintenanceStatus('Grafana ready on single node.')) + + # make sure that triggering 'update-status' hook does not + # overwrite the current active status + self.harness.charm.on.update_status.emit() + self.assertEqual(self.harness.charm.unit.status, + ActiveStatus()) + + peer_rel_id = self.harness.add_relation('grafana', 'grafana') + + # add main unit and its data + # self.harness.add_relation_unit(peer_rel_id, 'grafana/0') + # will trigger the grafana-changed hook + self.harness.update_relation_data(peer_rel_id, + 'grafana/0', + {'private-address': '10.1.2.3'}) + + # add peer unit and its data + self.harness.add_relation_unit(peer_rel_id, 'grafana/1') + self.harness.update_relation_data(peer_rel_id, + 'grafana/1', + {'private-address': '10.0.0.1'}) + + self.assertTrue(self.harness.charm.has_peer) + self.assertFalse(self.harness.charm.has_db) + self.assertEqual( + self.harness.charm.unit.status, + BlockedStatus('Need database relation for HA.') + ) + + # ensure update-status hook doesn't overwrite this + self.harness.charm.on.update_status.emit() + self.assertEqual(self.harness.charm.unit.status, + BlockedStatus('Need database relation for HA.')) + + # now add the database connection and the model should + # not have a blocked status + db_rel_id = self.harness.add_relation('database', 'mysql') + self.harness.add_relation_unit(db_rel_id, 'mysql/0') + self.harness.update_relation_data(db_rel_id, + 'mysql/0', + { + 'type': 'mysql', + 'host': '10.10.10.10:3306', + 'name': 'test_mysql_db', + 'user': 'test-admin', + 'password': 'super!secret!password', + }) + self.assertTrue(self.harness.charm.has_db) + self.assertEqual(self.harness.charm.unit.status, ActiveStatus()) + + # ensure _check_high_availability() ends up with the correct status + status = self.harness.charm._check_high_availability() + self.assertEqual(status, MaintenanceStatus('Grafana ready for HA.')) + + def test__database_relation_data(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.database, {}) + + # add relation and update relation data + rel_id = self.harness.add_relation('database', 'mysql') + rel = 
self.harness.model.get_relation('database') + self.harness.add_relation_unit(rel_id, 'mysql/0') + test_relation_data = { + 'type': 'mysql', + 'host': '0.1.2.3:3306', + 'name': 'my-test-db', + 'user': 'test-user', + 'password': 'super!secret!password', + } + self.harness.update_relation_data(rel_id, + 'mysql/0', + test_relation_data) + # check that charm datastore was properly set + self.assertEqual(dict(self.harness.charm.datastore.database), + test_relation_data) + + # now depart this relation and ensure the datastore is emptied + self.harness.charm.on.database_relation_broken.emit(rel) + self.assertEqual({}, dict(self.harness.charm.datastore.database)) + + def test__multiple_database_relation_handling(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.database, {}) + + # add first database relation + self.harness.add_relation('database', 'mysql') + + # add second database relation -- should fail here + with self.assertRaises(TooManyRelatedAppsError): + self.harness.add_relation('database', 'mysql') + self.harness.charm.model.get_relation('database') + + def test__multiple_source_relations(self): + """This will test data-source config text with multiple sources. + + Specifically, it will test multiple grafana-source relations.""" + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + rel_id0 = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id0, 'prometheus/0') + + # add test data to grafana-source relation + # and test that _make_data_source_config_text() works as expected + prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus' + } + self.harness.update_relation_data(rel_id0, 'prometheus/0', prom_source_data) + header_text = textwrap.dedent(""" + apiVersion: 1 + + datasources:""") + correct_config_text0 = header_text + textwrap.dedent(""" + - name: prometheus_0 + type: prometheus + access: proxy + url: http://192.0.2.1:4321 + isDefault: true + editable: true + orgId: 1""") + + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text0 + '\n', generated_text) + + # add another source relation and check the resulting config text + jaeger_source_data = { + 'private-address': '255.255.255.0', + 'port': 7890, + 'source-type': 'jaeger', + 'source-name': 'jaeger-application' + } + rel_id1 = self.harness.add_relation('grafana-source', 'jaeger') + self.harness.add_relation_unit(rel_id1, 'jaeger/0') + self.harness.update_relation_data(rel_id1, 'jaeger/0', jaeger_source_data) + + correct_config_text1 = correct_config_text0 + textwrap.dedent(""" + - name: jaeger-application + type: jaeger + access: proxy + url: http://255.255.255.0:7890 + isDefault: false + editable: true + orgId: 1""") + + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text1 + '\n', generated_text) + + # test removal of second source results in config_text + # that is the same as the original + self.harness.update_relation_data(rel_id1, + 'jaeger/0', + { + 'private-address': None, + 'port': None, + }) + generated_text = self.harness.charm._make_data_source_config_text() + correct_text_after_removal = textwrap.dedent(""" + apiVersion: 1 + + deleteDatasources: + - name: jaeger-application + orgId: 1 + + datasources: + - name: prometheus_0 + type: prometheus 
+ access: proxy + url: http://192.0.2.1:4321 + isDefault: true + editable: true + orgId: 1""") + + self.assertEqual(correct_text_after_removal + '\n', generated_text) + + # now test that the 'deleteDatasources' is gone + generated_text = self.harness.charm._make_data_source_config_text() + self.assertEqual(correct_config_text0 + '\n', generated_text) + + def test__pod_spec_container_datasources(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + + # add test data to grafana-source relation + # and test that _make_data_source_config_text() works as expected + prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus' + } + self.harness.update_relation_data(rel_id, 'prometheus/0', prom_source_data) + + data_source_file_text = textwrap.dedent(""" + apiVersion: 1 + + datasources: + - name: prometheus_0 + type: prometheus + access: proxy + url: http://192.0.2.1:4321 + isDefault: true + editable: true + orgId: 1 + """) + + config_ini_file_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """).format( + self.harness.model.config['grafana_log_level'], + ) + + expected_container_files_spec = [ + { + 'name': 'grafana-datasources', + 'mountPath': '/etc/grafana/provisioning/datasources', + 'files': [{ + 'path': 'datasources.yaml', + 'content': data_source_file_text, + }], + }, + { + 'name': 'grafana-config-ini', + 'mountPath': '/etc/grafana', + 'files': [{ + 'path': 'grafana.ini', + 'content': config_ini_file_text, + }] + } + ] + pod_spec, _ = self.harness.get_pod_spec() + container = get_container(pod_spec, 'grafana') + actual_container_files_spec = container['volumeConfig'] + self.assertEqual(expected_container_files_spec, + actual_container_files_spec) + + def test__access_sqlite_storage_location(self): + expected_path = '/var/lib/grafana' + actual_path = self.harness.charm.meta.storages['sqlitedb'].location + self.assertEqual(expected_path, actual_path) + + def test__config_ini_without_database(self): + self.harness.update_config(BASE_CONFIG) + expected_config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + """).format( + self.harness.model.config['grafana_log_level'], + ) + + actual_config_text = self.harness.charm._make_config_ini_text() + self.assertEqual(expected_config_text, actual_config_text) + + def test__config_ini_with_database(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + + # add database relation and update relation data + rel_id = self.harness.add_relation('database', 'mysql') + self.harness.add_relation_unit(rel_id, 'mysql/0') + test_relation_data = { + 'type': 'mysql', + 'host': '0.1.2.3:3306', + 'name': 'my-test-db', + 'user': 'test-user', + 'password': 'super!secret!password', + } + self.harness.update_relation_data(rel_id, + 'mysql/0', + test_relation_data) + + # test the results of _make_config_ini_text() + expected_config_text = textwrap.dedent(""" + [paths] + provisioning = /etc/grafana/provisioning + + [log] + mode = console + level = {0} + + [database] + type = mysql + host = 0.1.2.3:3306 + name = my-test-db + user = test-user + password = super!secret!password + url = 
mysql://test-user:super!secret!password@0.1.2.3:3306/my-test-db""").format( + self.harness.model.config['grafana_log_level'], + ) + + actual_config_text = self.harness.charm._make_config_ini_text() + self.assertEqual(expected_config_text, actual_config_text) + + def test__duplicate_source_names(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + self.assertEqual(self.harness.charm.datastore.sources, {}) + + # add first relation + p_rel_id = self.harness.add_relation('grafana-source', 'prometheus') + p_rel = self.harness.model.get_relation('grafana-source', p_rel_id) + self.harness.add_relation_unit(p_rel_id, 'prometheus/0') + + # add test data to grafana-source relation + prom_source_data0 = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-type': 'prometheus', + 'source-name': 'duplicate-source-name' + } + self.harness.update_relation_data(p_rel_id, 'prometheus/0', prom_source_data0) + expected_prom_source_data = { + 'private-address': '192.0.2.1', + 'port': 4321, + 'source-name': 'duplicate-source-name', + 'source-type': 'prometheus', + 'isDefault': 'true', + 'unit_name': 'prometheus/0' + } + self.assertEqual(dict(self.harness.charm.datastore.sources[p_rel_id]), + expected_prom_source_data) + + # add second source with the same name as the first source + g_rel_id = self.harness.add_relation('grafana-source', 'graphite') + g_rel = self.harness.model.get_relation('grafana-source', g_rel_id) + self.harness.add_relation_unit(g_rel_id, 'graphite/0') + + graphite_source_data0 = { + 'private-address': '192.12.23.34', + 'port': 4321, + 'source-type': 'graphite', + 'source-name': 'duplicate-source-name' + } + expected_graphite_source_data = { + 'isDefault': 'false', + 'port': 4321, + 'private-address': '192.12.23.34', + 'source-name': 'graphite_1', + 'source-type': 'graphite', + 'unit_name': 'graphite/0' + } + self.harness.update_relation_data(g_rel_id, 'graphite/0', graphite_source_data0) + self.assertEqual( + expected_graphite_source_data, + dict(self.harness.charm.datastore.sources.get(g_rel_id)) + ) + self.assertEqual(2, len(self.harness.charm.datastore.sources)) + + # now remove the relation and ensure datastore source-name is removed + self.harness.charm.on.grafana_source_relation_broken.emit(p_rel) + self.assertEqual(None, self.harness.charm.datastore.sources.get(p_rel_id)) + self.assertEqual(1, len(self.harness.charm.datastore.sources)) + + # remove graphite relation + self.harness.charm.on.grafana_source_relation_broken.emit(g_rel) + self.assertEqual(None, self.harness.charm.datastore.sources.get(g_rel_id)) + self.assertEqual(0, len(self.harness.charm.datastore.sources)) + + def test__idempotent_datasource_file_hash(self): + self.harness.set_leader(True) + self.harness.update_config(BASE_CONFIG) + + rel_id = self.harness.add_relation('grafana-source', 'prometheus') + self.harness.add_relation_unit(rel_id, 'prometheus/0') + self.assertIsInstance(rel_id, int) + + # test that the unit data propagates the correct way + # which is through the triggering of on_relation_changed + self.harness.update_relation_data(rel_id, + 'prometheus/0', + { + 'private-address': '192.0.2.1', + 'port': 1234, + 'source-type': 'prometheus', + 'source-name': 'prometheus-app', + }) + + # get a hash of the created file and check that it matches the pod spec + pod_spec, _ = self.harness.get_pod_spec() + container = get_container(pod_spec, 'grafana') + hash_text = hashlib.md5( + container['volumeConfig'][0]['files'][0]['content'].encode()).hexdigest() + 
self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text) + + # test the idempotence of the call by re-configuring the pod spec + self.harness.charm.configure_pod() + self.assertEqual(container['envConfig']['DATASOURCES_YAML'], hash_text) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.flake8 b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.gitignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b3b17b402232904b604711f178aefca0a623bdf5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.gitignore @@ -0,0 +1,6 @@ +*~ +*swp +*.charm +__pycache__ +build +venv diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.jujuignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.jujuignore new file mode 100644 index 0000000000000000000000000000000000000000..6ccd559eabeae93e4d23215fa450130fa9b37ace --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..35f666076133ad99f6a0503a44ccedbf04bd7775
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/README.md
@@ -0,0 +1,78 @@
+# Prometheus Operator
+
+## Description
+
+The Prometheus Operator provides a cluster monitoring solution using
+[Prometheus](https://prometheus.io), which is an open source
+monitoring system and alerting toolkit.
+
+This repository contains a [Juju](https://jaas.ai/) Charm for
+deploying the monitoring component of Prometheus in a Kubernetes
+cluster. The alerting component of Prometheus is offered through a
+separate Charm.
+
+## Setup
+
+A typical setup using [snaps](https://snapcraft.io/) for deployments
+to a [microk8s](https://microk8s.io/) cluster can be done with the
+following commands
+
+    sudo snap install microk8s --classic
+    microk8s.enable dns storage registry dashboard
+    sudo snap install juju --classic
+    juju bootstrap microk8s microk8s
+    juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath
+
+## Build
+
+Install the charmcraft tool
+
+    sudo snap install charmcraft
+
+Build the charm in this git repository
+
+    charmcraft build
+
+## Usage
+
+Create a Juju model for your monitoring operators
+
+    juju add-model lma
+
+Deploy Prometheus using its default configuration.
+
+    juju deploy ./prometheus.charm
+
+View the Prometheus dashboard
+
+1. Use `juju status` to determine the IP of the Prometheus unit
+2. Navigate to `http://<prometheus-unit-ip>:9090` using your browser
+
+If required, remove the deployed monitoring model completely
+
+    juju destroy-model -y lma --no-wait --force --destroy-storage
+
+## Relations
+
+Currently supported relations are
+
+- [Grafana](https://github.com/canonical/grafana-operator)
+- [Alertmanager](https://github.com/canonical/alertmanager-operator)
+
+## Developing
+
+Use your existing Python 3 development environment or create and
+activate a Python 3 virtualenv
+
+    virtualenv -p python3 venv
+    source venv/bin/activate
+
+Install the development requirements
+
+    pip install -r requirements-dev.txt
+
+## Testing
+
+Just run `run_tests`:
+
+    ./run_tests
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fef67f32c4a9134c536b965e8c53d055e18c4457
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions.yaml
@@ -0,0 +1,3 @@
+reload-config:
+  description: |
+    Tell Prometheus to reload its config from the ConfigMap.
\ No newline at end of file
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions/reload-config b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions/reload-config
new file mode 100755
index 0000000000000000000000000000000000000000..d736d4e1627e01599ba7cef209ba684ef4b0ef41
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/actions/reload-config
@@ -0,0 +1,2 @@
+#!/bin/sh
+kill -HUP 1 && echo "Sent SIGHUP to the Prometheus container, config reloaded"
\ No newline at end of file
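The reload-config action above simply sends SIGHUP to PID 1 of the workload container, which Prometheus interprets as a request to re-read its configuration file. A hedged usage sketch, assuming the charm was deployed under the application name prometheus and a Juju 2.x client:

    # invoke the action on unit 0 and wait for its result
    juju run-action prometheus/0 reload-config --wait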
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e40cb172421a1ec10708a96377fbeb9bee5391cd
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config.yaml
@@ -0,0 +1,99 @@
+options:
+  prometheus-image-path:
+    type: string
+    description: |
+      The location of the image to use,
+      e.g. "registry.example.com/prometheus:v1".
+
+      This setting is required.
+    default: "prom/prometheus:latest"
+  prometheus-image-username:
+    type: string
+    description: |
+      The username for accessing the registry specified in
+      prometheus-image-path.
+    default: ""
+  prometheus-image-password:
+    type: string
+    description: |
+      The password associated with prometheus-image-username for
+      accessing the registry specified in prometheus-image-path.
+    default: ""
+  port:
+    description: The port Prometheus will listen on
+    type: int
+    default: 9090
+  ssl-cert:
+    type: string
+    default:
+    description: |
+      SSL certificate to install and use for the Prometheus endpoint.
+  ssl-key:
+    type: string
+    default:
+    description: |
+      SSL key to use with the certificate specified as ssl-cert.
+  log-level:
+    description: |
+      Prometheus server log level (only log messages with the given severity
+      or above). Must be one of: [debug, info, warn, error, fatal].
+      If not set, the Prometheus default (info) will be used.
+    type: string
+    default:
+  web-external-url:
+    description: |
+      The URL under which Prometheus is externally reachable (for example,
+      if Prometheus is served via a reverse proxy).
+      Used for generating relative and absolute links back to
+      Prometheus itself. If the URL has a path portion, it will be used to
+      prefix all HTTP endpoints served by Prometheus.
+
+      If omitted, relevant URL components will be derived automatically.
+    type: string
+    default: ""
+  tsdb-retention-time:
+    description: |
+      How long to retain samples in the storage.
+      Units supported: y, w, d, h, m, s
+    type: string
+    default: 15d
+  tsdb-wal-compression:
+    description: |
+      This flag enables compression of the write-ahead log (WAL).
+      Depending on your data, you can expect the WAL size to be
+      halved with little extra CPU load.
+    type: boolean
+    default: false
+  external-labels:
+    description: |
+      A JSON string of key-value pairs that specify the labels to
+      attach to metrics in this Prometheus instance when they get pulled
+      by an aggregating parent. This is useful in the case of federation
+      where, for example, you want each datacenter to have its own
+      Prometheus instance and then have a global instance that pulls from
+      each of these datacenter instances. By specifying a unique set of
+      external-labels for each datacenter instance, you can easily determine
+      in the aggregating Prometheus instance which datacenter a metric is
+      coming from. Note that you are not limited to one instance per
+      datacenter. The datacenter example here is arbitrary and you are free
+      to organize your federation's hierarchy as you see fit.
+      Ex. '{ "cluster": "datacenter1" }'. Both keys and values may be
+      arbitrarily chosen as you see fit.
+    type: string
+    default: "{}"
+  scrape-interval:
+    description: |
+      How frequently to scrape targets by default.
+    type: string
+    default: 1m
+  scrape-timeout:
+    description: |
+      How long until a scrape request times out.
+    type: string
+    default: 10s
+  evaluation-interval:
+    description: |
+      How frequently rules will be evaluated.
+    type: string
+    default: 1m
+
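These options are all set through standard Juju configuration. For example, to label this instance for federation and scrape more aggressively (a sketch, assuming the application name prometheus; the values are illustrative):

    # mirror the external-labels example from the option description above
    juju config prometheus external-labels='{ "cluster": "datacenter1" }'
    juju config prometheus scrape-interval=30s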
This is useful in the case of federation + where, for example, you want each datacenter to have its own + Prometheus instance and then have a global instance that pulls from + each of these datacenter instances. By specifying a unique set of + external-labels for each datacenter instance, you can easily determine + in the aggregating Prometheus instance which datacenter a metric is + coming from. Note that you are not limited to one instance per + datacenter. The datacenter example here is arbitrary and you are free + to organize your federation's hierarchy as you see fit. + Ex. '{ "cluster": "datacenter1" }'. Both keys and values may be + arbitrarily chosen as you see fit. + type: string + default: "{}" + scrape-interval: + description: | + How frequently to scrape targets by default. + type: string + default: 1m + scrape-timeout: + description: | + How long until a scrape request times out. + type: string + default: 10s + evaluation-interval: + description: | + How frequently rules will be evaluated. + type: string + default: 1m + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config/prometheus-k8s.yml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config/prometheus-k8s.yml new file mode 100644 index 0000000000000000000000000000000000000000..e003db0ae0df07f539a6c725ddd7925ccba67232 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/config/prometheus-k8s.yml @@ -0,0 +1,283 @@ +# +# This file copied from https://github.com/prometheus/prometheus/blob/release-2.18/documentation/examples/prometheus-kubernetes.yml +# + +# A scrape configuration for running Prometheus on a Kubernetes cluster. +# This uses separate scrape configs for cluster components (i.e. API server, node) +# and services to allow each to use different authentication configs. +# +# Kubernetes labels will be added as Prometheus labels on metrics via the +# `labelmap` relabeling action. +# +# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments +# for the kubernetes-cadvisor job; you will need to edit or remove this job. + +# Scrape config for API servers. +# +# Kubernetes exposes API servers as endpoints to the default/kubernetes +# service so this uses `endpoints` role and uses relabelling to only keep +# the endpoints associated with the default/kubernetes service using the +# default named port `https`. This works for single API server deployments as +# well as HA API server deployments. +scrape_configs: +- job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. 
Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + # insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + +# Scrape config for nodes (kubelet). +# +# Rather than connecting directly to the node, the scrape is proxied though the +# Kubernetes apiserver. This means it will work if Prometheus is running out of +# cluster, or can't connect to nodes for some other reason (e.g. because of +# firewalling). +- job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + +# Scrape config for Kubelet cAdvisor. +# +# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics +# (those whose names begin with 'container_') have been removed from the +# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to +# retrieve those metrics. +# +# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor +# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics" +# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with +# the --cadvisor-port=0 Kubelet flag). +# +# This job is not necessary and should be removed in Kubernetes 1.6 and +# earlier versions, or it will cause the metrics to be scraped twice. +- job_name: 'kubernetes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + +# Example scrape config for service endpoints. +# +# The relabeling allows the actual service scrape endpoint to be configured +# for all or only some endpoints. +- job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + # Example relabel to scrape only endpoints that have + # "example.io/should_be_scraped = true" annotation. + # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped] + # action: keep + # regex: true + # + # Example relabel to customize metric path based on endpoints + # "example.io/metric_path = " annotation. + # - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path] + # action: replace + # target_label: __metrics_path__ + # regex: (.+) + # + # Example relabel to scrape only single, desired port for the service based + # on endpoints "example.io/scrape_port = " annotation. + # - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port] + # action: replace + # regex: ([^:]+)(?::\d+)?;(\d+) + # replacement: $1:$2 + # target_label: __address__ + # + # Example relabel to configure scrape scheme for all service scrape targets + # based on endpoints "example.io/scrape_scheme = " annotation. + # - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme] + # action: replace + # target_label: __scheme__ + # regex: (https?) + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +# Example scrape config for probing services via the Blackbox Exporter. +# +# The relabeling allows the actual service scrape endpoint to be configured +# for all or only some services. +- job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + +# Example scrape config for probing ingresses via the Blackbox Exporter. +# +# The relabeling allows the actual ingress scrape endpoint to be configured +# for all or only some services. 
+- job_name: 'kubernetes-ingresses' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: ingress + + relabel_configs: + # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path] + regex: (.+);(.+);(.+) + replacement: ${1}://${2}${3} + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_ingress_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_ingress_name] + target_label: kubernetes_name + +# Example scrape config for pods +# +# The relabeling allows the actual pod scrape to be configured +# for all the declared ports (or port-free target if none is declared) +# or only some ports. +- job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + # Example relabel to scrape only pods that have + # "example.io/should_be_scraped = true" annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped] + # action: keep + # regex: true + # + # Example relabel to customize metric path based on pod + # "example.io/metric_path = " annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path] + # action: replace + # target_label: __metrics_path__ + # regex: (.+) + # + # Example relabel to scrape only single, desired port for the pod + # based on pod "example.io/scrape_port = " annotation. + # - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port] + # action: replace + # regex: ([^:]+)(?::\d+)?;(\d+) + # replacement: $1:$2 + # target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/icon.svg b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/icon.svg new file mode 100644 index 0000000000000000000000000000000000000000..5c51f66d901d0a30c082a7207a53d19b763acc2b --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/icon.svg @@ -0,0 +1,50 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d4329da48a621aad3a50ab6df40d72a11dbcb3f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/metadata.yaml @@ -0,0 +1,34 @@ +name: prometheus +summary: Prometheus for Kubernetes clusters +maintainers: + - Balbir Thomas +description: | + Prometheus is an open source monitoring solution. 
Prometheus
+  supports aggregating high dimensional data and exposes a powerful
+  query language PromQL. This charm deploys and operates Prometheus on
+  Kubernetes clusters. Prometheus can raise alerts through a relation
+  with the Alertmanager charm. Alerting rules for Prometheus need to
+  be provided through a relation with the application that requires
+  alerting. Prometheus provides its own dashboard for data
+  visualization but a richer visualization interface may be obtained
+  through a relation with the Grafana charm.
+tags:
+  - observability
+  - lma
+  - prometheus
+  - monitoring
+  - alerting
+  - grafana
+series:
+  - kubernetes
+requires:
+  grafana-source:
+    interface: grafana-datasource
+  alertmanager:
+    interface: alertmanager
+  target:
+    interface: http
+storage:
+  database:
+    type: filesystem
+    location: /var/lib/prometheus
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements-dev.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements-dev.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3950bef2e306b78aaa231135636a04f2d443d569
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements-dev.txt
@@ -0,0 +1,5 @@
+-r requirements.txt
+black
+flake8
+pytest
+pytest-cov
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff3e3351770b50b916e2fcc6478e986f59c35845
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/requirements.txt
@@ -0,0 +1,2 @@
+ops
+pyaml
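The `target` relation declared in the metadata above (interface `http`) is how scrape targets register themselves with this charm; `src/charm.py` below consumes it in its `relation_targets` property, and the squid charm later in this diff carries a commented-out publisher for the same interface. A minimal, hypothetical sketch of the providing side (the charm name and port are illustrative):

```python
#!/usr/bin/env python3
# Hypothetical sketch: a workload charm publishing itself as a Prometheus
# scrape target over the http interface consumed by the charm above.
from ops.charm import CharmBase
from ops.main import main


class ScrapeTargetExampleCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        self.framework.observe(self.on["prometheus-target"].relation_joined,
                               self._publish_target)

    def _publish_target(self, event):
        # Each unit advertises where its metrics endpoint can be reached;
        # PrometheusCharm.relation_targets reads these keys per unit.
        event.relation.data[self.unit]["host"] = self.app.name
        event.relation.data[self.unit]["port"] = "9100"


if __name__ == "__main__":
    main(ScrapeTargetExampleCharm)
```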
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/run_tests b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..637497ffe1bac2f75fec96b3bc1d25e16e39e1d8
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/run_tests
@@ -0,0 +1,16 @@
+#!/bin/sh -e
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH=src
+else
+    export PYTHONPATH="src:$PYTHONPATH"
+fi
+
+black --diff
+python3 -m unittest -v "$@"
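`run_tests` puts `src` on `PYTHONPATH`, which is what lets the test suite (and the snippet below) import the charm as `from charm import PrometheusCharm`. A minimal smoke test using the ops `Harness`, under that assumption:

```python
# Minimal smoke test; assumes PYTHONPATH includes src/, as run_tests arranges.
from ops.testing import Harness

from charm import PrometheusCharm

harness = Harness(PrometheusCharm)
harness.begin()
harness.set_leader(True)
# A valid image path and port are the only configuration the charm requires.
harness.update_config({"prometheus-image-path": "prom/prometheus", "port": 9090})
assert harness.get_pod_spec() is not None
harness.cleanup()
```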
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/setup.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ce0b9ca35e585d2b1925a13bd58d1ea67f0900
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/setup.py
@@ -0,0 +1,21 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="prometheus-charm",
+    version="0.0.1",
+    author="Balbir Thomas",
+    author_email="balbir.thomas@canonical.com",
+    description="Kubernetes Charm/Operator for Prometheus",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/balbirthomas/prometheus-charm",
+    packages=setuptools.find_packages(),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.5',
+)
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..e4f584fb540e8f0f74c634fb11fb9145ad3027a4
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/src/charm.py
@@ -0,0 +1,377 @@
+#!/usr/bin/env python3
+# Copyright 2020 Balbir Thomas
+# See LICENSE file for licensing details.
+
+import logging
+import yaml
+import json
+
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus
+
+logger = logging.getLogger(__name__)
+
+
+class PrometheusCharm(CharmBase):
+    """A Juju Charm for Prometheus
+    """
+    _stored = StoredState()
+
+    def __init__(self, *args):
+        logger.debug('Initializing Charm')
+
+        super().__init__(*args)
+
+        self._stored.set_default(alertmanagers=[])
+        self._stored.set_default(alertmanager_port='9093')
+
+        self.framework.observe(self.on.config_changed, self._on_config_changed)
+        self.framework.observe(self.on.stop, self._on_stop)
+        self.framework.observe(self.on['alertmanager'].relation_changed,
+                               self._on_alertmanager_changed)
+        self.framework.observe(self.on['alertmanager'].relation_broken,
+                               self._on_alertmanager_broken)
+
+        self.framework.observe(self.on['grafana-source'].relation_changed,
+                               self._on_grafana_changed)
+        self.framework.observe(self.on['target'].relation_changed,
+                               self._on_config_changed)
+
+    def _on_config_changed(self, _):
+        """Set a new Juju pod specification
+        """
+        self._configure_pod()
+
+    def _on_stop(self, _):
+        """Mark unit as inactive
+        """
+        self.unit.status = MaintenanceStatus('Pod is terminating.')
+
+    def _on_grafana_changed(self, event):
+        """Provide Grafana with data source information
+        """
+        event.relation.data[self.unit]['port'] = str(self.model.config['port'])
+        event.relation.data[self.unit]['source-type'] = 'prometheus'
+
+    def _on_alertmanager_changed(self, event):
+        """Set an alertmanager configuration
+        """
+        if not self.unit.is_leader():
+            return
+
+        addrs = json.loads(event.relation.data[event.app].get('addrs', '[]'))
+        port = event.relation.data[event.app]['port']
+
+        self._stored.alertmanager_port = port
+        self._stored.alertmanagers = addrs
+
+        self._configure_pod()
+
+    def _on_alertmanager_broken(self, event):
+        """Remove all alertmanager configuration
+        """
+        if not self.unit.is_leader():
+            return
+        self._stored.alertmanagers.clear()
+        self._configure_pod()
+
+    def _cli_args(self):
+        """Construct command line arguments for Prometheus
+        """
+        config = self.model.config
+        args = [
+            '--config.file=/etc/prometheus/prometheus.yml',
+            '--storage.tsdb.path=/var/lib/prometheus',
+            '--web.enable-lifecycle',
+            '--web.console.templates=/usr/share/prometheus/consoles',
+            '--web.console.libraries=/usr/share/prometheus/console_libraries'
+        ]
+
+        # get log level
+        allowed_log_levels = ['debug', 'info', 'warn', 'error', 'fatal']
+        if config.get('log-level'):
+            log_level = config['log-level'].lower()
+        else:
+            log_level = 'info'
+
+        # If log level is invalid set it to debug
+        if log_level not in allowed_log_levels:
+            logging.error(
+                'Invalid loglevel: {0} given, {1} allowed. '
+                'defaulting to DEBUG loglevel.'.format(
+                    log_level, '/'.join(allowed_log_levels)
+                )
+            )
+            log_level = 'debug'
+
+        # set log level
+        args.append(
+            '--log.level={0}'.format(log_level)
+        )
+
+        # Enable time series database compression
+        if config.get('tsdb-wal-compression'):
+            args.append('--storage.tsdb.wal-compression')
+
+        # Set time series retention time
+        if config.get('tsdb-retention-time') and self._is_valid_timespec(
+                config['tsdb-retention-time']):
+            args.append('--storage.tsdb.retention.time={}'.format(config['tsdb-retention-time']))
+
+        return args
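+
+    # Example (illustrative): with the charm's default configuration,
+    # _cli_args() returns:
+    #   ['--config.file=/etc/prometheus/prometheus.yml',
+    #    '--storage.tsdb.path=/var/lib/prometheus',
+    #    '--web.enable-lifecycle',
+    #    '--web.console.templates=/usr/share/prometheus/consoles',
+    #    '--web.console.libraries=/usr/share/prometheus/console_libraries',
+    #    '--log.level=info',
+    #    '--storage.tsdb.retention.time=15d']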
+
+    def _is_valid_timespec(self, timeval):
+        """Is a time interval unit and value valid
+        """
+        if not timeval:
+            return False
+
+        time, unit = timeval[:-1], timeval[-1]
+
+        if unit not in ['y', 'w', 'd', 'h', 'm', 's']:
+            logger.error('Invalid unit {} in time spec'.format(unit))
+            return False
+
+        try:
+            int(time)
+        except ValueError:
+            logger.error('Can not convert time {} to integer'.format(time))
+            return False
+
+        if not int(time) > 0:
+            logger.error('Expected positive time spec but got {}'.format(time))
+            return False
+
+        return True
+
+    def _are_valid_labels(self, json_data):
+        """Are Prometheus external labels valid
+        """
+        if not json_data:
+            return False
+
+        try:
+            labels = json.loads(json_data)
+        except (ValueError, TypeError):
+            logger.error('Can not parse external labels : {}'.format(json_data))
+            return False
+
+        if not isinstance(labels, dict):
+            logger.error('Expected label dictionary but got : {}'.format(labels))
+            return False
+
+        for key, value in labels.items():
+            if not isinstance(key, str) or not isinstance(value, str):
+                logger.error('External label keys/values must be strings')
+                return False
+
+        return True
+
+    def _external_labels(self):
+        """Extract external labels for Prometheus from configuration
+        """
+        config = self.model.config
+        labels = {}
+
+        if config.get('external-labels') and self._are_valid_labels(
+                config['external-labels']):
+            labels = json.loads(config['external-labels'])
+
+        return labels
+
+    def _prometheus_global_config(self):
+        """Construct Prometheus global configuration
+        """
+        config = self.model.config
+        global_config = {}
+
+        labels = self._external_labels()
+        if labels:
+            global_config['external_labels'] = labels
+
+        if config.get('scrape-interval') and self._is_valid_timespec(
+                config['scrape-interval']):
+            global_config['scrape_interval'] = config['scrape-interval']
+
+        if config.get('scrape-timeout') and self._is_valid_timespec(
+                config['scrape-timeout']):
+            global_config['scrape_timeout'] = config['scrape-timeout']
+
+        if config.get('evaluation-interval') and self._is_valid_timespec(
+                config['evaluation-interval']):
+            global_config['evaluation_interval'] = config['evaluation-interval']
+
+        return global_config
+
+    def _alerting_config(self):
+        """Construct Prometheus alerting configuration
+        """
+        alerting_config = ''
+
+        if len(self._stored.alertmanagers) < 1:
+            logger.debug('No alertmanagers available')
+            return alerting_config
+
+        targets = []
+        for manager in self._stored.alertmanagers:
+            port = self._stored.alertmanager_port
+            targets.append("{}:{}".format(manager, port))
+
+        manager_config = {'static_configs': [{'targets': targets}]}
+        alerting_config = {'alertmanagers': [manager_config]}
+
+        return alerting_config
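+
+    # Example (illustrative): with a single alertmanager at 192.168.0.1 and
+    # the default port (9093), _alerting_config() returns:
+    #   {'alertmanagers': [{'static_configs': [{'targets': ['192.168.0.1:9093']}]}]}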
+
+    def _prometheus_config(self):
+        """Construct Prometheus configuration
+        """
+        config = self.model.config
+
+        scrape_config = {'global': self._prometheus_global_config(),
+                         'scrape_configs': []}
+
+        alerting_config = self._alerting_config()
+        if alerting_config:
+            scrape_config['alerting'] = alerting_config
+
+        # By default only monitor prometheus server itself
+        targets = ['localhost:{}'.format(config['port'])]
+        relation_targets = self.relation_targets
+        if relation_targets:
+            targets.extend(relation_targets)
+
+        default_config = {
+            'job_name': 'prometheus',
+            'scrape_interval': '5s',
+            'scrape_timeout': '5s',
+            'metrics_path': '/metrics',
+            'honor_timestamps': True,
+            'scheme': 'http',
+            'static_configs': [{
+                'targets': targets
+            }]
+        }
+        scrape_config['scrape_configs'].append(default_config)
+
+        logger.debug('Prometheus config : {}'.format(scrape_config))
+
+        return yaml.dump(scrape_config), targets
+
+    def _build_pod_spec(self):
+        """Construct a Juju pod specification for Prometheus
+        """
+        logger.debug('Building Pod Spec')
+        config = self.model.config
+        prometheus_config, targets = self._prometheus_config()
+        spec = {
+            'version': 3,
+            'containers': [{
+                'name': self.app.name,
+                'imageDetails': {
+                    'imagePath': config['prometheus-image-path'],
+                    'username': config.get('prometheus-image-username', ''),
+                    'password': config.get('prometheus-image-password', '')
+                },
+                'args': self._cli_args(),
+                "envConfig": {
+                    "targets": str(targets),
+                },
+                'kubernetes': {
+                    'readinessProbe': {
+                        'httpGet': {
+                            'path': '/-/ready',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 10,
+                        'timeoutSeconds': 30
+                    },
+                    'livenessProbe': {
+                        'httpGet': {
+                            'path': '/-/healthy',
+                            'port': config['port']
+                        },
+                        'initialDelaySeconds': 30,
+                        'timeoutSeconds': 30
+                    }
+                },
+                'ports': [{
+                    'containerPort': config['port'],
+                    'name': 'prometheus-http',
+                    'protocol': 'TCP'
+                }],
+                'volumeConfig': [{
+                    'name': 'prometheus-config',
+                    'mountPath': '/etc/prometheus',
+                    'files': [{
+                        'path': 'prometheus.yml',
+                        'content': prometheus_config
+                    }]
+                }]
+            }]
+        }
+
+        return spec
+
+    def _check_config(self):
+        """Identify missing but required items in configuration
+
+        :returns: list of missing configuration items (configuration keys)
+        """
+        logger.debug('Checking Config')
+        config = self.model.config
+        missing = []
+
+        if not config.get('prometheus-image-path'):
+            missing.append('prometheus-image-path')
+
+        if config.get('prometheus-image-username') \
+                and not config.get('prometheus-image-password'):
+            missing.append('prometheus-image-password')
+
+        return missing
+
+    def _configure_pod(self):
+        """Set up a new Prometheus pod specification
+        """
+        logger.debug('Configuring Pod')
+        missing_config = self._check_config()
+        if missing_config:
+            logger.error('Incomplete Configuration : {}. 
' + 'Application will be blocked.'.format(missing_config)) + self.unit.status = \ + BlockedStatus('Missing configuration: {}'.format(missing_config)) + return + + if not self.unit.is_leader(): + self.unit.status = ActiveStatus() + return + + self.unit.status = MaintenanceStatus('Setting pod spec.') + pod_spec = self._build_pod_spec() + + self.model.pod.set_spec(pod_spec) + self.app.status = ActiveStatus() + self.unit.status = ActiveStatus() + + @property + def relation_targets(self): + hosts = [] + relations = self.framework.model.relations.get("target") + if relations: + for relation in relations: + for i, unit in enumerate(relation.units): + unit_name = unit.name.replace("/", "-") + host = relation.data[unit].get("host") + port = int(relation.data[unit].get("port", "9100")) + if not host: + continue + hostname = f"{host}-{i}.{host}-endpoints" + if hostname and port: + hosts.append(f"{hostname}:{port}") + return hosts + +if __name__ == "__main__": + main(PrometheusCharm) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/tests/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/tests/test_charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/tests/test_charm.py new file mode 100644 index 0000000000000000000000000000000000000000..05f95782d01dfa5d3ae5965531c61fcc442909f5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/prometheus-operator/tests/test_charm.py @@ -0,0 +1,313 @@ +# Copyright 2020 Balbir Thomas +# See LICENSE file for licensing details. + +import unittest +import yaml +import json + +from ops.testing import Harness +from charm import PrometheusCharm + +MINIMAL_CONFIG = { + 'prometheus-image-path': 'prom/prometheus', + 'port': 9090 +} + +SAMPLE_ALERTING_CONFIG = { + 'alertmanagers': [{ + 'static_configs': [{ + 'targets': ['192.168.0.1:9093'] + }] + }] +} + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(PrometheusCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_image_path_is_required(self): + missing_image_config = { + 'prometheus-image-path': '', + 'prometheus-image-username': '', + 'prometheus-image-password': '' + } + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(missing_image_config) + expected_logs = [ + "ERROR:charm:Incomplete Configuration : ['prometheus-image-path']. " + "Application will be blocked." + ] + self.assertEqual(sorted(logger.output), expected_logs) + + missing = self.harness.charm._check_config() + expected = ['prometheus-image-path'] + self.assertEqual(missing, expected) + + def test_password_is_required_when_username_is_set(self): + missing_password_config = { + 'prometheus-image-path': 'prom/prometheus:latest', + 'prometheus-image-username': 'some-user', + 'prometheus-image-password': '', + } + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(missing_password_config) + expected_logs = [ + "ERROR:charm:Incomplete Configuration : ['prometheus-image-password']. " + "Application will be blocked." 
+        ]
+        self.assertEqual(sorted(logger.output), expected_logs)
+
+        missing = self.harness.charm._check_config()
+        expected = ['prometheus-image-password']
+        self.assertEqual(missing, expected)
+
+    def test_alerting_config_is_updated_by_alertmanager_relation(self):
+        self.harness.set_leader(True)
+
+        # check alerting config is empty without alertmanager relation
+        self.harness.update_config(MINIMAL_CONFIG)
+
+        self.assertEqual(self.harness.charm._stored.alertmanagers, [])
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+        # check alerting config is updated when an alertmanager joins
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+    def test_alerting_config_is_removed_when_alertmanager_is_broken(self):
+        self.harness.set_leader(True)
+
+        # ensure there is a non-empty alerting config
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('alertmanager', 'alertmanager')
+        rel = self.harness.model.get_relation('alertmanager')
+        self.assertIsInstance(rel_id, int)
+        self.harness.add_relation_unit(rel_id, 'alertmanager/0')
+        self.harness.update_relation_data(rel_id,
+                                          'alertmanager',
+                                          {
+                                              'port': '9093',
+                                              'addrs': '["192.168.0.1"]'
+                                          })
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG)
+
+        # check alerting config is removed when relation departs
+        self.harness.charm.on.alertmanager_relation_broken.emit(rel)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(alerting_config(pod_spec), None)
+
+    def test_grafana_is_provided_port_and_source(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        rel_id = self.harness.add_relation('grafana-source', 'grafana')
+        self.harness.add_relation_unit(rel_id, 'grafana/0')
+        self.harness.update_relation_data(rel_id, 'grafana/0', {})
+        data = self.harness.get_relation_data(rel_id, self.harness.model.unit.name)
+
+        self.assertEqual(int(data['port']), MINIMAL_CONFIG['port'])
+        self.assertEqual(data['source-type'], 'prometheus')
+
+    def test_default_cli_log_level_is_info(self):
+        self.harness.set_leader(True)
+        self.harness.update_config(MINIMAL_CONFIG)
+        pod_spec = self.harness.get_pod_spec()
+        self.assertEqual(cli_arg(pod_spec, '--log.level'), 'info')
+
+    def test_invalid_log_level_defaults_to_debug(self):
+        self.harness.set_leader(True)
+        bad_log_config = MINIMAL_CONFIG.copy()
+        bad_log_config['log-level'] = 'bad-level'
+        with self.assertLogs(level='ERROR') as logger:
+            self.harness.update_config(bad_log_config)
+            expected_logs = [
+                "ERROR:root:Invalid loglevel: bad-level given, "
+                "debug/info/warn/error/fatal allowed. "
+                "defaulting to DEBUG loglevel."
+ ] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--log.level'), 'debug') + + def test_valid_log_level_is_accepted(self): + self.harness.set_leader(True) + valid_log_config = MINIMAL_CONFIG.copy() + valid_log_config['log-level'] = 'warn' + self.harness.update_config(valid_log_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--log.level'), 'warn') + + def test_tsdb_compression_is_not_enabled_by_default(self): + self.harness.set_leader(True) + compress_config = MINIMAL_CONFIG.copy() + self.harness.update_config(compress_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'), + None) + + def test_tsdb_compression_can_be_enabled(self): + self.harness.set_leader(True) + compress_config = MINIMAL_CONFIG.copy() + compress_config['tsdb-wal-compression'] = True + self.harness.update_config(compress_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'), + '--storage.tsdb.wal-compression') + + def test_valid_tsdb_retention_times_can_be_set(self): + self.harness.set_leader(True) + retention_time_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + retention_time = '{}{}'.format(1, unit) + retention_time_config['tsdb-retention-time'] = retention_time + self.harness.update_config(retention_time_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + retention_time) + + def test_invalid_tsdb_retention_times_can_not_be_set(self): + self.harness.set_leader(True) + retention_time_config = MINIMAL_CONFIG.copy() + + # invalid unit + retention_time = '{}{}'.format(1, 'x') + retention_time_config['tsdb-retention-time'] = retention_time + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(retention_time_config) + expected_logs = ["ERROR:charm:Invalid unit x in time spec"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + None) + + # invalid time value + retention_time = '{}{}'.format(0, 'd') + retention_time_config['tsdb-retention-time'] = retention_time + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(retention_time_config) + expected_logs = ["ERROR:charm:Expected positive time spec but got 0"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + None) + + def test_global_scrape_interval_can_be_set(self): + self.harness.set_leader(True) + scrapeint_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + scrapeint_config['scrape-interval'] = '{}{}'.format(1, unit) + self.harness.update_config(scrapeint_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['scrape_interval'], + scrapeint_config['scrape-interval']) + + def test_global_scrape_timeout_can_be_set(self): + self.harness.set_leader(True) + scrapetime_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + scrapetime_config['scrape-timeout'] = '{}{}'.format(1, unit) + 
self.harness.update_config(scrapetime_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['scrape_timeout'], + scrapetime_config['scrape-timeout']) + + def test_global_evaluation_interval_can_be_set(self): + self.harness.set_leader(True) + evalint_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + evalint_config['evaluation-interval'] = '{}{}'.format(1, unit) + self.harness.update_config(evalint_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['evaluation_interval'], + evalint_config['evaluation-interval']) + + def test_valid_external_labels_can_be_set(self): + self.harness.set_leader(True) + label_config = MINIMAL_CONFIG.copy() + labels = {'name1': 'value1', + 'name2': 'value2'} + label_config['external-labels'] = json.dumps(labels) + self.harness.update_config(label_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertIsNotNone(gconfig['external_labels']) + self.assertEqual(labels, gconfig['external_labels']) + + def test_invalid_external_labels_can_not_be_set(self): + self.harness.set_leader(True) + label_config = MINIMAL_CONFIG.copy() + # label value must be string + labels = {'name': 1} + label_config['external-labels'] = json.dumps(labels) + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(label_config) + expected_logs = ["ERROR:charm:External label keys/values must be strings"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertIsNone(gconfig.get('external_labels')) + + def test_default_scrape_config_is_always_set(self): + self.harness.set_leader(True) + self.harness.update_config(MINIMAL_CONFIG) + pod_spec = self.harness.get_pod_spec() + prometheus_scrape_config = scrape_config(pod_spec, 'prometheus') + self.assertIsNotNone(prometheus_scrape_config, 'No default config found') + + +def alerting_config(pod_spec): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + return config_dict.get('alerting') + + +def global_config(pod_spec): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + return config_dict['global'] + + +def scrape_config(pod_spec, job_name): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + scrape_configs = config_dict['scrape_configs'] + for config in scrape_configs: + if config['job_name'] == job_name: + return config + return None + + +def cli_arg(pod_spec, cli_opt): + args = pod_spec[0]['containers'][0]['args'] + for arg in args: + opt_list = arg.split('=') + if len(opt_list) == 2 and opt_list[0] == cli_opt: + return opt_list[1] + if len(opt_list) == 1 and opt_list[0] == cli_opt: + return opt_list[0] + return None diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b226214256240e150d1cb34fdc75fb0b0bf412b --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/README.md 
@@ -0,0 +1,47 @@
+# squid-k8s Charm
+
+## Overview
+
+This is a Kubernetes Charm to deploy [Squid Cache](http://www.squid-cache.org/).
+
+Suggested actions for this charm:
+* Set allowed URLs.
+  Possible way to run this action: `juju run-action squid/0 addurl url=google.com`
+* Stop/Start/Restart the squid service.
+  Run like this: `juju run-action squid/0 restart`
+* Set ftp, http, https proxies
+
+## Quickstart
+
+If you don't have microk8s and juju installed, execute the following commands:
+```
+sudo snap install juju --classic
+sudo snap install microk8s --classic
+juju bootstrap microk8s
+juju add-model squid
+```
+
+Afterwards, clone the repository and deploy the charm
+```
+git clone https://github.com/DomFleischmann/charm-squid-k8s.git
+cd charm-squid-k8s
+git submodule update --init
+juju deploy .
+```
+Check if the charm is deployed correctly with `juju status`
+
+To test the `addurl` action open another terminal and type the following command:
+`export https_proxy=http://<squid-ip>:3128`
+
+Where `<squid-ip>` is the Squid App Address shown in `juju status`
+
+Now when executing `curl https://www.google.com` squid will block access to the URL
+
+Execute the `addurl` action:
+`juju run-action squid/0 addurl url=google.com`
+
+Now when executing `curl https://www.google.com` it will return the Google output.
+
+## Contact
+ - Author: Dominik Fleischmann
+ - Bug Tracker: [here](https://github.com/DomFleischmann/charm-squid-k8s)
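A programmatic variant of the `curl` check above, as a sketch: the address used here is a made-up example, so substitute the App address reported by `juju status`.

```python
# Sketch: verify whether squid allows a URL through the proxy.
import urllib.request

SQUID_IP = "10.152.183.20"  # hypothetical example; take it from `juju status`
opener = urllib.request.build_opener(
    urllib.request.ProxyHandler({"http": f"http://{SQUID_IP}:3128",
                                 "https": f"http://{SQUID_IP}:3128"}))
try:
    opener.open("https://www.google.com", timeout=10)
    print("allowed")
except Exception as err:
    # squid refuses the CONNECT when the URL is not whitelisted
    print("blocked:", err)
```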
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/actions.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/actions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3e57d8e3987419e2857c90ffdef8a8a9bff4256f
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/actions.yaml
@@ -0,0 +1,14 @@
+addurl:
+  description: "Add allowed URL to squid config"
+  params:
+    url:
+      description: "URL that will be allowed"
+      type: string
+      default: ""
+deleteurl:
+  description: "Delete allowed URL from squid config"
+  params:
+    url:
+      description: "URL that will no longer be allowed"
+      type: string
+      default: ""
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/config.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..450525eeed2cb05b0cf347357d67b3693f8c192f
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/config.yaml
@@ -0,0 +1,5 @@
+options:
+  port:
+    type: int
+    description: "Port Squid listens on"
+    default: 3128
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/metadata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a70555ab993c896032413b6125b39c4f0c43197a
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/metadata.yaml
@@ -0,0 +1,22 @@
+name: squid
+summary: Squid caching proxy charm for Kubernetes
+maintainers:
+  - Dominik Fleischmann
+description: |
+  This charm deploys and operates the Squid caching proxy on
+  Kubernetes. It provides actions to manage the URLs the proxy
+  allows and declares a prometheus-target relation for metrics
+  scraping.
+tags:
+  - misc
+series:
+  - kubernetes
+deployment:
+  type: stateful
+  service: loadbalancer
+storage:
+  spool:
+    type: filesystem
+    location: /var/spool/squid
+provides:
+  prometheus-target:
+    interface: http
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4da6f5fe4169cdf33fb85954f2ea3db2290f3fc8
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/requirements.txt
@@ -0,0 +1,3 @@
+ops
+jinja2
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/squid.charm b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/squid.charm
new file mode 100644
index 0000000000000000000000000000000000000000..961e9e4ac7cc603d2577e3b10f50bb92ab8a931c
Binary files /dev/null and b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/squid.charm differ
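The charm in `src/charm.py` below renders its configuration from `template/squid.conf` (shown at the end of this diff), injecting one `acl`/`http_access` block per allowed URL at the `{{ allowed_urls }}` marker. A self-contained sketch of that rendering step, with an inline one-line template standing in for the real file:

```python
# Sketch of the rendering done by _generate_config/_generate_allowedurls_config
# below; the inline template stands in for template/squid.conf.
from jinja2 import Template

allowed_urls = {"google.com"}
acl_text = "".join(f"acl allowedurls dstdomain .{url}\n" for url in sorted(allowed_urls))
acl_text += "http_access allow allowedurls\n"

template = Template("http_access allow localhost\n{{ allowed_urls }}http_access deny all\n")
print(template.render(allowed_urls=acl_text))
# -> an 'acl allowedurls dstdomain .google.com' line and its
#    'http_access allow allowedurls' rule, spliced before 'http_access deny all'
```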
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/src/charm.py
new file mode 100755
index 0000000000000000000000000000000000000000..f10b46c531fbf211b1bba965b580e7c79a30859d
--- /dev/null
+++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/src/charm.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+
+import logging
+import subprocess
+
+from jinja2 import Template
+from ops.charm import CharmBase
+from ops.framework import StoredState
+from ops.main import main
+from ops.model import ActiveStatus, MaintenanceStatus
+
+
+SQUID_CONF = "/etc/squid/squid.conf"
+
+logger = logging.getLogger(__name__)
+
+
+def reload():
+    # Signal the running squid process (via its pid file) to re-read squid.conf.
+    subprocess.Popen("sleep 1 && kill -HUP `cat /var/run/squid.pid`", shell=True)
+
+
+def apply_config(config):
+    with open(SQUID_CONF, "w") as f:
+        f.write(config)
+    reload()
+
+
+def _generate_allowedurls_config(allowed_urls: set):
+    allowed_urls_text = ""
+    for url in allowed_urls:
+        allowed_urls_text += f"acl allowedurls dstdomain .{url}\n"
+    allowed_urls_text += "http_access allow allowedurls\n"
+    return allowed_urls_text
+
+
+def _generate_config(**kwargs):
+    with open("template/squid.conf") as template:
+        return Template(template.read()).render(**kwargs)
+
+
+def update_config(allowed_urls: set):
+    allowed_urls_config = _generate_allowedurls_config(allowed_urls)
+    squid_config = _generate_config(allowed_urls=allowed_urls_config)
+    if squid_config:
+        apply_config(squid_config)
+
+
+class SquidK8SCharm(CharmBase):
+    """Class representing this operator charm."""
+
+    _stored = StoredState()
+
+    def __init__(self, *args):
+        """Initialize charm and configure states and events to observe."""
+        super().__init__(*args)
+        self._stored.set_default(allowedurls=set())
+
+        self.framework.observe(self.on.start, self.configure_pod)
+        self.framework.observe(self.on.config_changed, self.configure_pod)
+        self.framework.observe(self.on.addurl_action, self.on_addurl_action)
+        self.framework.observe(self.on.deleteurl_action, self.on_deleteurl_action)
+
+#        self.framework.observe(self.on["prometheus-target"].relation_joined, self._publish_prometheus_target_info)
+
+#    def _publish_prometheus_target_info(self, event):
+#        event.relation.data[self.unit]["host"] = self.app.name
+#        event.relation.data[self.unit]["port"] = str(9100)
+
+    def on_addurl_action(self, event):
+        """Handle the addurl action."""
+        url = event.params["url"]
+        self._stored.allowedurls.add(url)
+        update_config(self._stored.allowedurls)
+
+    def on_deleteurl_action(self, event):
+        """Handle the deleteurl action."""
+        url = event.params["url"]
+        if url in self._stored.allowedurls:
+            self._stored.allowedurls.remove(url)
+            update_config(self._stored.allowedurls)
+
+    def configure_pod(self, event):
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus()
+            return
+
+        self.unit.status = MaintenanceStatus("Applying pod spec")
+
+        pod_spec = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": self.framework.model.app.name,
+                    "image": "domfleischmann/squid-python",
+                    "ports": [
+                        {
+                            "name": "squid",
+                            "containerPort": self.config["port"],
+                            "protocol": "TCP",
+                        }
+                    ],
+                },
+                # {
+                #     "name": "exporter",
+                #     "image": "prom/node-exporter",
+                #     "ports": [
+                #         {
+                #             "containerPort": 9100,
+                #             "name": "exporter-http",
+                #             "protocol": "TCP",
+                #         }
+                #     ],
+                # }
+            ],
+        }
+
+        self.model.pod.set_spec(pod_spec)
+        self.unit.status = ActiveStatus()
+        self.app.status = ActiveStatus()
+
+
+if __name__ == "__main__":
+    main(SquidK8SCharm)
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/template/squid.conf b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/template/squid.conf
new file mode 100644
index 0000000000000000000000000000000000000000..9dc7e8b664d007c9d6dc452dffdc7fe47358d1b2
--- /dev/null
+++
b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/template/squid.conf @@ -0,0 +1,26 @@ +acl SSL_ports port 443 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +{{ allowed_urls }} +http_access deny all +http_port 3128 +coredump_dir /var/spool/squid +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880 +refresh_pattern . 0 20% 4320 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/tox.ini b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..a2476324f8eea6fdb222b4f382057849e8d85f44 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/ops/squid-operator/tox.ini @@ -0,0 +1,46 @@ +[tox] +skipsdist = True +envlist = unit, functional +skip_missing_interpreters = True + +[testenv] +basepython = python3 +setenv = + PYTHONPATH = {toxinidir}/lib/:{toxinidir} +passenv = HOME + +[testenv:unit] +commands = + coverage run -m unittest discover -s {toxinidir}/tests/unit -v + coverage report \ + --omit tests/*,mod/*,.tox/* + coverage html \ + --omit tests/*,mod/*,.tox/* +deps = -r{toxinidir}/tests/unit/requirements.txt + +[testenv:functional] +changedir = {toxinidir}/tests/functional +commands = functest-run-suite {posargs} +deps = -r{toxinidir}/tests/functional/requirements.txt + +[testenv:lint] +commands = flake8 +deps = + flake8 + flake8-docstrings + flake8-import-order + pep8-naming + flake8-colors + +[flake8] +exclude = + .git, + __pycache__, + .tox, + mod, +max-line-length = 120 +max-complexity = 10 +import-order-style = google + +[isort] +force_to_top=setuppath diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.flake8 b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..8ef84fcd43f3b7a46768c31b20f36cab48ffdfe0 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.gitignore b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..b3b17b402232904b604711f178aefca0a623bdf5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/.gitignore @@ -0,0 +1,6 @@ +*~ +*swp +*.charm +__pycache__ +build +venv diff --git 
a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..94a9ed024d3859793618152ea559a168bbcbb5e2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..35f666076133ad99f6a0503a44ccedbf04bd7775 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/README.md @@ -0,0 +1,78 @@ +# Prometheus Operator + +## Description + +The Prometheus Operator provides a cluster monitoring solution using +[Prometheus](https://prometheus.io), which is an open source +monitoring system and alerting toolkit. + +This repository contains a [Juju](https://jaas.ai/) Charm for +deploying the monitoring component of Prometheus in a Kubernetes +cluster. The alerting component of Prometheus is offered through a +separate Charm.
+ +## Setup + +A typical setup using [snaps](https://snapcraft.io/), for deployments +to a [microk8s](https://microk8s.io/) cluster, can be done with the +following commands: + + sudo snap install microk8s --classic + microk8s.enable dns storage registry dashboard + sudo snap install juju --classic + juju bootstrap microk8s microk8s + juju create-storage-pool operator-storage kubernetes storage-class=microk8s-hostpath + +## Build + +Install the charmcraft tool + + sudo snap install charmcraft + +Build the charm in this git repository + + charmcraft build + +## Usage + +Create a Juju model for your monitoring operators + + juju add-model lma + +Deploy Prometheus using its default configuration. + + juju deploy ./prometheus.charm + +View the Prometheus dashboard + +1. Use `juju status` to determine the IP of the Prometheus unit +2. Navigate to `http://<prometheus-unit-ip>:9090` using your browser + +If required, remove the deployed monitoring model completely + + juju destroy-model -y lma --no-wait --force --destroy-storage + +## Relations + +Currently supported relations are + +- [Grafana](https://github.com/canonical/grafana-operator) +- [Alertmanager](https://github.com/canonical/alertmanager-operator) + +## Developing + +Use your existing Python 3 development environment or create and +activate a Python 3 virtualenv + + virtualenv -p python3 venv + source venv/bin/activate + +Install the development requirements + + pip install -r requirements-dev.txt + +## Testing + +Just run `run_tests`: + + ./run_tests diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fef67f32c4a9134c536b965e8c53d055e18c4457 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions.yaml @@ -0,0 +1,3 @@ +reload-config: + description: | + Tell Prometheus to reload its config from the ConfigMap. \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions/reload-config b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions/reload-config new file mode 100755 index 0000000000000000000000000000000000000000..d736d4e1627e01599ba7cef209ba684ef4b0ef41 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/actions/reload-config @@ -0,0 +1,2 @@ +#!/bin/sh +kill -HUP 1 && echo "Sent SIGHUP to the Prometheus container, config reloaded" \ No newline at end of file
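The `reload-config` action above reloads by signalling PID 1 inside the Prometheus container. Since `src/charm.py` (later in this diff) starts the server with `--web.enable-lifecycle`, the same reload should also be reachable over HTTP via Prometheus's lifecycle endpoint. A minimal sketch, with a hypothetical unit address:

```python
# Sketch: trigger a Prometheus config reload over HTTP. Assumes the server
# runs with --web.enable-lifecycle (the charm's _cli_args passes this flag)
# and that 10.1.234.5 is the unit IP reported by `juju status` (made up).
import urllib.request

req = urllib.request.Request(
    "http://10.1.234.5:9090/-/reload", data=b"", method="POST"
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # 200 indicates the configuration was reloaded
```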
+ default: "prom/prometheus:latest" + prometheus-image-username: + type: string + description: | + The username for accessing the registry specified in + prometheus-image-path. + default: "" + prometheus-image-password: + type: string + description: | + The password associated with prometheus-image-username for + accessing the registry specified in prometheus-image-path. + default: "" + port: + description: The port prometheus will be listening on + type: int + default: 9090 + ssl-cert: + type: string + default: + description: | + SSL certificate to install and use for Prometheus endpoint. + ssl-key: + type: string + default: + description: | + SSL key to use with certificate specified as ssl-cert. + log-level: + description: | + Prometheus server log level (only log messages with the given severity + or above). Must be one of: [debug, info, warn, error, fatal]. + If not set, the Prometheus default one (info) will be used. + type: string + default: + web-external-url: + description: | + The URL under which Prometheus is externally reachable (for example, + if Prometheus is served via a reverse proxy). + Used for generating relative and absolute links back to + Prometheus itself. If the URL has a path portion, it will be used to + prefix all HTTP endpoints served by Prometheus. + + If omitted, relevant URL components will be derived automatically. + type: string + default: "" + tsdb-retention-time: + description: | + How long to retain samples in the storage. + Units Supported: y, w, d, h, m, s + type: string + default: 15d + tsdb-wal-compression: + description: | + This flag enables compression of the write-ahead log (WAL). + Depending on your data, you can expect the WAL size to be + halved with little extra cpu load. + type: boolean + default: false + external-labels: + description: | + A JSON string of key-value pairs that specify the labels to + attach to metrics in this Prometheus instance when they get pulled + by an aggregating parent. This is useful in the case of federation + where, for example, you want each datacenter to have its own + Prometheus instance and then have a global instance that pulls from + each of these datacenter instances. By specifying a unique set of + external-labels for each datacenter instance, you can easily determine + in the aggregating Prometheus instance which datacenter a metric is + coming from. Note that you are not limited to one instance per + datacenter. The datacenter example here is arbitrary and you are free + to organize your federation's hierarchy as you see fit. + Ex. '{ "cluster": "datacenter1" }'. Both keys and values may be + arbitrarily chosen as you see fit. + type: string + default: "{}" + scrape-interval: + description: | + How frequently to scrape targets by default. + type: string + default: 1m + scrape-timeout: + description: | + How long until a scrape request times out. + type: string + default: 10s + evaluation-interval: + description: | + How frequently rules will be evaluated. 
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/config/prometheus-k8s.yml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/config/prometheus-k8s.yml new file mode 100644 index 0000000000000000000000000000000000000000..e003db0ae0df07f539a6c725ddd7925ccba67232 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/config/prometheus-k8s.yml @@ -0,0 +1,283 @@ +# +# This file copied from https://github.com/prometheus/prometheus/blob/release-2.18/documentation/examples/prometheus-kubernetes.yml +# + +# A scrape configuration for running Prometheus on a Kubernetes cluster. +# This uses separate scrape configs for cluster components (i.e. API server, node) +# and services to allow each to use different authentication configs. +# +# Kubernetes labels will be added as Prometheus labels on metrics via the +# `labelmap` relabeling action. +# +# If you are using Kubernetes 1.7.2 or earlier, please take note of the comments +# for the kubernetes-cadvisor job; you will need to edit or remove this job. + +# Scrape config for API servers. +# +# Kubernetes exposes API servers as endpoints to the default/kubernetes +# service so this uses `endpoints` role and uses relabelling to only keep +# the endpoints associated with the default/kubernetes service using the +# default named port `https`. This works for single API server deployments as +# well as HA API server deployments. +scrape_configs: +- job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + # insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + +# Scrape config for nodes (kubelet). +# +# Rather than connecting directly to the node, the scrape is proxied through the +# Kubernetes apiserver. This means it will work if Prometheus is running out of +# cluster, or can't connect to nodes for some other reason (e.g. because of +# firewalling). +- job_name: 'kubernetes-nodes' + + # Default to scraping over https.
If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + +# Scrape config for Kubelet cAdvisor. +# +# This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics +# (those whose names begin with 'container_') have been removed from the +# Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to +# retrieve those metrics. +# +# In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor +# HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics" +# in that case (and ensure cAdvisor's HTTP server hasn't been disabled with +# the --cadvisor-port=0 Kubelet flag). +# +# This job is not necessary and should be removed in Kubernetes 1.6 and +# earlier versions, or it will cause the metrics to be scraped twice. +- job_name: 'kubernetes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # <kubernetes_sd_config>. + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor + +# Example scrape config for service endpoints. +# +# The relabeling allows the actual service scrape endpoint to be configured +# for all or only some endpoints. +- job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + # Example relabel to scrape only endpoints that have + # "example.io/should_be_scraped = true" annotation. + # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_scraped] + # action: keep + # regex: true + # + # Example relabel to customize metric path based on endpoints + # "example.io/metric_path = <metric path>" annotation.
+ # - source_labels: [__meta_kubernetes_service_annotation_example_io_metric_path] + # action: replace + # target_label: __metrics_path__ + # regex: (.+) + # + # Example relabel to scrape only single, desired port for the service based + # on endpoints "example.io/scrape_port = <port>" annotation. + # - source_labels: [__address__, __meta_kubernetes_service_annotation_example_io_scrape_port] + # action: replace + # regex: ([^:]+)(?::\d+)?;(\d+) + # replacement: $1:$2 + # target_label: __address__ + # + # Example relabel to configure scrape scheme for all service scrape targets + # based on endpoints "example.io/scrape_scheme = <http or https>" annotation. + # - source_labels: [__meta_kubernetes_service_annotation_example_io_scrape_scheme] + # action: replace + # target_label: __scheme__ + # regex: (https?) + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + +# Example scrape config for probing services via the Blackbox Exporter. +# +# The relabeling allows the actual service scrape endpoint to be configured +# for all or only some services. +- job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + # Example relabel to probe only some services that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_service_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + +# Example scrape config for probing ingresses via the Blackbox Exporter. +# +# The relabeling allows the actual ingress scrape endpoint to be configured +# for all or only some services. +- job_name: 'kubernetes-ingresses' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: ingress + + relabel_configs: + # Example relabel to probe only some ingresses that have "example.io/should_be_probed = true" annotation + # - source_labels: [__meta_kubernetes_ingress_annotation_example_io_should_be_probed] + # action: keep + # regex: true + - source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path] + regex: (.+);(.+);(.+) + replacement: ${1}://${2}${3} + target_label: __param_target + - target_label: __address__ + replacement: blackbox-exporter.example.com:9115 + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_ingress_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_ingress_name] + target_label: kubernetes_name + +# Example scrape config for pods +# +# The relabeling allows the actual pod scrape to be configured +# for all the declared ports (or port-free target if none is declared) +# or only some ports.
+- job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + # Example relabel to scrape only pods that have + # "example.io/should_be_scraped = true" annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_should_be_scraped] + # action: keep + # regex: true + # + # Example relabel to customize metric path based on pod + # "example.io/metric_path = <metric path>" annotation. + # - source_labels: [__meta_kubernetes_pod_annotation_example_io_metric_path] + # action: replace + # target_label: __metrics_path__ + # regex: (.+) + # + # Example relabel to scrape only single, desired port for the pod + # based on pod "example.io/scrape_port = <port>" annotation. + # - source_labels: [__address__, __meta_kubernetes_pod_annotation_example_io_scrape_port] + # action: replace + # regex: ([^:]+)(?::\d+)?;(\d+) + # replacement: $1:$2 + # target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name
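The commented `scrape_port` relabel above is the least obvious part of this file: Prometheus joins the listed `source_labels` with `;`, anchors the regex against the joined string, and substitutes the replacement into `target_label`. A quick replay of that rewrite in plain Python (the address and port values are made up):

```python
# Sketch: replay the scrape_port relabel by hand. Prometheus anchors relabel
# regexes against the joined source labels, so fullmatch is the analogue.
import re

joined = "10.1.2.3:8080;9102"  # __address__;<scrape_port annotation value>
m = re.fullmatch(r"([^:]+)(?::\d+)?;(\d+)", joined)
print(m.expand(r"\1:\2"))  # -> 10.1.2.3:9102, the rewritten __address__
```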
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/dispatch b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/dispatch new file mode 100755 index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/dispatch @@ -0,0 +1,3 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/install b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/install new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/install @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/start b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/start new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/start @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/upgrade-charm b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/upgrade-charm new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/hooks/upgrade-charm @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/icon.svg b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/icon.svg new file mode 100644 index 0000000000000000000000000000000000000000..5c51f66d901d0a30c082a7207a53d19b763acc2b --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/icon.svg @@ -0,0 +1,50 @@ + + + +image/svg+xml \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5d4329da48a621aad3a50ab6df40d72a11dbcb3f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/metadata.yaml @@ -0,0 +1,34 @@ +name: prometheus +summary: Prometheus for Kubernetes clusters +maintainers: + - Balbir Thomas +description: | + Prometheus is an open source monitoring solution. Prometheus + supports aggregating high dimensional data and exposes a powerful + query language PromQL. This charm deploys and operates Prometheus on + Kubernetes clusters. Prometheus can raise alerts through a relation + with the Alertmanager charm. Alerting rules for Prometheus need to + be provided through a relation with the application that requires + alerting. Prometheus provides its own dashboard for data + visualization but a richer visualization interface may be obtained + through a relation with the Grafana charm. +tags: + - observability + - lma + - prometheus + - monitoring + - alerting + - grafana +series: + - kubernetes +requires: + grafana-source: + interface: grafana-datasource + alertmanager: + interface: alertmanager + target: + interface: http +storage: + database: + type: filesystem + location: /var/lib/prometheus diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements-dev.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..3950bef2e306b78aaa231135636a04f2d443d569 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements-dev.txt @@ -0,0 +1,5 @@ +-r requirements.txt +black +flake8 +pytest +pytest-cov diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ff3e3351770b50b916e2fcc6478e986f59c35845 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/requirements.txt @@ -0,0 +1,2 @@ +ops +pyaml diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/run_tests b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/run_tests new file mode 100755 index 0000000000000000000000000000000000000000..637497ffe1bac2f75fec96b3bc1d25e16e39e1d8 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/run_tests @@ -0,0 +1,16 @@
+#!/bin/sh -e +# Copyright 2020 Balbir Thomas +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH=src +else + export PYTHONPATH="src:$PYTHONPATH" +fi + +black --diff +python3 -m unittest -v "$@" diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/setup.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..15ce0b9ca35e585d2b1925a13bd58d1ea67f0900 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/setup.py @@ -0,0 +1,21 @@ +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="prometheus-charm", + version="0.0.1", + author="Balbir Thomas", + author_email="balbir.thomas@canonical.com", + description="Kubernetes Charm/Operator for Prometheus", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/balbirthomas/prometheus-charm", + packages=setuptools.find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "Operating System :: OS Independent", + ], + python_requires='>=3.5', +)
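Before reading `charm.py` below, it helps to know what it ultimately produces: a `prometheus.yml` assembled by `_prometheus_config` and mounted into the pod. A sketch of the output shape under the defaults in `config.yaml` (port 9090, no alertmanagers, no `target` relations); illustrative only, not byte-exact:

```python
# Sketch: the shape of the prometheus.yml the charm renders with default
# config, mirroring _prometheus_config/_prometheus_global_config below.
import yaml

scrape_config = {
    "global": {
        "scrape_interval": "1m",
        "scrape_timeout": "10s",
        "evaluation_interval": "1m",
    },
    "scrape_configs": [{
        "job_name": "prometheus",
        "scrape_interval": "5s",
        "scrape_timeout": "5s",
        "metrics_path": "/metrics",
        "honor_timestamps": True,
        "scheme": "http",
        # By default the server only scrapes itself on the configured port.
        "static_configs": [{"targets": ["localhost:9090"]}],
    }],
}
print(yaml.dump(scrape_config))
```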
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/src/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..e4f584fb540e8f0f74c634fb11fb9145ad3027a4 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/src/charm.py @@ -0,0 +1,377 @@ +#!/usr/bin/env python3 +# Copyright 2020 Balbir Thomas +# See LICENSE file for licensing details. + +import logging +import yaml +import json + +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ActiveStatus, MaintenanceStatus, BlockedStatus + +logger = logging.getLogger(__name__) + + +class PrometheusCharm(CharmBase): + """A Juju Charm for Prometheus + """ + _stored = StoredState() + + def __init__(self, *args): + logger.debug('Initializing Charm') + + super().__init__(*args) + + self._stored.set_default(alertmanagers=[]) + self._stored.set_default(alertmanager_port='9093') + + self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.stop, self._on_stop) + self.framework.observe(self.on['alertmanager'].relation_changed, + self._on_alertmanager_changed) + self.framework.observe(self.on['alertmanager'].relation_broken, + self._on_alertmanager_broken) + + self.framework.observe(self.on['grafana-source'].relation_changed, + self._on_grafana_changed) + self.framework.observe(self.on['target'].relation_changed, + self._on_config_changed) + + def _on_config_changed(self, _): + """Set a new Juju pod specification + """ + self._configure_pod() + + def _on_stop(self, _): + """Mark unit as inactive + """ + self.unit.status = MaintenanceStatus('Pod is terminating.') + + def _on_grafana_changed(self, event): + """Provide Grafana with data source information + """ + event.relation.data[self.unit]['port'] = str(self.model.config['port']) + event.relation.data[self.unit]['source-type'] = 'prometheus' + + def _on_alertmanager_changed(self, event): + """Set an alertmanager configuration + """ + if not self.unit.is_leader(): + return + + addrs = json.loads(event.relation.data[event.app].get('addrs', '[]')) + port = event.relation.data[event.app]['port'] + + self._stored.alertmanager_port = port + self._stored.alertmanagers = addrs + + self._configure_pod() + + def _on_alertmanager_broken(self, event): + """Remove all alertmanager configuration + """ + if not self.unit.is_leader(): + return + self._stored.alertmanagers.clear() + self._configure_pod() + + def _cli_args(self): + """Construct command line arguments for Prometheus + """ + config = self.model.config + args = [ + '--config.file=/etc/prometheus/prometheus.yml', + '--storage.tsdb.path=/var/lib/prometheus', + '--web.enable-lifecycle', + '--web.console.templates=/usr/share/prometheus/consoles', + '--web.console.libraries=/usr/share/prometheus/console_libraries' + ] + + # get log level + allowed_log_levels = ['debug', 'info', 'warn', 'error', 'fatal'] + if config.get('log-level'): + log_level = config['log-level'].lower() + else: + log_level = 'info' + + # If log level is invalid, set it to debug + if log_level not in allowed_log_levels: + logging.error( + 'Invalid loglevel: {0} given, {1} allowed.
' + 'defaulting to DEBUG loglevel.'.format( + log_level, '/'.join(allowed_log_levels) + ) + ) + log_level = 'debug' + + # set log level + args.append( + '--log.level={0}'.format(log_level) + ) + + # Enable time series database compression + if config.get('tsdb-wal-compression'): + args.append('--storage.tsdb.wal-compression') + + # Set time series retention time + if config.get('tsdb-retention-time') and self._is_valid_timespec( + config['tsdb-retention-time']): + args.append('--storage.tsdb.retention.time={}'.format(config['tsdb-retention-time'])) + + return args + + def _is_valid_timespec(self, timeval): + """Check that a time interval's unit and value are valid + """ + if not timeval: + return False + + time, unit = timeval[:-1], timeval[-1] + + if unit not in ['y', 'w', 'd', 'h', 'm', 's']: + logger.error('Invalid unit {} in time spec'.format(unit)) + return False + + try: + int(time) + except ValueError: + logger.error('Cannot convert time {} to integer'.format(time)) + return False + + if not int(time) > 0: + logger.error('Expected positive time spec but got {}'.format(time)) + return False + + return True + + def _are_valid_labels(self, json_data): + """Check that Prometheus external labels are valid + """ + if not json_data: + return False + + try: + labels = json.loads(json_data) + except (ValueError, TypeError): + logger.error('Cannot parse external labels: {}'.format(json_data)) + return False + + if not isinstance(labels, dict): + logger.error('Expected label dictionary but got: {}'.format(labels)) + return False + + for key, value in labels.items(): + if not isinstance(key, str) or not isinstance(value, str): + logger.error('External label keys/values must be strings') + return False + + return True + + def _external_labels(self): + """Extract external labels for Prometheus from configuration + """ + config = self.model.config + labels = {} + + if config.get('external-labels') and self._are_valid_labels( + config['external-labels']): + labels = json.loads(config['external-labels']) + + return labels + + def _prometheus_global_config(self): + """Construct Prometheus global configuration + """ + config = self.model.config + global_config = {} + + labels = self._external_labels() + if labels: + global_config['external_labels'] = labels + + if config.get('scrape-interval') and self._is_valid_timespec( + config['scrape-interval']): + global_config['scrape_interval'] = config['scrape-interval'] + + if config.get('scrape-timeout') and self._is_valid_timespec( + config['scrape-timeout']): + global_config['scrape_timeout'] = config['scrape-timeout'] + + if config.get('evaluation-interval') and self._is_valid_timespec( + config['evaluation-interval']): + global_config['evaluation_interval'] = config['evaluation-interval'] + + return global_config + + def _alerting_config(self): + """Construct the Prometheus alerting configuration + """ + alerting_config = '' + + if len(self._stored.alertmanagers) < 1: + logger.debug('No alertmanagers available') + return alerting_config + + targets = [] + for manager in self._stored.alertmanagers: + port = self._stored.alertmanager_port + targets.append("{}:{}".format(manager, port)) + + manager_config = {'static_configs': [{'targets': targets}]} + alerting_config = {'alertmanagers': [manager_config]} + + return alerting_config + + def _prometheus_config(self): + """Construct Prometheus configuration + """ + config = self.model.config + + scrape_config = {'global': self._prometheus_global_config(), + 'scrape_configs': []} + + alerting_config = self._alerting_config() + if
alerting_config: + scrape_config['alerting'] = alerting_config + + # By default only monitor prometheus server itself + targets = ['localhost:{}'.format(config['port'])] + relation_targets = self.relation_targets + if relation_targets: + targets.extend(relation_targets) + + default_config = { + 'job_name': 'prometheus', + 'scrape_interval': '5s', + 'scrape_timeout': '5s', + 'metrics_path': '/metrics', + 'honor_timestamps': True, + 'scheme': 'http', + 'static_configs': [{ + 'targets': targets + }] + } + scrape_config['scrape_configs'].append(default_config) + + logger.debug('Prometheus config: {}'.format(scrape_config)) + + return yaml.dump(scrape_config), targets + + def _build_pod_spec(self): + """Construct a Juju pod specification for Prometheus + """ + logger.debug('Building Pod Spec') + config = self.model.config + prometheus_config, targets = self._prometheus_config() + spec = { + 'version': 3, + 'containers': [{ + 'name': self.app.name, + 'imageDetails': { + 'imagePath': config['prometheus-image-path'], + 'username': config.get('prometheus-image-username', ''), + 'password': config.get('prometheus-image-password', '') + }, + 'args': self._cli_args(), + "envConfig": { + "targets": str(targets), + }, + 'kubernetes': { + 'readinessProbe': { + 'httpGet': { + 'path': '/-/ready', + 'port': config['port'] + }, + 'initialDelaySeconds': 10, + 'timeoutSeconds': 30 + }, + 'livenessProbe': { + 'httpGet': { + 'path': '/-/healthy', + 'port': config['port'] + }, + 'initialDelaySeconds': 30, + 'timeoutSeconds': 30 + } + }, + 'ports': [{ + 'containerPort': config['port'], + 'name': 'prometheus-http', + 'protocol': 'TCP' + }], + 'volumeConfig': [{ + 'name': 'prometheus-config', + 'mountPath': '/etc/prometheus', + 'files': [{ + 'path': 'prometheus.yml', + 'content': prometheus_config + }] + }] + }] + } + + return spec + + def _check_config(self): + """Identify missing but required items in configuration + + :returns: list of missing configuration items (configuration keys) + """ + logger.debug('Checking Config') + config = self.model.config + missing = [] + + if not config.get('prometheus-image-path'): + missing.append('prometheus-image-path') + + if config.get('prometheus-image-username') \ + and not config.get('prometheus-image-password'): + missing.append('prometheus-image-password') + + return missing + + def _configure_pod(self): + """Set up a new Prometheus pod specification + """ + logger.debug('Configuring Pod') + missing_config = self._check_config() + if missing_config: + logger.error('Incomplete Configuration : {}.
' + 'Application will be blocked.'.format(missing_config)) + self.unit.status = \ + BlockedStatus('Missing configuration: {}'.format(missing_config)) + return + + if not self.unit.is_leader(): + self.unit.status = ActiveStatus() + return + + self.unit.status = MaintenanceStatus('Setting pod spec.') + pod_spec = self._build_pod_spec() + + self.model.pod.set_spec(pod_spec) + self.app.status = ActiveStatus() + self.unit.status = ActiveStatus() + + @property + def relation_targets(self): + hosts = [] + relations = self.framework.model.relations.get("target") + if relations: + for relation in relations: + for i, unit in enumerate(relation.units): + unit_name = unit.name.replace("/", "-") + host = relation.data[unit].get("host") + port = int(relation.data[unit].get("port", "9100")) + if not host: + continue + hostname = f"{host}-{i}.{host}-endpoints" + if hostname and port: + hosts.append(f"{hostname}:{port}") + return hosts + +if __name__ == "__main__": + main(PrometheusCharm) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/tests/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/tests/test_charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/tests/test_charm.py new file mode 100644 index 0000000000000000000000000000000000000000..05f95782d01dfa5d3ae5965531c61fcc442909f5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/prometheus-operator/tests/test_charm.py @@ -0,0 +1,313 @@ +# Copyright 2020 Balbir Thomas +# See LICENSE file for licensing details. + +import unittest +import yaml +import json + +from ops.testing import Harness +from charm import PrometheusCharm + +MINIMAL_CONFIG = { + 'prometheus-image-path': 'prom/prometheus', + 'port': 9090 +} + +SAMPLE_ALERTING_CONFIG = { + 'alertmanagers': [{ + 'static_configs': [{ + 'targets': ['192.168.0.1:9093'] + }] + }] +} + + +class TestCharm(unittest.TestCase): + def setUp(self): + self.harness = Harness(PrometheusCharm) + self.addCleanup(self.harness.cleanup) + self.harness.begin() + + def test_image_path_is_required(self): + missing_image_config = { + 'prometheus-image-path': '', + 'prometheus-image-username': '', + 'prometheus-image-password': '' + } + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(missing_image_config) + expected_logs = [ + "ERROR:charm:Incomplete Configuration : ['prometheus-image-path']. " + "Application will be blocked." + ] + self.assertEqual(sorted(logger.output), expected_logs) + + missing = self.harness.charm._check_config() + expected = ['prometheus-image-path'] + self.assertEqual(missing, expected) + + def test_password_is_required_when_username_is_set(self): + missing_password_config = { + 'prometheus-image-path': 'prom/prometheus:latest', + 'prometheus-image-username': 'some-user', + 'prometheus-image-password': '', + } + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(missing_password_config) + expected_logs = [ + "ERROR:charm:Incomplete Configuration : ['prometheus-image-password']. " + "Application will be blocked." 
+ ] + self.assertEqual(sorted(logger.output), expected_logs) + + missing = self.harness.charm._check_config() + expected = ['prometheus-image-password'] + self.assertEqual(missing, expected) + + def test_alerting_config_is_updated_by_alertmanager_relation(self): + self.harness.set_leader(True) + + # check alerting config is empty without alertmanager relation + self.harness.update_config(MINIMAL_CONFIG) + + self.assertEqual(self.harness.charm._stored.alertmanagers, []) + rel_id = self.harness.add_relation('alertmanager', 'alertmanager') + + self.assertIsInstance(rel_id, int) + self.harness.add_relation_unit(rel_id, 'alertmanager/0') + pod_spec = self.harness.get_pod_spec() + self.assertEqual(alerting_config(pod_spec), None) + + # check alerting config is updated when an alertmanager joins + self.harness.update_relation_data(rel_id, + 'alertmanager', + { + 'port': '9093', + 'addrs': '["192.168.0.1"]' + }) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG) + + def test_alerting_config_is_removed_when_alertmanager_is_broken(self): + self.harness.set_leader(True) + + # ensure there is a non-empty alerting config + self.harness.update_config(MINIMAL_CONFIG) + rel_id = self.harness.add_relation('alertmanager', 'alertmanager') + rel = self.harness.model.get_relation('alertmanager') + self.assertIsInstance(rel_id, int) + self.harness.add_relation_unit(rel_id, 'alertmanager/0') + self.harness.update_relation_data(rel_id, + 'alertmanager', + { + 'port': '9093', + 'addrs': '["192.168.0.1"]' + }) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(alerting_config(pod_spec), SAMPLE_ALERTING_CONFIG) + + # check alerting config is removed when relation departs + self.harness.charm.on.alertmanager_relation_broken.emit(rel) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(alerting_config(pod_spec), None) + + def test_grafana_is_provided_port_and_source(self): + self.harness.set_leader(True) + self.harness.update_config(MINIMAL_CONFIG) + rel_id = self.harness.add_relation('grafana-source', 'grafana') + self.harness.add_relation_unit(rel_id, 'grafana/0') + self.harness.update_relation_data(rel_id, 'grafana/0', {}) + data = self.harness.get_relation_data(rel_id, self.harness.model.unit.name) + + self.assertEqual(int(data['port']), MINIMAL_CONFIG['port']) + self.assertEqual(data['source-type'], 'prometheus') + + def test_default_cli_log_level_is_info(self): + self.harness.set_leader(True) + self.harness.update_config(MINIMAL_CONFIG) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--log.level'), 'info') + + def test_invalid_log_level_defaults_to_debug(self): + self.harness.set_leader(True) + bad_log_config = MINIMAL_CONFIG.copy() + bad_log_config['log-level'] = 'bad-level' + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(bad_log_config) + expected_logs = [ + "ERROR:root:Invalid loglevel: bad-level given, " + "debug/info/warn/error/fatal allowed. " + "defaulting to DEBUG loglevel."
+ ] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--log.level'), 'debug') + + def test_valid_log_level_is_accepted(self): + self.harness.set_leader(True) + valid_log_config = MINIMAL_CONFIG.copy() + valid_log_config['log-level'] = 'warn' + self.harness.update_config(valid_log_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--log.level'), 'warn') + + def test_tsdb_compression_is_not_enabled_by_default(self): + self.harness.set_leader(True) + compress_config = MINIMAL_CONFIG.copy() + self.harness.update_config(compress_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'), + None) + + def test_tsdb_compression_can_be_enabled(self): + self.harness.set_leader(True) + compress_config = MINIMAL_CONFIG.copy() + compress_config['tsdb-wal-compression'] = True + self.harness.update_config(compress_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.wal-compression'), + '--storage.tsdb.wal-compression') + + def test_valid_tsdb_retention_times_can_be_set(self): + self.harness.set_leader(True) + retention_time_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + retention_time = '{}{}'.format(1, unit) + retention_time_config['tsdb-retention-time'] = retention_time + self.harness.update_config(retention_time_config) + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + retention_time) + + def test_invalid_tsdb_retention_times_can_not_be_set(self): + self.harness.set_leader(True) + retention_time_config = MINIMAL_CONFIG.copy() + + # invalid unit + retention_time = '{}{}'.format(1, 'x') + retention_time_config['tsdb-retention-time'] = retention_time + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(retention_time_config) + expected_logs = ["ERROR:charm:Invalid unit x in time spec"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + None) + + # invalid time value + retention_time = '{}{}'.format(0, 'd') + retention_time_config['tsdb-retention-time'] = retention_time + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(retention_time_config) + expected_logs = ["ERROR:charm:Expected positive time spec but got 0"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + self.assertEqual(cli_arg(pod_spec, '--storage.tsdb.retention.time'), + None) + + def test_global_scrape_interval_can_be_set(self): + self.harness.set_leader(True) + scrapeint_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + scrapeint_config['scrape-interval'] = '{}{}'.format(1, unit) + self.harness.update_config(scrapeint_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['scrape_interval'], + scrapeint_config['scrape-interval']) + + def test_global_scrape_timeout_can_be_set(self): + self.harness.set_leader(True) + scrapetime_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + scrapetime_config['scrape-timeout'] = '{}{}'.format(1, unit) + 
self.harness.update_config(scrapetime_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['scrape_timeout'], + scrapetime_config['scrape-timeout']) + + def test_global_evaluation_interval_can_be_set(self): + self.harness.set_leader(True) + evalint_config = MINIMAL_CONFIG.copy() + acceptable_units = ['y', 'w', 'd', 'h', 'm', 's'] + for unit in acceptable_units: + evalint_config['evaluation-interval'] = '{}{}'.format(1, unit) + self.harness.update_config(evalint_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertEqual(gconfig['evaluation_interval'], + evalint_config['evaluation-interval']) + + def test_valid_external_labels_can_be_set(self): + self.harness.set_leader(True) + label_config = MINIMAL_CONFIG.copy() + labels = {'name1': 'value1', + 'name2': 'value2'} + label_config['external-labels'] = json.dumps(labels) + self.harness.update_config(label_config) + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertIsNotNone(gconfig['external_labels']) + self.assertEqual(labels, gconfig['external_labels']) + + def test_invalid_external_labels_can_not_be_set(self): + self.harness.set_leader(True) + label_config = MINIMAL_CONFIG.copy() + # label value must be string + labels = {'name': 1} + label_config['external-labels'] = json.dumps(labels) + with self.assertLogs(level='ERROR') as logger: + self.harness.update_config(label_config) + expected_logs = ["ERROR:charm:External label keys/values must be strings"] + self.assertEqual(sorted(logger.output), expected_logs) + + pod_spec = self.harness.get_pod_spec() + gconfig = global_config(pod_spec) + self.assertIsNone(gconfig.get('external_labels')) + + def test_default_scrape_config_is_always_set(self): + self.harness.set_leader(True) + self.harness.update_config(MINIMAL_CONFIG) + pod_spec = self.harness.get_pod_spec() + prometheus_scrape_config = scrape_config(pod_spec, 'prometheus') + self.assertIsNotNone(prometheus_scrape_config, 'No default config found') + + +def alerting_config(pod_spec): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + return config_dict.get('alerting') + + +def global_config(pod_spec): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + return config_dict['global'] + + +def scrape_config(pod_spec, job_name): + config_yaml = pod_spec[0]['containers'][0]['volumeConfig'][0]['files'][0]['content'] + config_dict = yaml.safe_load(config_yaml) + scrape_configs = config_dict['scrape_configs'] + for config in scrape_configs: + if config['job_name'] == job_name: + return config + return None + + +def cli_arg(pod_spec, cli_opt): + args = pod_spec[0]['containers'][0]['args'] + for arg in args: + opt_list = arg.split('=') + if len(opt_list) == 2 and opt_list[0] == cli_opt: + return opt_list[1] + if len(opt_list) == 1 and opt_list[0] == cli_opt: + return opt_list[0] + return None diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/README.md b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b226214256240e150d1cb34fdc75fb0b0bf412b --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/README.md @@ -0,0 
+1,47 @@ +# squid-k8s Charm + +## Overview + +This is a Kubernetes Charm to deploy [Squid Cache](http://www.squid-cache.org/). + +Suggested Actions for this charm: +* Set allowed URLs + Possible way to run action: `juju run-action squid/0 addurl url=google.com` +* Stop/Start/Restart the squid service - done + Run like this: `juju run-action squid/0 restart` +* Set ftp, http, https proxies + +## Quickstart + +If you don't have microk8s and juju installed, execute the following commands: +``` +sudo snap install juju --classic +sudo snap install microk8s --classic +juju bootstrap microk8s +juju add-model squid +``` + +Afterwards, clone the repository and deploy the charm: +``` +git clone https://github.com/DomFleischmann/charm-squid-k8s.git +cd charm-squid-k8s +git submodule update --init +juju deploy . +``` +Check that the charm is deployed correctly with `juju status` + +To test the `addurl` action open another terminal and type the following command: +`export https_proxy=http://<squid-ip>:3128` + +Where squid-ip is the Squid App Address shown in `juju status` + +Now when executing `curl https://www.google.com` Squid will block access to the URL + +Execute the `addurl` action: +`juju run-action squid/0 addurl url=google.com` + +Now when executing `curl https://www.google.com` it will give you the Google output. + +## Contact + - Author: Dominik Fleischmann + - Bug Tracker: [here](https://github.com/DomFleischmann/charm-squid-k8s) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/actions.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/actions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e57d8e3987419e2857c90ffdef8a8a9bff4256f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/actions.yaml @@ -0,0 +1,14 @@ +addurl: + description: "Add allowed URL to squid config" + params: + url: + description: "URL that will be allowed" + type: string + default: "" +deleteurl: + description: "Delete allowed URL from squid config" + params: + url: + description: "URL that will no longer be allowed" + type: string + default: "" diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/config.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..450525eeed2cb05b0cf347357d67b3693f8c192f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/config.yaml @@ -0,0 +1,5 @@ +options: + port: + type: int + description: "Port" + default: 3128 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/dispatch b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/dispatch new file mode 100755 index 0000000000000000000000000000000000000000..fe31c0567bdce62a6542a6470997cb6a874e4bd8 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/dispatch @@ -0,0 +1,3 @@ +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv ./src/charm.py diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/install
b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/install new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/install @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/start b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/start new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/start @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/upgrade-charm b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/upgrade-charm new file mode 120000 index 0000000000000000000000000000000000000000..8b970447af1decd19c27ca3c609fc97f56a233e3 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/hooks/upgrade-charm @@ -0,0 +1 @@ +../dispatch \ No newline at end of file diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/metadata.yaml b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/metadata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a70555ab993c896032413b6125b39c4f0c43197a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/metadata.yaml @@ -0,0 +1,22 @@ +name: squid +summary: Describe your charm here +maintainers: + - Dominik Fleischmann +description: | + A nice long description of what the product does and any + high level information about how the charm provides the product, + on which platform, etc. 
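+# Note: the 'prometheus-target' endpoint declared below is presumably what the +# prometheus charm's 'target' relation consumes in this demo; the commented-out +# _publish_prometheus_target_info handler in src/charm.py (further down in this +# diff) would populate its 'host' and 'port' relation data.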
+tags: + - misc +series: + - kubernetes +deployment: + type: stateful + service: loadbalancer +storage: + spool: + type: filesystem + location: /var/spool/squid +provides: + prometheus-target: + interface: http diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/requirements.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..4da6f5fe4169cdf33fb85954f2ea3db2290f3fc8 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/requirements.txt @@ -0,0 +1,3 @@ +ops +jinja2 +git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/squid.charm b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/squid.charm new file mode 100644 index 0000000000000000000000000000000000000000..961e9e4ac7cc603d2577e3b10f50bb92ab8a931c Binary files /dev/null and b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/squid.charm differ diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/src/charm.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/src/charm.py new file mode 100755 index 0000000000000000000000000000000000000000..f10b46c531fbf211b1bba965b580e7c79a30859d --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/src/charm.py @@ -0,0 +1,122 @@ +#! 
/usr/bin/env python3 + +import logging +import subprocess + +from jinja2 import Template +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.main import main +from ops.model import ActiveStatus, MaintenanceStatus + + +SQUID_CONF = "/etc/squid/squid.conf" + +logger = logging.getLogger(__name__) + + +def reload(): + subprocess.Popen("sleep 1 && kill -HUP `cat /var/run/squid.pid`", shell=True) + + +def apply_config(config): + with open(SQUID_CONF, "w") as f: + f.write(config) + reload() + + +def _generate_allowedurls_config(allowed_urls: set): + allowed_urls_text = "" + for url in allowed_urls: + allowed_urls_text += f"acl allowedurls dstdomain .{url}\n" + allowed_urls_text += "http_access allow allowedurls\n" + return allowed_urls_text + + +def _generate_config(**kwargs): + with open("template/squid.conf") as template: + return Template(template.read()).render(**kwargs) + + +def update_config(allowed_urls: set): + allowed_urls_config = _generate_allowedurls_config(allowed_urls) + squid_config = _generate_config(allowed_urls=allowed_urls_config) + if squid_config: + apply_config(squid_config) + + +class SquidK8SCharm(CharmBase): + """Class representing this Operator charm.""" + + _stored = StoredState() + + def __init__(self, *args): + """Initialize charm and configure states and events to observe.""" + super().__init__(*args) + self._stored.set_default(allowedurls=set()) + + self.framework.observe(self.on.start, self.configure_pod) + self.framework.observe(self.on.config_changed, self.configure_pod) + self.framework.observe(self.on.addurl_action, self.on_addurl_action) + self.framework.observe(self.on.deleteurl_action, self.on_deleteurl_action) + +# self.framework.observe(self.on["prometheus-target"].relation_joined, self._publish_prometheus_target_info) + +# def _publish_prometheus_target_info(self, event): +# event.relation.data[self.unit]["host"] = self.app.name +# event.relation.data[self.unit]["port"] = str(9100) + + def on_addurl_action(self, event): + """Handle the addurl action.""" + url = event.params["url"] + self._stored.allowedurls.add(url) + update_config(self._stored.allowedurls) + + def on_deleteurl_action(self, event): + """Handle the deleteurl action.""" + url = event.params["url"] + if url in self._stored.allowedurls: + self._stored.allowedurls.remove(url) + update_config(self._stored.allowedurls) + + def configure_pod(self, event): + if not self.unit.is_leader(): + self.unit.status = ActiveStatus() + return + + self.unit.status = MaintenanceStatus("Applying pod spec") + + pod_spec = { + "version": 3, + "containers": [ + { + "name": self.framework.model.app.name, + "image": "domfleischmann/squid-python", + "ports": [ + { + "name": "squid", + "containerPort": self.config["port"], + "protocol": "TCP", + } + ], + }, + # { + # "name": "exporter", + # "image": "prom/node-exporter", + # "ports": [ + # { + # "containerPort": 9100, + # "name": "exporter-http", + # "protocol": "TCP", + # } + # ], + # } + ], + } + + self.model.pod.set_spec(pod_spec) + self.unit.status = ActiveStatus() + self.app.status = ActiveStatus() + + +if __name__ == "__main__": + main(SquidK8SCharm) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/template/squid.conf b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/template/squid.conf new file mode 100644 index 0000000000000000000000000000000000000000..9dc7e8b664d007c9d6dc452dffdc7fe47358d1b2 --- /dev/null +++
b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/template/squid.conf @@ -0,0 +1,26 @@ +acl SSL_ports port 443 +acl Safe_ports port 80 # http +acl Safe_ports port 21 # ftp +acl Safe_ports port 443 # https +acl Safe_ports port 70 # gopher +acl Safe_ports port 210 # wais +acl Safe_ports port 1025-65535 # unregistered ports +acl Safe_ports port 280 # http-mgmt +acl Safe_ports port 488 # gss-http +acl Safe_ports port 591 # filemaker +acl Safe_ports port 777 # multiling http +acl CONNECT method CONNECT +http_access deny !Safe_ports +http_access deny CONNECT !SSL_ports +http_access allow localhost manager +http_access deny manager +http_access allow localhost +{{ allowed_urls }} +http_access deny all +http_port 3128 +coredump_dir /var/spool/squid +refresh_pattern ^ftp: 1440 20% 10080 +refresh_pattern ^gopher: 1440 0% 1440 +refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 +refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880 +refresh_pattern . 0 20% 4320 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/tox.ini b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..a2476324f8eea6fdb222b4f382057849e8d85f44 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/tox.ini @@ -0,0 +1,46 @@ +[tox] +skipsdist = True +envlist = unit, functional +skip_missing_interpreters = True + +[testenv] +basepython = python3 +setenv = + PYTHONPATH = {toxinidir}/lib/:{toxinidir} +passenv = HOME + +[testenv:unit] +commands = + coverage run -m unittest discover -s {toxinidir}/tests/unit -v + coverage report \ + --omit tests/*,mod/*,.tox/* + coverage html \ + --omit tests/*,mod/*,.tox/* +deps = -r{toxinidir}/tests/unit/requirements.txt + +[testenv:functional] +changedir = {toxinidir}/tests/functional +commands = functest-run-suite {posargs} +deps = -r{toxinidir}/tests/functional/requirements.txt + +[testenv:lint] +commands = flake8 +deps = + flake8 + flake8-docstrings + flake8-import-order + pep8-naming + flake8-colors + +[flake8] +exclude = + .git, + __pycache__, + .tox, + mod, +max-line-length = 120 +max-complexity = 10 +import-order-style = google + +[isort] +force_to_top=setuppath diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/INSTALLER b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/LICENSE.rst b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2007 Pallets + +Redistribution and use in source and 
binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..1af8df0f716eb32aa165f3eaf5205cef3cdac54f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/METADATA @@ -0,0 +1,106 @@ +Metadata-Version: 2.1 +Name: Jinja2 +Version: 2.11.3 +Summary: A very fast and expressive template engine. 
+Home-page: https://palletsprojects.com/p/jinja/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: Pallets +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Documentation, https://jinja.palletsprojects.com/ +Project-URL: Code, https://github.com/pallets/jinja +Project-URL: Issue tracker, https://github.com/pallets/jinja/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* +Description-Content-Type: text/x-rst +Requires-Dist: MarkupSafe (>=0.23) +Provides-Extra: i18n +Requires-Dist: Babel (>=0.8) ; extra == 'i18n' + +Jinja +===== + +Jinja is a fast, expressive, extensible templating engine. Special +placeholders in the template allow writing code similar to Python +syntax. Then the template is passed data to render the final document. + +It includes: + +- Template inheritance and inclusion. +- Define and import macros within templates. +- HTML templates can use autoescaping to prevent XSS from untrusted + user input. +- A sandboxed environment can safely render untrusted templates. +- AsyncIO support for generating templates and calling async + functions. +- I18N support with Babel. +- Templates are compiled to optimized Python code just-in-time and + cached, or can be compiled ahead-of-time. +- Exceptions point to the correct line in templates to make debugging + easier. +- Extensible filters, tests, functions, and even syntax. + +Jinja's philosophy is that while application logic belongs in Python if +possible, it shouldn't make the template designer's job difficult by +restricting functionality too much. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + $ pip install -U Jinja2 + +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + +In A Nutshell +------------- + +.. 
code-block:: jinja + + {% extends "base.html" %} + {% block title %}Members{% endblock %} + {% block content %} + <ul> + {% for user in users %} + <li><a href="{{ user.url }}">{{ user.username }}</a></li> + {% endfor %} + </ul> + {% endblock %} + + +Links +----- + +- Website: https://palletsprojects.com/p/jinja/ +- Documentation: https://jinja.palletsprojects.com/ +- Releases: https://pypi.org/project/Jinja2/ +- Code: https://github.com/pallets/jinja +- Issue tracker: https://github.com/pallets/jinja/issues +- Test status: https://dev.azure.com/pallets/jinja/_build +- Official chat: https://discord.gg/t6rrQZH + + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..da926963b1de3b16a3d4cb3d2b77c28951deda48 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/RECORD @@ -0,0 +1,61 @@ +Jinja2-2.11.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +Jinja2-2.11.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 +Jinja2-2.11.3.dist-info/METADATA,sha256=PscpJ1C3RSp8xcjV3fAuTz13rKbGxmzJXnMQFH-WKhs,3535 +Jinja2-2.11.3.dist-info/RECORD,, +Jinja2-2.11.3.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +Jinja2-2.11.3.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61 +Jinja2-2.11.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 +jinja2/__init__.py,sha256=LZUXmxJc2GIchfSAeMWsxCWiQYO-w1-736f2Q3I8ms8,1549 +jinja2/__pycache__/__init__.cpython-38.pyc,, +jinja2/__pycache__/_compat.cpython-38.pyc,, +jinja2/__pycache__/_identifier.cpython-38.pyc,, +jinja2/__pycache__/asyncfilters.cpython-38.pyc,, +jinja2/__pycache__/asyncsupport.cpython-38.pyc,, +jinja2/__pycache__/bccache.cpython-38.pyc,, +jinja2/__pycache__/compiler.cpython-38.pyc,, +jinja2/__pycache__/constants.cpython-38.pyc,, +jinja2/__pycache__/debug.cpython-38.pyc,, +jinja2/__pycache__/defaults.cpython-38.pyc,, +jinja2/__pycache__/environment.cpython-38.pyc,, +jinja2/__pycache__/exceptions.cpython-38.pyc,, +jinja2/__pycache__/ext.cpython-38.pyc,, +jinja2/__pycache__/filters.cpython-38.pyc,, +jinja2/__pycache__/idtracking.cpython-38.pyc,, +jinja2/__pycache__/lexer.cpython-38.pyc,, +jinja2/__pycache__/loaders.cpython-38.pyc,, +jinja2/__pycache__/meta.cpython-38.pyc,, +jinja2/__pycache__/nativetypes.cpython-38.pyc,, +jinja2/__pycache__/nodes.cpython-38.pyc,, +jinja2/__pycache__/optimizer.cpython-38.pyc,, +jinja2/__pycache__/parser.cpython-38.pyc,, +jinja2/__pycache__/runtime.cpython-38.pyc,, +jinja2/__pycache__/sandbox.cpython-38.pyc,, +jinja2/__pycache__/tests.cpython-38.pyc,, +jinja2/__pycache__/utils.cpython-38.pyc,, +jinja2/__pycache__/visitor.cpython-38.pyc,, +jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191 +jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775 +jinja2/asyncfilters.py,sha256=XJtYXTxFvcJ5xwk6SaDL4S0oNnT0wPYvXBCSzc482fI,4250 +jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209 +jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139 +jinja2/compiler.py,sha256=Ta9W1Lit542wItAHXlDcg0sEOsFDMirCdlFPHAurg4o,66284 +jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
+jinja2/debug.py,sha256=neR7GIGGjZH3_ILJGVUYy3eLQCCaWJMXOb7o0kGInWc,8529 +jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126 +jinja2/environment.py,sha256=XDSLKc4SqNLMOwTSq3TbWEyA5WyXfuLuVD0wAVjEFwM,50629 +jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425 +jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441 +jinja2/filters.py,sha256=9ORilsZrUoydSI9upz8_qGy7gozDWLYoFmlIBFSVRnQ,41439 +jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211 +jinja2/lexer.py,sha256=nUFLRKhhKmmEWkLI65nQePgcQs7qsRdjVYZETMt_v0g,30331 +jinja2/loaders.py,sha256=C-fST_dmFjgWkp0ZuCkrgICAoOsoSIF28wfAFink0oU,17666 +jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131 +jinja2/nativetypes.py,sha256=Ul__gtVw4xH-0qvUvnCNHedQeNDwmEuyLJztzzSPeRg,2753 +jinja2/nodes.py,sha256=Mk1oJPVgIjnQw9WOqILvcu3rLepcFZ0ahxQm2mbwDwc,31095 +jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457 +jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660 +jinja2/runtime.py,sha256=0y-BRyIEZ9ltByL2Id6GpHe1oDRQAwNeQvI0SKobNMw,30618 +jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127 +jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799 +jinja2/utils.py,sha256=Wy4yC3IByqUWwnKln6SdaixdzgK74P6F5nf-gQZrYnU,22436 +jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..01b8fc7d4a10cb8b4f1d21f11d3398d07d6b3478 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/entry_points.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..3619483fd4fca7407f3bd462aefcd70d1de69737 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[babel.extractors] +jinja2 = jinja2.ext:babel_extract [i18n] + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/Jinja2-2.11.3.dist-info/top_level.txt @@ -0,0 +1 @@ +jinja2 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/INSTALLER 
b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/LICENSE.rst b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/LICENSE.rst new file mode 100644 index 0000000000000000000000000000000000000000..9d227a0cc43c3268d15722b763bd94ad298645a1 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/LICENSE.rst @@ -0,0 +1,28 @@ +Copyright 2010 Pallets + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e4a7b90f51d7d6457663b1935f2665dd44f8e352 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/METADATA @@ -0,0 +1,94 @@ +Metadata-Version: 2.1 +Name: MarkupSafe +Version: 1.1.1 +Summary: Safely add untrusted strings to HTML/XML markup. 
+Home-page: https://palletsprojects.com/p/markupsafe/ +Author: Armin Ronacher +Author-email: armin.ronacher@active-4.com +Maintainer: The Pallets Team +Maintainer-email: contact@palletsprojects.com +License: BSD-3-Clause +Project-URL: Documentation, https://markupsafe.palletsprojects.com/ +Project-URL: Code, https://github.com/pallets/markupsafe +Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* +Description-Content-Type: text/x-rst + +MarkupSafe +========== + +MarkupSafe implements a text object that escapes characters so it is +safe to use in HTML and XML. Characters that have special meanings are +replaced so that they display as the actual characters. This mitigates +injection attacks, meaning untrusted user input can safely be displayed +on a page. + + +Installing +---------- + +Install and update using `pip`_: + +.. code-block:: text + + pip install -U MarkupSafe + +.. _pip: https://pip.pypa.io/en/stable/quickstart/ + + +Examples +-------- + +.. code-block:: pycon + + >>> from markupsafe import Markup, escape + >>> # escape replaces special characters and wraps in Markup + >>> escape('<script>alert(document.cookie);</script>') + Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;') + >>> # wrap in Markup to mark text "safe" and prevent escaping + >>> Markup('<strong>Hello</strong>') + Markup('<strong>hello</strong>') + >>> escape(Markup('<strong>Hello</strong>')) + Markup('<strong>hello</strong>') + >>> # Markup is a text subclass (str on Python 3, unicode on Python 2) + >>> # methods and operators escape their arguments + >>> template = Markup("Hello <em>%s</em>") + >>> template % '"World"' + Markup('Hello <em>&#34;World&#34;</em>') + + +Donate +------ + +The Pallets organization develops and supports MarkupSafe and other +libraries that use it. In order to grow the community of contributors +and users, and allow the maintainers to devote more time to the +projects, `please donate today`_. + +..
_please donate today: https://palletsprojects.com/donate + + +Links +----- + +* Website: https://palletsprojects.com/p/markupsafe/ +* Documentation: https://markupsafe.palletsprojects.com/ +* Releases: https://pypi.org/project/MarkupSafe/ +* Code: https://github.com/pallets/markupsafe +* Issue tracker: https://github.com/pallets/markupsafe/issues +* Test status: https://dev.azure.com/pallets/markupsafe/_build +* Official chat: https://discord.gg/t6rrQZH + + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..6d95824014c4aeaac8dab2207394c7d655aa7cbd --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/RECORD @@ -0,0 +1,16 @@ +MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +MarkupSafe-1.1.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 +MarkupSafe-1.1.1.dist-info/METADATA,sha256=-XXnVvCxQP2QbHutIQq_7Pk9OATy-x0NC7gN_3_SCRE,3167 +MarkupSafe-1.1.1.dist-info/RECORD,, +MarkupSafe-1.1.1.dist-info/WHEEL,sha256=RIeRBYNNiNK3sXfnenIjXDrR2Tzyz05xCMpKF2hJ1iA,111 +MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 +markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126 +markupsafe/__pycache__/__init__.cpython-38.pyc,, +markupsafe/__pycache__/_compat.cpython-38.pyc,, +markupsafe/__pycache__/_constants.cpython-38.pyc,, +markupsafe/__pycache__/_native.cpython-38.pyc,, +markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558 +markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690 +markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873 +markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884 +markupsafe/_speedups.cpython-38-x86_64-linux-gnu.so,sha256=t037yzhfsUaStpvo6eqDVYeK-dHfWmgB4cVL9nkY2-k,48016 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..b1fcc33cbc2a978759a99244f6338ec10ee908a2 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: false +Tag: cp38-cp38-manylinux2010_x86_64 + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/MarkupSafe-1.1.1.dist-info/top_level.txt @@ -0,0 +1 @@ +markupsafe diff --git 
a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/LICENSE b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/METADATA b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..565f05b73714eb85d96beb669a1aa42920c21c3a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 5.4.1 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Download-URL: https://pypi.org/project/PyYAML/ +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.* + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. + +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. 
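(Aside, not part of the committed package files: the METADATA blurb above is the API surface a charm venv like this one actually uses. A minimal sketch of that load/dump round trip follows; the squid config data is purely illustrative, and only the yaml.* names are real PyYAML API.)

import yaml

# Parse a YAML document into native Python objects (dicts, lists, scalars).
config = yaml.safe_load("squid:\n  port: 3128\n")
assert config == {"squid": {"port": 3128}}

# Serialize it back; default_flow_style=False keeps readable block style.
text = yaml.safe_dump(config, default_flow_style=False)

# The C-accelerated classes exist only when the libyaml extension was built
# (the __with_libyaml__ flag that the _yaml stub further down checks), so a
# guarded import with a pure-Python fallback is the usual pattern:
try:
    from yaml import CSafeLoader as SafeLoader
except ImportError:
    from yaml import SafeLoader
data = yaml.load("port: 3128", Loader=SafeLoader)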
+ diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/RECORD b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..68ca4da2c4df950a57ff47f60fbcacbb5256d161 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-5.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-5.4.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-5.4.1.dist-info/METADATA,sha256=XnrM5LY-uS85ica26gKUK0dGG-xmPjmGfDTSLpIHQFk,2087 +PyYAML-5.4.1.dist-info/RECORD,, +PyYAML-5.4.1.dist-info/WHEEL,sha256=Dh4w5P6PPWbqyqoE6MHlzbFQwZXlM-voWJDf2WUsS2g,108 +PyYAML-5.4.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-38.pyc,, +yaml/__init__.py,sha256=gfp2CbRVhzknghkiiJD2l6Z0pI-mv_iZHPSJ4aj0-nY,13170 +yaml/__pycache__/__init__.cpython-38.pyc,sha256=n0YyVkjiOLmcjlR2NXE5TIZf7Z2clZ6sqQ5KlyuTWSI,11845 +yaml/__pycache__/composer.cpython-38.pyc,sha256=OVPzAKAvC2-Tiv4HUwUUG9JHCzG17nvsRQcFTCtY9xs,3563 +yaml/__pycache__/constructor.cpython-38.pyc,sha256=EXPDY7Prtv3F6QbXiJc5F4BvJQyCCGRu83WF4u6X7Vo,20822 +yaml/__pycache__/cyaml.cpython-38.pyc,sha256=wI01UFU-WhUcdnnczL5QpKu0ZNQTttSzXbleIvIfcvM,3411 +yaml/__pycache__/dumper.cpython-38.pyc,sha256=9wIctrlMpF4ksMWuCc5QAyZSenGiRVyrtU-1pAfj54U,1823 +yaml/__pycache__/emitter.cpython-38.pyc,sha256=kd_QGJd0GjpfgQPN9DlG_7HwKfJnJ24JxtdiUOxM9iE,25353 +yaml/__pycache__/error.cpython-38.pyc,sha256=j6mkXgDmzV0y0lo6FeUrvZL2vHN6Vkc52k0_R0oOn6g,2300 +yaml/__pycache__/events.cpython-38.pyc,sha256=NFsoAO36pPL_uxoCO-xRxKndQ3vx47mkStOYjfoQVZ8,3974 +yaml/__pycache__/loader.cpython-38.pyc,sha256=lEMB2brjPrfMjXXTJpCEx6-ct4eI6LYovD4hW5ZuGsw,2164 +yaml/__pycache__/nodes.cpython-38.pyc,sha256=Kkxh_oL04gQg-YFWwnfjpIoYspsXO4GEqKTr3NbxOD8,1725 +yaml/__pycache__/parser.cpython-38.pyc,sha256=0R9Qx0cBMUoOLzMOWeXCyXsC4S4KJ7oPHdmTVPQ4FbQ,11924 +yaml/__pycache__/reader.cpython-38.pyc,sha256=ZpOMJ6rZDc8EWffI4vZR_Fhcu3WmhgT_GAkDrKkEtPo,4537 +yaml/__pycache__/representer.cpython-38.pyc,sha256=tR9wWffCThWXwQe47uYFdHg2bCkqNjBcwmG7RSHmWS4,10069 +yaml/__pycache__/resolver.cpython-38.pyc,sha256=zsLBuCKn8KAJPVGo5J_xZSytifJktdTtkUNnltOt__I,5498 +yaml/__pycache__/scanner.cpython-38.pyc,sha256=N8ubxRd6bZBjoRna6CU8wK1Imb_7TWOsudzPh9JDDkQ,25269 +yaml/__pycache__/serializer.cpython-38.pyc,sha256=9JDH7ONP5zFlep0f2yNWRoOSZr5Y28jL012O1EIbuug,3320 +yaml/__pycache__/tokens.cpython-38.pyc,sha256=haBW6UBDhVFog2xIe63OkrAP_9JRFyNKCROFPRJiyu0,4935 +yaml/_yaml.cpython-38-x86_64-linux-gnu.so,sha256=fxjEXaSdzion1SMwhu9Ikx-JOVNtcl6KvW_pyGBt-cU,2342916 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184 +yaml/resolver.py,sha256=Z1W8AOMA6Proy4gIO2OhUO4IPS_bFNAl0Ca3rwChpPg,8999 +yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/WHEEL b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..69d594f055a5127401ebe017f8837cef4c76c020 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: false +Tag: cp38-cp38-manylinux1_x86_64 + diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/PyYAML-5.4.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/_yaml/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/_yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imported may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. 
To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/__init__.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f17866f6c4135346c4b5876f019b1a4938f2e8bc --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/__init__.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +"""Jinja is a template engine written in pure Python. It provides a +non-XML syntax that supports inline expressions and an optional +sandboxed environment. +""" +from markupsafe import escape +from markupsafe import Markup + +from .bccache import BytecodeCache +from .bccache import FileSystemBytecodeCache +from .bccache import MemcachedBytecodeCache +from .environment import Environment +from .environment import Template +from .exceptions import TemplateAssertionError +from .exceptions import TemplateError +from .exceptions import TemplateNotFound +from .exceptions import TemplateRuntimeError +from .exceptions import TemplatesNotFound +from .exceptions import TemplateSyntaxError +from .exceptions import UndefinedError +from .filters import contextfilter +from .filters import environmentfilter +from .filters import evalcontextfilter +from .loaders import BaseLoader +from .loaders import ChoiceLoader +from .loaders import DictLoader +from .loaders import FileSystemLoader +from .loaders import FunctionLoader +from .loaders import ModuleLoader +from .loaders import PackageLoader +from .loaders import PrefixLoader +from .runtime import ChainableUndefined +from .runtime import DebugUndefined +from .runtime import make_logging_undefined +from .runtime import StrictUndefined +from .runtime import Undefined +from .utils import clear_caches +from .utils import contextfunction +from .utils import environmentfunction +from .utils import evalcontextfunction +from .utils import is_undefined +from .utils import select_autoescape + +__version__ = "2.11.3" diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_compat.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..1f044954a02933bcec2277fcdd575821bc18a99a --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_compat.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# flake8: noqa +import marshal +import sys + +PY2 = sys.version_info[0] == 2 +PYPY = hasattr(sys, "pypy_translation_info") +_identity = lambda x: x + +if not PY2: + unichr = chr + range_type = range + text_type = str + string_types = (str,) + integer_types = (int,) + + iterkeys = lambda d: iter(d.keys()) + itervalues = lambda d: iter(d.values()) + iteritems = lambda d: iter(d.items()) + + import pickle + from io import BytesIO, 
StringIO + + NativeStringIO = StringIO + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + ifilter = filter + imap = map + izip = zip + intern = sys.intern + + implements_iterator = _identity + implements_to_string = _identity + encode_filename = _identity + + marshal_dump = marshal.dump + marshal_load = marshal.load + +else: + unichr = unichr + text_type = unicode + range_type = xrange + string_types = (str, unicode) + integer_types = (int, long) + + iterkeys = lambda d: d.iterkeys() + itervalues = lambda d: d.itervalues() + iteritems = lambda d: d.iteritems() + + import cPickle as pickle + from cStringIO import StringIO as BytesIO, StringIO + + NativeStringIO = BytesIO + + exec("def reraise(tp, value, tb=None):\n raise tp, value, tb") + + from itertools import imap, izip, ifilter + + intern = intern + + def implements_iterator(cls): + cls.next = cls.__next__ + del cls.__next__ + return cls + + def implements_to_string(cls): + cls.__unicode__ = cls.__str__ + cls.__str__ = lambda x: x.__unicode__().encode("utf-8") + return cls + + def encode_filename(filename): + if isinstance(filename, unicode): + return filename.encode("utf-8") + return filename + + def marshal_dump(code, f): + if isinstance(f, file): + marshal.dump(code, f) + else: + f.write(marshal.dumps(code)) + + def marshal_load(f): + if isinstance(f, file): + return marshal.load(f) + return marshal.loads(f.read()) + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a + # dummy metaclass for one level of class instantiation that replaces + # itself with the actual metaclass. + class metaclass(type): + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + return type.__new__(metaclass, "temporary_class", (), {}) + + +try: + from urllib.parse import quote_from_bytes as url_quote +except ImportError: + from urllib import quote as url_quote + + +try: + from collections import abc +except ImportError: + import collections as abc + + +try: + from os import fspath +except ImportError: + try: + from pathlib import PurePath + except ImportError: + PurePath = None + + def fspath(path): + if hasattr(path, "__fspath__"): + return path.__fspath__() + + # Python 3.5 doesn't have __fspath__ yet, use str. 
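+ # PurePath instances are converted via str(); any other object falls
+ # through unchanged, which is laxer than the real os.fspath (3.6+),
+ # where a non-path argument raises TypeError.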
+ if PurePath is not None and isinstance(path, PurePath): + return str(path) + + return path diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_identifier.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_identifier.py new file mode 100644 index 0000000000000000000000000000000000000000..224d5449d138e75f4f3c25d70cb8f1ea54ccd047 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/_identifier.py @@ -0,0 +1,6 @@ +import re + +# generated by scripts/generate_identifier_pattern.py +pattern = re.compile( + r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 +) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncfilters.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncfilters.py new file mode 100644 index 0000000000000000000000000000000000000000..3d98dbcc00de104a584b0707df8f0d4e8bbe1376 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncfilters.py @@ -0,0 +1,158 @@ +from functools import wraps + +from . 
import filters +from .asyncsupport import auto_aiter +from .asyncsupport import auto_await + + +async def auto_to_seq(value): + seq = [] + if hasattr(value, "__aiter__"): + async for item in value: + seq.append(item) + else: + for item in value: + seq.append(item) + return seq + + +async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): + seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr) + if seq: + async for item in auto_aiter(seq): + if func(item): + yield item + + +def dualfilter(normal_filter, async_filter): + wrap_evalctx = False + if getattr(normal_filter, "environmentfilter", False) is True: + + def is_async(args): + return args[0].is_async + + wrap_evalctx = False + else: + has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True + has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True + wrap_evalctx = not has_evalctxfilter and not has_ctxfilter + + def is_async(args): + return args[0].environment.is_async + + @wraps(normal_filter) + def wrapper(*args, **kwargs): + b = is_async(args) + if wrap_evalctx: + args = args[1:] + if b: + return async_filter(*args, **kwargs) + return normal_filter(*args, **kwargs) + + if wrap_evalctx: + wrapper.evalcontextfilter = True + + wrapper.asyncfiltervariant = True + + return wrapper + + +def asyncfiltervariant(original): + def decorator(f): + return dualfilter(original, f) + + return decorator + + +@asyncfiltervariant(filters.do_first) +async def do_first(environment, seq): + try: + return await auto_aiter(seq).__anext__() + except StopAsyncIteration: + return environment.undefined("No first item, sequence was empty.") + + +@asyncfiltervariant(filters.do_groupby) +async def do_groupby(environment, value, attribute): + expr = filters.make_attrgetter(environment, attribute) + return [ + filters._GroupTuple(key, await auto_to_seq(values)) + for key, values in filters.groupby( + sorted(await auto_to_seq(value), key=expr), expr + ) + ] + + +@asyncfiltervariant(filters.do_join) +async def do_join(eval_ctx, value, d=u"", attribute=None): + return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) + + +@asyncfiltervariant(filters.do_list) +async def do_list(value): + return await auto_to_seq(value) + + +@asyncfiltervariant(filters.do_reject) +async def do_reject(*args, **kwargs): + return async_select_or_reject(args, kwargs, lambda x: not x, False) + + +@asyncfiltervariant(filters.do_rejectattr) +async def do_rejectattr(*args, **kwargs): + return async_select_or_reject(args, kwargs, lambda x: not x, True) + + +@asyncfiltervariant(filters.do_select) +async def do_select(*args, **kwargs): + return async_select_or_reject(args, kwargs, lambda x: x, False) + + +@asyncfiltervariant(filters.do_selectattr) +async def do_selectattr(*args, **kwargs): + return async_select_or_reject(args, kwargs, lambda x: x, True) + + +@asyncfiltervariant(filters.do_map) +async def do_map(*args, **kwargs): + seq, func = filters.prepare_map(args, kwargs) + if seq: + async for item in auto_aiter(seq): + yield await auto_await(func(item)) + + +@asyncfiltervariant(filters.do_sum) +async def do_sum(environment, iterable, attribute=None, start=0): + rv = start + if attribute is not None: + func = filters.make_attrgetter(environment, attribute) + else: + + def func(x): + return x + + async for item in auto_aiter(iterable): + rv += func(item) + return rv + + +@asyncfiltervariant(filters.do_slice) +async def do_slice(value, slices, fill_with=None): + return filters.do_slice(await 
auto_to_seq(value), slices, fill_with) + + +ASYNC_FILTERS = { + "first": do_first, + "groupby": do_groupby, + "join": do_join, + "list": do_list, + # we intentionally do not support do_last because that would be + # ridiculous + "reject": do_reject, + "rejectattr": do_rejectattr, + "map": do_map, + "select": do_select, + "selectattr": do_selectattr, + "sum": do_sum, + "slice": do_slice, +} diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncsupport.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncsupport.py new file mode 100644 index 0000000000000000000000000000000000000000..78ba3739d8dee1e644f96e32f833279d941c3f65 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/asyncsupport.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +"""The code for async support. Importing this patches Jinja on supported +Python versions. +""" +import asyncio +import inspect +from functools import update_wrapper + +from markupsafe import Markup + +from .environment import TemplateModule +from .runtime import LoopContext +from .utils import concat +from .utils import internalcode +from .utils import missing + + +async def concat_async(async_gen): + rv = [] + + async def collect(): + async for event in async_gen: + rv.append(event) + + await collect() + return concat(rv) + + +async def generate_async(self, *args, **kwargs): + vars = dict(*args, **kwargs) + try: + async for event in self.root_render_func(self.new_context(vars)): + yield event + except Exception: + yield self.environment.handle_exception() + + +def wrap_generate_func(original_generate): + def _convert_generator(self, loop, args, kwargs): + async_gen = self.generate_async(*args, **kwargs) + try: + while 1: + yield loop.run_until_complete(async_gen.__anext__()) + except StopAsyncIteration: + pass + + def generate(self, *args, **kwargs): + if not self.environment.is_async: + return original_generate(self, *args, **kwargs) + return _convert_generator(self, asyncio.get_event_loop(), args, kwargs) + + return update_wrapper(generate, original_generate) + + +async def render_async(self, *args, **kwargs): + if not self.environment.is_async: + raise RuntimeError("The environment was not created with async mode enabled.") + + vars = dict(*args, **kwargs) + ctx = self.new_context(vars) + + try: + return await concat_async(self.root_render_func(ctx)) + except Exception: + return self.environment.handle_exception() + + +def wrap_render_func(original_render): + def render(self, *args, **kwargs): + if not self.environment.is_async: + return original_render(self, *args, **kwargs) + loop = asyncio.get_event_loop() + return loop.run_until_complete(self.render_async(*args, **kwargs)) + + return update_wrapper(render, original_render) + + +def wrap_block_reference_call(original_call): + @internalcode + async def async_call(self): + rv = await concat_async(self._stack[self._depth](self._context)) + if self._context.eval_ctx.autoescape: + rv = Markup(rv) + return rv + + @internalcode + def __call__(self): + if not self._context.environment.is_async: + return original_call(self) + return async_call(self) + + return update_wrapper(__call__, original_call) + + +def wrap_macro_invoke(original_invoke): + @internalcode + async def async_invoke(self, arguments, autoescape): + rv = await self._func(*arguments) + if autoescape: + rv = Markup(rv) + return rv + + 
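+ # _invoke below is the synchronous entry point: it defers to
+ # async_invoke only when the environment is async, so synchronous
+ # environments keep the original invocation path.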
@internalcode + def _invoke(self, arguments, autoescape): + if not self._environment.is_async: + return original_invoke(self, arguments, autoescape) + return async_invoke(self, arguments, autoescape) + + return update_wrapper(_invoke, original_invoke) + + +@internalcode +async def get_default_module_async(self): + if self._module is not None: + return self._module + self._module = rv = await self.make_module_async() + return rv + + +def wrap_default_module(original_default_module): + @internalcode + def _get_default_module(self): + if self.environment.is_async: + raise RuntimeError("Template module attribute is unavailable in async mode") + return original_default_module(self) + + return _get_default_module + + +async def make_module_async(self, vars=None, shared=False, locals=None): + context = self.new_context(vars, shared, locals) + body_stream = [] + async for item in self.root_render_func(context): + body_stream.append(item) + return TemplateModule(self, context, body_stream) + + +def patch_template(): + from . import Template + + Template.generate = wrap_generate_func(Template.generate) + Template.generate_async = update_wrapper(generate_async, Template.generate_async) + Template.render_async = update_wrapper(render_async, Template.render_async) + Template.render = wrap_render_func(Template.render) + Template._get_default_module = wrap_default_module(Template._get_default_module) + Template._get_default_module_async = get_default_module_async + Template.make_module_async = update_wrapper( + make_module_async, Template.make_module_async + ) + + +def patch_runtime(): + from .runtime import BlockReference, Macro + + BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__) + Macro._invoke = wrap_macro_invoke(Macro._invoke) + + +def patch_filters(): + from .filters import FILTERS + from .asyncfilters import ASYNC_FILTERS + + FILTERS.update(ASYNC_FILTERS) + + +def patch_all(): + patch_template() + patch_runtime() + patch_filters() + + +async def auto_await(value): + if inspect.isawaitable(value): + return await value + return value + + +async def auto_aiter(iterable): + if hasattr(iterable, "__aiter__"): + async for item in iterable: + yield item + return + for item in iterable: + yield item + + +class AsyncLoopContext(LoopContext): + _to_iterator = staticmethod(auto_aiter) + + @property + async def length(self): + if self._length is not None: + return self._length + + try: + self._length = len(self._iterable) + except TypeError: + iterable = [x async for x in self._iterator] + self._iterator = self._to_iterator(iterable) + self._length = len(iterable) + self.index + (self._after is not missing) + + return self._length + + @property + async def revindex0(self): + return await self.length - self.index + + @property + async def revindex(self): + return await self.length - self.index0 + + async def _peek_next(self): + if self._after is not missing: + return self._after + + try: + self._after = await self._iterator.__anext__() + except StopAsyncIteration: + self._after = missing + + return self._after + + @property + async def last(self): + return await self._peek_next() is missing + + @property + async def nextitem(self): + rv = await self._peek_next() + + if rv is missing: + return self._undefined("there is no next item") + + return rv + + def __aiter__(self): + return self + + async def __anext__(self): + if self._after is not missing: + rv = self._after + self._after = missing + else: + rv = await self._iterator.__anext__() + + self.index0 += 1 + self._before = 
self._current + self._current = rv + return rv, self + + +async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0): + import warnings + + warnings.warn( + "This template must be recompiled with at least Jinja 2.11, or" + " it will fail in 3.0.", + DeprecationWarning, + stacklevel=2, + ) + return AsyncLoopContext(iterable, undefined, recurse, depth0) + + +patch_all() diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/bccache.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/bccache.py new file mode 100644 index 0000000000000000000000000000000000000000..9c0661030f7af4b2b186b1726341c910194b7cbd --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/bccache.py @@ -0,0 +1,350 @@ +# -*- coding: utf-8 -*- +"""The optional bytecode cache system. This is useful if you have very +complex template situations and the compilation of all those templates +slows down your application too much. + +Situations where this is useful are often forking web applications that +are initialized on the first request. +""" +import errno +import fnmatch +import os +import stat +import sys +import tempfile +from hashlib import sha1 +from os import listdir +from os import path + +from ._compat import BytesIO +from ._compat import marshal_dump +from ._compat import marshal_load +from ._compat import pickle +from ._compat import text_type +from .utils import open_if_exists + +bc_version = 4 +# Magic bytes to identify Jinja bytecode cache files. Contains the +# Python major and minor version to avoid loading incompatible bytecode +# if a project upgrades its Python version. +bc_magic = ( + b"j2" + + pickle.dumps(bc_version, 2) + + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) +) + + +class Bucket(object): + """Buckets are used to store the bytecode for one template. It's created + and initialized by the bytecode cache and passed to the loading functions. + + The buckets get an internal checksum from the cache assigned and use this + to automatically reject outdated cache material. Individual bytecode + cache subclasses don't have to care about cache invalidation. 
+ """ + + def __init__(self, environment, key, checksum): + self.environment = environment + self.key = key + self.checksum = checksum + self.reset() + + def reset(self): + """Resets the bucket (unloads the bytecode).""" + self.code = None + + def load_bytecode(self, f): + """Loads bytecode from a file or file like object.""" + # make sure the magic header is correct + magic = f.read(len(bc_magic)) + if magic != bc_magic: + self.reset() + return + # the source code of the file changed, we need to reload + checksum = pickle.load(f) + if self.checksum != checksum: + self.reset() + return + # if marshal_load fails then we need to reload + try: + self.code = marshal_load(f) + except (EOFError, ValueError, TypeError): + self.reset() + return + + def write_bytecode(self, f): + """Dump the bytecode into the file or file like object passed.""" + if self.code is None: + raise TypeError("can't write empty bucket") + f.write(bc_magic) + pickle.dump(self.checksum, f, 2) + marshal_dump(self.code, f) + + def bytecode_from_string(self, string): + """Load bytecode from a string.""" + self.load_bytecode(BytesIO(string)) + + def bytecode_to_string(self): + """Return the bytecode as string.""" + out = BytesIO() + self.write_bytecode(out) + return out.getvalue() + + +class BytecodeCache(object): + """To implement your own bytecode cache you have to subclass this class + and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of + these methods are passed a :class:`~jinja2.bccache.Bucket`. + + A very basic bytecode cache that saves the bytecode on the file system:: + + from os import path + + class MyCache(BytecodeCache): + + def __init__(self, directory): + self.directory = directory + + def load_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + if path.exists(filename): + with open(filename, 'rb') as f: + bucket.load_bytecode(f) + + def dump_bytecode(self, bucket): + filename = path.join(self.directory, bucket.key) + with open(filename, 'wb') as f: + bucket.write_bytecode(f) + + A more advanced version of a filesystem based bytecode cache is part of + Jinja. + """ + + def load_bytecode(self, bucket): + """Subclasses have to override this method to load bytecode into a + bucket. If they are not able to find code in the cache for the + bucket, it must not do anything. + """ + raise NotImplementedError() + + def dump_bytecode(self, bucket): + """Subclasses have to override this method to write the bytecode + from a bucket back to the cache. If it unable to do so it must not + fail silently but raise an exception. + """ + raise NotImplementedError() + + def clear(self): + """Clears the cache. This method is not used by Jinja but should be + implemented to allow applications to clear the bytecode cache used + by a particular environment. + """ + + def get_cache_key(self, name, filename=None): + """Returns the unique hash key for this template name.""" + hash = sha1(name.encode("utf-8")) + if filename is not None: + filename = "|" + filename + if isinstance(filename, text_type): + filename = filename.encode("utf-8") + hash.update(filename) + return hash.hexdigest() + + def get_source_checksum(self, source): + """Returns a checksum for the source.""" + return sha1(source.encode("utf-8")).hexdigest() + + def get_bucket(self, environment, name, filename, source): + """Return a cache bucket for the given template. All arguments are + mandatory but filename may be `None`. 
+ """ + key = self.get_cache_key(name, filename) + checksum = self.get_source_checksum(source) + bucket = Bucket(environment, key, checksum) + self.load_bytecode(bucket) + return bucket + + def set_bucket(self, bucket): + """Put the bucket into the cache.""" + self.dump_bytecode(bucket) + + +class FileSystemBytecodeCache(BytecodeCache): + """A bytecode cache that stores bytecode on the filesystem. It accepts + two arguments: The directory where the cache items are stored and a + pattern string that is used to build the filename. + + If no directory is specified a default cache directory is selected. On + Windows the user's temp directory is used, on UNIX systems a directory + is created for the user in the system temp directory. + + The pattern can be used to have multiple separate caches operate on the + same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` + is replaced with the cache key. + + >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') + + This bytecode cache supports clearing of the cache using the clear method. + """ + + def __init__(self, directory=None, pattern="__jinja2_%s.cache"): + if directory is None: + directory = self._get_default_cache_dir() + self.directory = directory + self.pattern = pattern + + def _get_default_cache_dir(self): + def _unsafe_dir(): + raise RuntimeError( + "Cannot determine safe temp directory. You " + "need to explicitly provide one." + ) + + tmpdir = tempfile.gettempdir() + + # On windows the temporary directory is used specific unless + # explicitly forced otherwise. We can just use that. + if os.name == "nt": + return tmpdir + if not hasattr(os, "getuid"): + _unsafe_dir() + + dirname = "_jinja2-cache-%d" % os.getuid() + actual_dir = os.path.join(tmpdir, dirname) + + try: + os.mkdir(actual_dir, stat.S_IRWXU) + except OSError as e: + if e.errno != errno.EEXIST: + raise + try: + os.chmod(actual_dir, stat.S_IRWXU) + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + except OSError as e: + if e.errno != errno.EEXIST: + raise + + actual_dir_stat = os.lstat(actual_dir) + if ( + actual_dir_stat.st_uid != os.getuid() + or not stat.S_ISDIR(actual_dir_stat.st_mode) + or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU + ): + _unsafe_dir() + + return actual_dir + + def _get_cache_filename(self, bucket): + return path.join(self.directory, self.pattern % bucket.key) + + def load_bytecode(self, bucket): + f = open_if_exists(self._get_cache_filename(bucket), "rb") + if f is not None: + try: + bucket.load_bytecode(f) + finally: + f.close() + + def dump_bytecode(self, bucket): + f = open(self._get_cache_filename(bucket), "wb") + try: + bucket.write_bytecode(f) + finally: + f.close() + + def clear(self): + # imported lazily here because google app-engine doesn't support + # write access on the file system and the function does not exist + # normally. + from os import remove + + files = fnmatch.filter(listdir(self.directory), self.pattern % "*") + for filename in files: + try: + remove(path.join(self.directory, filename)) + except OSError: + pass + + +class MemcachedBytecodeCache(BytecodeCache): + """This class implements a bytecode cache that uses a memcache cache for + storing the information. It does not enforce a specific memcache library + (tummy's memcache or cmemcache) but will accept any class that provides + the minimal interface required. 
+ + Libraries compatible with this class: + + - `cachelib <https://github.com/pallets/cachelib>`_ + - `python-memcached <https://pypi.org/project/python-memcached/>`_ + + (Unfortunately the django cache interface is not compatible because it + does not support storing binary data, only unicode. You can however pass + the underlying cache client to the bytecode cache which is available + as `django.core.cache.cache._client`.) + + The minimal interface for the client passed to the constructor is this: + + .. class:: MinimalClientInterface + + .. method:: set(key, value[, timeout]) + + Stores the bytecode in the cache. `value` is a string and + `timeout` the timeout of the key. If timeout is not provided + a default timeout or no timeout should be assumed, if it's + provided it's an integer with the number of seconds the cache + item should exist. + + .. method:: get(key) + + Returns the value for the cache key. If the item does not + exist in the cache the return value must be `None`. + + The other arguments to the constructor are the prefix for all keys that + is added before the actual cache key and the timeout for the bytecode in + the cache system. We recommend a high (or no) timeout. + + This bytecode cache does not support clearing of used items in the cache. + The clear method is a no-operation function. + + .. versionadded:: 2.7 + Added support for ignoring memcache errors through the + `ignore_memcache_errors` parameter. + """ + + def __init__( + self, + client, + prefix="jinja2/bytecode/", + timeout=None, + ignore_memcache_errors=True, + ): + self.client = client + self.prefix = prefix + self.timeout = timeout + self.ignore_memcache_errors = ignore_memcache_errors + + def load_bytecode(self, bucket): + try: + code = self.client.get(self.prefix + bucket.key) + except Exception: + if not self.ignore_memcache_errors: + raise + code = None + if code is not None: + bucket.bytecode_from_string(code) + + def dump_bytecode(self, bucket): + args = (self.prefix + bucket.key, bucket.bytecode_to_string()) + if self.timeout is not None: + args += (self.timeout,) + try: + self.client.set(*args) + except Exception: + if not self.ignore_memcache_errors: + raise diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/compiler.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/compiler.py new file mode 100644 index 0000000000000000000000000000000000000000..63297b42c30f17b0c0ae08547047d070e7a53d3c --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/compiler.py @@ -0,0 +1,1843 @@ +# -*- coding: utf-8 -*- +"""Compiles nodes from the parser into Python code.""" +from collections import namedtuple +from functools import update_wrapper +from itertools import chain +from keyword import iskeyword as is_python_keyword + +from markupsafe import escape +from markupsafe import Markup + +from . 
import nodes +from ._compat import imap +from ._compat import iteritems +from ._compat import izip +from ._compat import NativeStringIO +from ._compat import range_type +from ._compat import string_types +from ._compat import text_type +from .exceptions import TemplateAssertionError +from .idtracking import Symbols +from .idtracking import VAR_LOAD_ALIAS +from .idtracking import VAR_LOAD_PARAMETER +from .idtracking import VAR_LOAD_RESOLVE +from .idtracking import VAR_LOAD_UNDEFINED +from .nodes import EvalContext +from .optimizer import Optimizer +from .utils import concat +from .visitor import NodeVisitor + +operators = { + "eq": "==", + "ne": "!=", + "gt": ">", + "gteq": ">=", + "lt": "<", + "lteq": "<=", + "in": "in", + "notin": "not in", +} + +# what method to iterate over items do we want to use for dict iteration +# in generated code? on 2.x let's go with iteritems, on 3.x with items +if hasattr(dict, "iteritems"): + dict_item_iter = "iteritems" +else: + dict_item_iter = "items" + +code_features = ["division"] + +# does this python version support generator stops? (PEP 0479) +try: + exec("from __future__ import generator_stop") + code_features.append("generator_stop") +except SyntaxError: + pass + +# does this python version support yield from? +try: + exec("def f(): yield from x()") +except SyntaxError: + supports_yield_from = False +else: + supports_yield_from = True + + +def optimizeconst(f): + def new_func(self, node, frame, **kwargs): + # Only optimize if the frame is not volatile + if self.optimized and not frame.eval_ctx.volatile: + new_node = self.optimizer.visit(node, frame.eval_ctx) + if new_node != node: + return self.visit(new_node, frame) + return f(self, node, frame, **kwargs) + + return update_wrapper(new_func, f) + + +def generate( + node, environment, name, filename, stream=None, defer_init=False, optimized=True +): + """Generate the python source for a node tree.""" + if not isinstance(node, nodes.Template): + raise TypeError("Can't compile non template nodes") + generator = environment.code_generator_class( + environment, name, filename, stream, defer_init, optimized + ) + generator.visit(node) + if stream is None: + return generator.stream.getvalue() + + +def has_safe_repr(value): + """Does the node have a safe representation?""" + if value is None or value is NotImplemented or value is Ellipsis: + return True + if type(value) in (bool, int, float, complex, range_type, Markup) + string_types: + return True + if type(value) in (tuple, list, set, frozenset): + for item in value: + if not has_safe_repr(item): + return False + return True + elif type(value) is dict: + for key, value in iteritems(value): + if not has_safe_repr(key): + return False + if not has_safe_repr(value): + return False + return True + return False + + +def find_undeclared(nodes, names): + """Check if the names passed are accessed undeclared. The return value + is a set of all the undeclared names from the sequence of names found. 
+ """ + visitor = UndeclaredNameVisitor(names) + try: + for node in nodes: + visitor.visit(node) + except VisitorExit: + pass + return visitor.undeclared + + +class MacroRef(object): + def __init__(self, node): + self.node = node + self.accesses_caller = False + self.accesses_kwargs = False + self.accesses_varargs = False + + +class Frame(object): + """Holds compile time information for us.""" + + def __init__(self, eval_ctx, parent=None, level=None): + self.eval_ctx = eval_ctx + self.symbols = Symbols(parent and parent.symbols or None, level=level) + + # a toplevel frame is the root + soft frames such as if conditions. + self.toplevel = False + + # the root frame is basically just the outermost frame, so no if + # conditions. This information is used to optimize inheritance + # situations. + self.rootlevel = False + + # in some dynamic inheritance situations the compiler needs to add + # write tests around output statements. + self.require_output_check = parent and parent.require_output_check + + # inside some tags we are using a buffer rather than yield statements. + # this for example affects {% filter %} or {% macro %}. If a frame + # is buffered this variable points to the name of the list used as + # buffer. + self.buffer = None + + # the name of the block we're in, otherwise None. + self.block = parent and parent.block or None + + # the parent of this frame + self.parent = parent + + if parent is not None: + self.buffer = parent.buffer + + def copy(self): + """Create a copy of the current one.""" + rv = object.__new__(self.__class__) + rv.__dict__.update(self.__dict__) + rv.symbols = self.symbols.copy() + return rv + + def inner(self, isolated=False): + """Return an inner frame.""" + if isolated: + return Frame(self.eval_ctx, level=self.symbols.level + 1) + return Frame(self.eval_ctx, self) + + def soft(self): + """Return a soft frame. A soft frame may not be modified as + standalone thing as it shares the resources with the frame it + was created of, but it's not a rootlevel frame any longer. + + This is only used to implement if-statements. + """ + rv = self.copy() + rv.rootlevel = False + return rv + + __copy__ = copy + + +class VisitorExit(RuntimeError): + """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" + + +class DependencyFinderVisitor(NodeVisitor): + """A visitor that collects filter and test calls.""" + + def __init__(self): + self.filters = set() + self.tests = set() + + def visit_Filter(self, node): + self.generic_visit(node) + self.filters.add(node.name) + + def visit_Test(self, node): + self.generic_visit(node) + self.tests.add(node.name) + + def visit_Block(self, node): + """Stop visiting at blocks.""" + + +class UndeclaredNameVisitor(NodeVisitor): + """A visitor that checks if a name is accessed without being + declared. This is different from the frame visitor as it will + not stop at closure frames. + """ + + def __init__(self, names): + self.names = set(names) + self.undeclared = set() + + def visit_Name(self, node): + if node.ctx == "load" and node.name in self.names: + self.undeclared.add(node.name) + if self.undeclared == self.names: + raise VisitorExit() + else: + self.names.discard(node.name) + + def visit_Block(self, node): + """Stop visiting a blocks.""" + + +class CompilerExit(Exception): + """Raised if the compiler encountered a situation where it just + doesn't make sense to further process the code. Any block that + raises such an exception is not further processed. 
+ """ + + +class CodeGenerator(NodeVisitor): + def __init__( + self, environment, name, filename, stream=None, defer_init=False, optimized=True + ): + if stream is None: + stream = NativeStringIO() + self.environment = environment + self.name = name + self.filename = filename + self.stream = stream + self.created_block_context = False + self.defer_init = defer_init + self.optimized = optimized + if optimized: + self.optimizer = Optimizer(environment) + + # aliases for imports + self.import_aliases = {} + + # a registry for all blocks. Because blocks are moved out + # into the global python scope they are registered here + self.blocks = {} + + # the number of extends statements so far + self.extends_so_far = 0 + + # some templates have a rootlevel extends. In this case we + # can safely assume that we're a child template and do some + # more optimizations. + self.has_known_extends = False + + # the current line number + self.code_lineno = 1 + + # registry of all filters and tests (global, not block local) + self.tests = {} + self.filters = {} + + # the debug information + self.debug_info = [] + self._write_debug_info = None + + # the number of new lines before the next write() + self._new_lines = 0 + + # the line number of the last written statement + self._last_line = 0 + + # true if nothing was written so far. + self._first_write = True + + # used by the `temporary_identifier` method to get new + # unique, temporary identifier + self._last_identifier = 0 + + # the current indentation + self._indentation = 0 + + # Tracks toplevel assignments + self._assign_stack = [] + + # Tracks parameter definition blocks + self._param_def_block = [] + + # Tracks the current context. + self._context_reference_stack = ["context"] + + # -- Various compilation helpers + + def fail(self, msg, lineno): + """Fail with a :exc:`TemplateAssertionError`.""" + raise TemplateAssertionError(msg, lineno, self.name, self.filename) + + def temporary_identifier(self): + """Get a new unique identifier.""" + self._last_identifier += 1 + return "t_%d" % self._last_identifier + + def buffer(self, frame): + """Enable buffering for the frame from that point onwards.""" + frame.buffer = self.temporary_identifier() + self.writeline("%s = []" % frame.buffer) + + def return_buffer_contents(self, frame, force_unescaped=False): + """Return the buffer contents of the frame.""" + if not force_unescaped: + if frame.eval_ctx.volatile: + self.writeline("if context.eval_ctx.autoescape:") + self.indent() + self.writeline("return Markup(concat(%s))" % frame.buffer) + self.outdent() + self.writeline("else:") + self.indent() + self.writeline("return concat(%s)" % frame.buffer) + self.outdent() + return + elif frame.eval_ctx.autoescape: + self.writeline("return Markup(concat(%s))" % frame.buffer) + return + self.writeline("return concat(%s)" % frame.buffer) + + def indent(self): + """Indent by one.""" + self._indentation += 1 + + def outdent(self, step=1): + """Outdent by step.""" + self._indentation -= step + + def start_write(self, frame, node=None): + """Yield or write into the frame buffer.""" + if frame.buffer is None: + self.writeline("yield ", node) + else: + self.writeline("%s.append(" % frame.buffer, node) + + def end_write(self, frame): + """End the writing process started by `start_write`.""" + if frame.buffer is not None: + self.write(")") + + def simple_write(self, s, frame, node=None): + """Simple shortcut for start_write + write + end_write.""" + self.start_write(frame, node) + self.write(s) + self.end_write(frame) + + def 
blockvisit(self, nodes, frame): + """Visit a list of nodes as block in a frame. If the current frame + is no buffer a dummy ``if 0: yield None`` is written automatically. + """ + try: + self.writeline("pass") + for node in nodes: + self.visit(node, frame) + except CompilerExit: + pass + + def write(self, x): + """Write a string into the output stream.""" + if self._new_lines: + if not self._first_write: + self.stream.write("\n" * self._new_lines) + self.code_lineno += self._new_lines + if self._write_debug_info is not None: + self.debug_info.append((self._write_debug_info, self.code_lineno)) + self._write_debug_info = None + self._first_write = False + self.stream.write(" " * self._indentation) + self._new_lines = 0 + self.stream.write(x) + + def writeline(self, x, node=None, extra=0): + """Combination of newline and write.""" + self.newline(node, extra) + self.write(x) + + def newline(self, node=None, extra=0): + """Add one or more newlines before the next write.""" + self._new_lines = max(self._new_lines, 1 + extra) + if node is not None and node.lineno != self._last_line: + self._write_debug_info = node.lineno + self._last_line = node.lineno + + def signature(self, node, frame, extra_kwargs=None): + """Writes a function call to the stream for the current node. + A leading comma is added automatically. The extra keyword + arguments may not include python keywords otherwise a syntax + error could occur. The extra keyword arguments should be given + as python dict. + """ + # if any of the given keyword arguments is a python keyword + # we have to make sure that no invalid call is created. + kwarg_workaround = False + for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): + if is_python_keyword(kwarg): + kwarg_workaround = True + break + + for arg in node.args: + self.write(", ") + self.visit(arg, frame) + + if not kwarg_workaround: + for kwarg in node.kwargs: + self.write(", ") + self.visit(kwarg, frame) + if extra_kwargs is not None: + for key, value in iteritems(extra_kwargs): + self.write(", %s=%s" % (key, value)) + if node.dyn_args: + self.write(", *") + self.visit(node.dyn_args, frame) + + if kwarg_workaround: + if node.dyn_kwargs is not None: + self.write(", **dict({") + else: + self.write(", **{") + for kwarg in node.kwargs: + self.write("%r: " % kwarg.key) + self.visit(kwarg.value, frame) + self.write(", ") + if extra_kwargs is not None: + for key, value in iteritems(extra_kwargs): + self.write("%r: %s, " % (key, value)) + if node.dyn_kwargs is not None: + self.write("}, **") + self.visit(node.dyn_kwargs, frame) + self.write(")") + else: + self.write("}") + + elif node.dyn_kwargs is not None: + self.write(", **") + self.visit(node.dyn_kwargs, frame) + + def pull_dependencies(self, nodes): + """Pull all the dependencies.""" + visitor = DependencyFinderVisitor() + for node in nodes: + visitor.visit(node) + for dependency in "filters", "tests": + mapping = getattr(self, dependency) + for name in getattr(visitor, dependency): + if name not in mapping: + mapping[name] = self.temporary_identifier() + self.writeline( + "%s = environment.%s[%r]" % (mapping[name], dependency, name) + ) + + def enter_frame(self, frame): + undefs = [] + for target, (action, param) in iteritems(frame.symbols.loads): + if action == VAR_LOAD_PARAMETER: + pass + elif action == VAR_LOAD_RESOLVE: + self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param)) + elif action == VAR_LOAD_ALIAS: + self.writeline("%s = %s" % (target, param)) + elif action == VAR_LOAD_UNDEFINED: + 
undefs.append(target) + else: + raise NotImplementedError("unknown load instruction") + if undefs: + self.writeline("%s = missing" % " = ".join(undefs)) + + def leave_frame(self, frame, with_python_scope=False): + if not with_python_scope: + undefs = [] + for target, _ in iteritems(frame.symbols.loads): + undefs.append(target) + if undefs: + self.writeline("%s = missing" % " = ".join(undefs)) + + def func(self, name): + if self.environment.is_async: + return "async def %s" % name + return "def %s" % name + + def macro_body(self, node, frame): + """Dump the function def of a macro or call block.""" + frame = frame.inner() + frame.symbols.analyze_node(node) + macro_ref = MacroRef(node) + + explicit_caller = None + skip_special_params = set() + args = [] + for idx, arg in enumerate(node.args): + if arg.name == "caller": + explicit_caller = idx + if arg.name in ("kwargs", "varargs"): + skip_special_params.add(arg.name) + args.append(frame.symbols.ref(arg.name)) + + undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) + + if "caller" in undeclared: + # In older Jinja versions there was a bug that allowed caller + # to retain the special behavior even if it was mentioned in + # the argument list. However thankfully this was only really + # working if it was the last argument. So we are explicitly + # checking this now and error out if it is anywhere else in + # the argument list. + if explicit_caller is not None: + try: + node.defaults[explicit_caller - len(node.args)] + except IndexError: + self.fail( + "When defining macros or call blocks the " + 'special "caller" argument must be omitted ' + "or be given a default.", + node.lineno, + ) + else: + args.append(frame.symbols.declare_parameter("caller")) + macro_ref.accesses_caller = True + if "kwargs" in undeclared and "kwargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("kwargs")) + macro_ref.accesses_kwargs = True + if "varargs" in undeclared and "varargs" not in skip_special_params: + args.append(frame.symbols.declare_parameter("varargs")) + macro_ref.accesses_varargs = True + + # macros are delayed, they never require output checks + frame.require_output_check = False + frame.symbols.analyze_node(node) + self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node) + self.indent() + + self.buffer(frame) + self.enter_frame(frame) + + self.push_parameter_definitions(frame) + for idx, arg in enumerate(node.args): + ref = frame.symbols.ref(arg.name) + self.writeline("if %s is missing:" % ref) + self.indent() + try: + default = node.defaults[idx - len(node.args)] + except IndexError: + self.writeline( + "%s = undefined(%r, name=%r)" + % (ref, "parameter %r was not provided" % arg.name, arg.name) + ) + else: + self.writeline("%s = " % ref) + self.visit(default, frame) + self.mark_parameter_stored(ref) + self.outdent() + self.pop_parameter_definitions() + + self.blockvisit(node.body, frame) + self.return_buffer_contents(frame, force_unescaped=True) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + return frame, macro_ref + + def macro_def(self, macro_ref, frame): + """Dump the macro definition for the def created by macro_body.""" + arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) + name = getattr(macro_ref.node, "name", None) + if len(macro_ref.node.args) == 1: + arg_tuple += "," + self.write( + "Macro(environment, macro, %r, (%s), %r, %r, %r, " + "context.eval_ctx.autoescape)" + % ( + name, + arg_tuple, + macro_ref.accesses_kwargs, + 
macro_ref.accesses_varargs, + macro_ref.accesses_caller, + ) + ) + + def position(self, node): + """Return a human readable position for the node.""" + rv = "line %d" % node.lineno + if self.name is not None: + rv += " in " + repr(self.name) + return rv + + def dump_local_context(self, frame): + return "{%s}" % ", ".join( + "%r: %s" % (name, target) + for name, target in iteritems(frame.symbols.dump_stores()) + ) + + def write_commons(self): + """Writes a common preamble that is used by root and block functions. + Primarily this sets up common local helpers and enforces a generator + through a dead branch. + """ + self.writeline("resolve = context.resolve_or_missing") + self.writeline("undefined = environment.undefined") + # always use the standard Undefined class for the implicit else of + # conditional expressions + self.writeline("cond_expr_undefined = Undefined") + self.writeline("if 0: yield None") + + def push_parameter_definitions(self, frame): + """Pushes all parameter targets from the given frame into a local + stack that permits tracking of yet to be assigned parameters. In + particular this enables the optimization from `visit_Name` to skip + undefined expressions for parameters in macros as macros can reference + otherwise unbound parameters. + """ + self._param_def_block.append(frame.symbols.dump_param_targets()) + + def pop_parameter_definitions(self): + """Pops the current parameter definitions set.""" + self._param_def_block.pop() + + def mark_parameter_stored(self, target): + """Marks a parameter in the current parameter definitions as stored. + This will skip the enforced undefined checks. + """ + if self._param_def_block: + self._param_def_block[-1].discard(target) + + def push_context_reference(self, target): + self._context_reference_stack.append(target) + + def pop_context_reference(self): + self._context_reference_stack.pop() + + def get_context_ref(self): + return self._context_reference_stack[-1] + + def get_resolve_func(self): + target = self._context_reference_stack[-1] + if target == "context": + return "resolve" + return "%s.resolve" % target + + def derive_context(self, frame): + return "%s.derived(%s)" % ( + self.get_context_ref(), + self.dump_local_context(frame), + ) + + def parameter_is_undeclared(self, target): + """Checks if a given target is an undeclared parameter.""" + if not self._param_def_block: + return False + return target in self._param_def_block[-1] + + def push_assign_tracking(self): + """Pushes a new layer for assignment tracking.""" + self._assign_stack.append(set()) + + def pop_assign_tracking(self, frame): + """Pops the topmost level for assignment tracking and updates the + context variables if necessary. 
+ """ + vars = self._assign_stack.pop() + if not frame.toplevel or not vars: + return + public_names = [x for x in vars if x[:1] != "_"] + if len(vars) == 1: + name = next(iter(vars)) + ref = frame.symbols.ref(name) + self.writeline("context.vars[%r] = %s" % (name, ref)) + else: + self.writeline("context.vars.update({") + for idx, name in enumerate(vars): + if idx: + self.write(", ") + ref = frame.symbols.ref(name) + self.write("%r: %s" % (name, ref)) + self.write("})") + if public_names: + if len(public_names) == 1: + self.writeline("context.exported_vars.add(%r)" % public_names[0]) + else: + self.writeline( + "context.exported_vars.update((%s))" + % ", ".join(imap(repr, public_names)) + ) + + # -- Statement Visitors + + def visit_Template(self, node, frame=None): + assert frame is None, "no root frame allowed" + eval_ctx = EvalContext(self.environment, self.name) + + from .runtime import exported + + self.writeline("from __future__ import %s" % ", ".join(code_features)) + self.writeline("from jinja2.runtime import " + ", ".join(exported)) + + if self.environment.is_async: + self.writeline( + "from jinja2.asyncsupport import auto_await, " + "auto_aiter, AsyncLoopContext" + ) + + # if we want a deferred initialization we cannot move the + # environment into a local name + envenv = not self.defer_init and ", environment=environment" or "" + + # do we have an extends tag at all? If not, we can save some + # overhead by just not processing any inheritance code. + have_extends = node.find(nodes.Extends) is not None + + # find all blocks + for block in node.find_all(nodes.Block): + if block.name in self.blocks: + self.fail("block %r defined twice" % block.name, block.lineno) + self.blocks[block.name] = block + + # find all imports and import them + for import_ in node.find_all(nodes.ImportedName): + if import_.importname not in self.import_aliases: + imp = import_.importname + self.import_aliases[imp] = alias = self.temporary_identifier() + if "." in imp: + module, obj = imp.rsplit(".", 1) + self.writeline("from %s import %s as %s" % (module, obj, alias)) + else: + self.writeline("import %s as %s" % (imp, alias)) + + # add the load name + self.writeline("name = %r" % self.name) + + # generate the root render function. + self.writeline( + "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1 + ) + self.indent() + self.write_commons() + + # process the root + frame = Frame(eval_ctx) + if "self" in find_undeclared(node.body, ("self",)): + ref = frame.symbols.declare_parameter("self") + self.writeline("%s = TemplateReference(context)" % ref) + frame.symbols.analyze_node(node) + frame.toplevel = frame.rootlevel = True + frame.require_output_check = have_extends and not self.has_known_extends + if have_extends: + self.writeline("parent_template = None") + self.enter_frame(frame) + self.pull_dependencies(node.body) + self.blockvisit(node.body, frame) + self.leave_frame(frame, with_python_scope=True) + self.outdent() + + # make sure that the parent root is called. + if have_extends: + if not self.has_known_extends: + self.indent() + self.writeline("if parent_template is not None:") + self.indent() + if supports_yield_from and not self.environment.is_async: + self.writeline("yield from parent_template.root_render_func(context)") + else: + self.writeline( + "%sfor event in parent_template." 
+                    "root_render_func(context):"
+                    % (self.environment.is_async and "async " or "")
+                )
+                self.indent()
+                self.writeline("yield event")
+                self.outdent()
+            self.outdent(1 + (not self.has_known_extends))
+
+        # at this point we now have the blocks collected and can visit them too.
+        for name, block in iteritems(self.blocks):
+            self.writeline(
+                "%s(context, missing=missing%s):"
+                % (self.func("block_" + name), envenv),
+                block,
+                1,
+            )
+            self.indent()
+            self.write_commons()
+            # It's important that we do not make this frame a child of the
+            # toplevel template.  This would cause a variety of
+            # interesting issues with identifier tracking.
+            block_frame = Frame(eval_ctx)
+            undeclared = find_undeclared(block.body, ("self", "super"))
+            if "self" in undeclared:
+                ref = block_frame.symbols.declare_parameter("self")
+                self.writeline("%s = TemplateReference(context)" % ref)
+            if "super" in undeclared:
+                ref = block_frame.symbols.declare_parameter("super")
+                self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
+            block_frame.symbols.analyze_node(block)
+            block_frame.block = name
+            self.enter_frame(block_frame)
+            self.pull_dependencies(block.body)
+            self.blockvisit(block.body, block_frame)
+            self.leave_frame(block_frame, with_python_scope=True)
+            self.outdent()
+
+        self.writeline(
+            "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
+            extra=1,
+        )
+
+        # add a function that returns the debug info
+        self.writeline(
+            "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
+        )
+
+    def visit_Block(self, node, frame):
+        """Call a block and register it for the template."""
+        level = 0
+        if frame.toplevel:
+            # if we know that we are a child template, there is no need to
+            # check if we are one
+            if self.has_known_extends:
+                return
+            if self.extends_so_far > 0:
+                self.writeline("if parent_template is None:")
+                self.indent()
+                level += 1
+
+        if node.scoped:
+            context = self.derive_context(frame)
+        else:
+            context = self.get_context_ref()
+
+        if (
+            supports_yield_from
+            and not self.environment.is_async
+            and frame.buffer is None
+        ):
+            self.writeline(
+                "yield from context.blocks[%r][0](%s)" % (node.name, context), node
+            )
+        else:
+            loop = self.environment.is_async and "async for" or "for"
+            self.writeline(
+                "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
+                node,
+            )
+            self.indent()
+            self.simple_write("event", frame)
+            self.outdent()
+
+        self.outdent(level)
+
+    def visit_Extends(self, node, frame):
+        """Calls the extender."""
+        if not frame.toplevel:
+            self.fail("cannot use extend from a non top-level scope", node.lineno)
+
+        # if the number of extends statements in general is zero so
+        # far, we don't have to add a check if something extended
+        # the template before this one.
+        if self.extends_so_far > 0:
+
+            # if we have a known extends we just add a template runtime
+            # error into the generated code.  We could catch that at compile
+            # time too, but it is better not to confuse users by raising the
+            # same error at different times just because we can.
+            if not self.has_known_extends:
+                self.writeline("if parent_template is not None:")
+                self.indent()
+            self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
+
+            # if we have a known extends already we don't need that code here
+            # as we know that the template execution will end here.
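+            # For example, a template that starts with
+            # {% extends "base.html" %} at the top level can never produce
+            # output below this point, so the compiler stops emitting code.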
+ if self.has_known_extends: + raise CompilerExit() + else: + self.outdent() + + self.writeline("parent_template = environment.get_template(", node) + self.visit(node.template, frame) + self.write(", %r)" % self.name) + self.writeline( + "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter + ) + self.indent() + self.writeline("context.blocks.setdefault(name, []).append(parent_block)") + self.outdent() + + # if this extends statement was in the root level we can take + # advantage of that information and simplify the generated code + # in the top level from this point onwards + if frame.rootlevel: + self.has_known_extends = True + + # and now we have one more + self.extends_so_far += 1 + + def visit_Include(self, node, frame): + """Handles includes.""" + if node.ignore_missing: + self.writeline("try:") + self.indent() + + func_name = "get_or_select_template" + if isinstance(node.template, nodes.Const): + if isinstance(node.template.value, string_types): + func_name = "get_template" + elif isinstance(node.template.value, (tuple, list)): + func_name = "select_template" + elif isinstance(node.template, (nodes.Tuple, nodes.List)): + func_name = "select_template" + + self.writeline("template = environment.%s(" % func_name, node) + self.visit(node.template, frame) + self.write(", %r)" % self.name) + if node.ignore_missing: + self.outdent() + self.writeline("except TemplateNotFound:") + self.indent() + self.writeline("pass") + self.outdent() + self.writeline("else:") + self.indent() + + skip_event_yield = False + if node.with_context: + loop = self.environment.is_async and "async for" or "for" + self.writeline( + "%s event in template.root_render_func(" + "template.new_context(context.get_all(), True, " + "%s)):" % (loop, self.dump_local_context(frame)) + ) + elif self.environment.is_async: + self.writeline( + "for event in (await " + "template._get_default_module_async())" + "._body_stream:" + ) + else: + if supports_yield_from: + self.writeline("yield from template._get_default_module()._body_stream") + skip_event_yield = True + else: + self.writeline( + "for event in template._get_default_module()._body_stream:" + ) + + if not skip_event_yield: + self.indent() + self.simple_write("event", frame) + self.outdent() + + if node.ignore_missing: + self.outdent() + + def visit_Import(self, node, frame): + """Visit regular imports.""" + self.writeline("%s = " % frame.symbols.ref(node.target), node) + if frame.toplevel: + self.write("context.vars[%r] = " % node.target) + if self.environment.is_async: + self.write("await ") + self.write("environment.get_template(") + self.visit(node.template, frame) + self.write(", %r)." % self.name) + if node.with_context: + self.write( + "make_module%s(context.get_all(), True, %s)" + % ( + self.environment.is_async and "_async" or "", + self.dump_local_context(frame), + ) + ) + elif self.environment.is_async: + self.write("_get_default_module_async()") + else: + self.write("_get_default_module()") + if frame.toplevel and not node.target.startswith("_"): + self.writeline("context.exported_vars.discard(%r)" % node.target) + + def visit_FromImport(self, node, frame): + """Visit named imports.""" + self.newline(node) + self.write( + "included_template = %senvironment.get_template(" + % (self.environment.is_async and "await " or "") + ) + self.visit(node.template, frame) + self.write(", %r)." 
% self.name)
+        if node.with_context:
+            self.write(
+                "make_module%s(context.get_all(), True, %s)"
+                % (
+                    self.environment.is_async and "_async" or "",
+                    self.dump_local_context(frame),
+                )
+            )
+        elif self.environment.is_async:
+            self.write("_get_default_module_async()")
+        else:
+            self.write("_get_default_module()")
+
+        var_names = []
+        discarded_names = []
+        for name in node.names:
+            if isinstance(name, tuple):
+                name, alias = name
+            else:
+                alias = name
+            self.writeline(
+                "%s = getattr(included_template, "
+                "%r, missing)" % (frame.symbols.ref(alias), name)
+            )
+            self.writeline("if %s is missing:" % frame.symbols.ref(alias))
+            self.indent()
+            self.writeline(
+                "%s = undefined(%r %% "
+                "included_template.__name__, "
+                "name=%r)"
+                % (
+                    frame.symbols.ref(alias),
+                    "the template %%r (imported on %s) does "
+                    "not export the requested name %s"
+                    % (self.position(node), repr(name)),
+                    name,
+                )
+            )
+            self.outdent()
+            if frame.toplevel:
+                var_names.append(alias)
+                if not alias.startswith("_"):
+                    discarded_names.append(alias)
+
+        if var_names:
+            if len(var_names) == 1:
+                name = var_names[0]
+                self.writeline(
+                    "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
+                )
+            else:
+                self.writeline(
+                    "context.vars.update({%s})"
+                    % ", ".join(
+                        "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
+                    )
+                )
+        if discarded_names:
+            if len(discarded_names) == 1:
+                self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
+            else:
+                self.writeline(
+                    "context.exported_vars.difference_"
+                    "update((%s))" % ", ".join(imap(repr, discarded_names))
+                )
+
+    def visit_For(self, node, frame):
+        loop_frame = frame.inner()
+        test_frame = frame.inner()
+        else_frame = frame.inner()
+
+        # try to figure out if we have an extended loop.  An extended loop
+        # is necessary if the loop is in recursive mode or if the special
+        # loop variable is accessed in the body.
+        extended_loop = node.recursive or "loop" in find_undeclared(
+            node.iter_child_nodes(only=("body",)), ("loop",)
+        )
+
+        loop_ref = None
+        if extended_loop:
+            loop_ref = loop_frame.symbols.declare_parameter("loop")
+
+        loop_frame.symbols.analyze_node(node, for_branch="body")
+        if node.else_:
+            else_frame.symbols.analyze_node(node, for_branch="else")
+
+        if node.test:
+            loop_filter_func = self.temporary_identifier()
+            test_frame.symbols.analyze_node(node, for_branch="test")
+            self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
+            self.indent()
+            self.enter_frame(test_frame)
+            self.writeline(self.environment.is_async and "async for " or "for ")
+            self.visit(node.target, loop_frame)
+            self.write(" in ")
+            self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
+            self.write(":")
+            self.indent()
+            self.writeline("if ", node.test)
+            self.visit(node.test, test_frame)
+            self.write(":")
+            self.indent()
+            self.writeline("yield ")
+            self.visit(node.target, loop_frame)
+            self.outdent(3)
+            self.leave_frame(test_frame, with_python_scope=True)
+
+        # if we don't have a recursive loop we have to find the shadowed
+        # variables at that point.  Because loops can be nested but the loop
+        # variable is a special one we have to enforce aliasing for it.
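+        # e.g. {% for item in sitemap recursive %} compiles the body into a
+        # nested "def loop(reciter, loop_render_func, depth=0)" function so
+        # the template can call loop(item.children) to recurse.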
+ if node.recursive: + self.writeline( + "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node + ) + self.indent() + self.buffer(loop_frame) + + # Use the same buffer for the else frame + else_frame.buffer = loop_frame.buffer + + # make sure the loop variable is a special one and raise a template + # assertion error if a loop tries to write to loop + if extended_loop: + self.writeline("%s = missing" % loop_ref) + + for name in node.find_all(nodes.Name): + if name.ctx == "store" and name.name == "loop": + self.fail( + "Can't assign to special loop variable in for-loop target", + name.lineno, + ) + + if node.else_: + iteration_indicator = self.temporary_identifier() + self.writeline("%s = 1" % iteration_indicator) + + self.writeline(self.environment.is_async and "async for " or "for ", node) + self.visit(node.target, loop_frame) + if extended_loop: + if self.environment.is_async: + self.write(", %s in AsyncLoopContext(" % loop_ref) + else: + self.write(", %s in LoopContext(" % loop_ref) + else: + self.write(" in ") + + if node.test: + self.write("%s(" % loop_filter_func) + if node.recursive: + self.write("reciter") + else: + if self.environment.is_async and not extended_loop: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async and not extended_loop: + self.write(")") + if node.test: + self.write(")") + + if node.recursive: + self.write(", undefined, loop_render_func, depth):") + else: + self.write(extended_loop and ", undefined):" or ":") + + self.indent() + self.enter_frame(loop_frame) + + self.blockvisit(node.body, loop_frame) + if node.else_: + self.writeline("%s = 0" % iteration_indicator) + self.outdent() + self.leave_frame( + loop_frame, with_python_scope=node.recursive and not node.else_ + ) + + if node.else_: + self.writeline("if %s:" % iteration_indicator) + self.indent() + self.enter_frame(else_frame) + self.blockvisit(node.else_, else_frame) + self.leave_frame(else_frame) + self.outdent() + + # if the node was recursive we have to return the buffer contents + # and start the iteration code + if node.recursive: + self.return_buffer_contents(loop_frame) + self.outdent() + self.start_write(frame, node) + if self.environment.is_async: + self.write("await ") + self.write("loop(") + if self.environment.is_async: + self.write("auto_aiter(") + self.visit(node.iter, frame) + if self.environment.is_async: + self.write(")") + self.write(", loop)") + self.end_write(frame) + + def visit_If(self, node, frame): + if_frame = frame.soft() + self.writeline("if ", node) + self.visit(node.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(node.body, if_frame) + self.outdent() + for elif_ in node.elif_: + self.writeline("elif ", elif_) + self.visit(elif_.test, if_frame) + self.write(":") + self.indent() + self.blockvisit(elif_.body, if_frame) + self.outdent() + if node.else_: + self.writeline("else:") + self.indent() + self.blockvisit(node.else_, if_frame) + self.outdent() + + def visit_Macro(self, node, frame): + macro_frame, macro_ref = self.macro_body(node, frame) + self.newline() + if frame.toplevel: + if not node.name.startswith("_"): + self.write("context.exported_vars.add(%r)" % node.name) + self.writeline("context.vars[%r] = " % node.name) + self.write("%s = " % frame.symbols.ref(node.name)) + self.macro_def(macro_ref, macro_frame) + + def visit_CallBlock(self, node, frame): + call_frame, macro_ref = self.macro_body(node, frame) + self.writeline("caller = ") + self.macro_def(macro_ref, call_frame) + self.start_write(frame, 
node) + self.visit_Call(node.call, frame, forward_caller=True) + self.end_write(frame) + + def visit_FilterBlock(self, node, frame): + filter_frame = frame.inner() + filter_frame.symbols.analyze_node(node) + self.enter_frame(filter_frame) + self.buffer(filter_frame) + self.blockvisit(node.body, filter_frame) + self.start_write(frame, node) + self.visit_Filter(node.filter, filter_frame) + self.end_write(frame) + self.leave_frame(filter_frame) + + def visit_With(self, node, frame): + with_frame = frame.inner() + with_frame.symbols.analyze_node(node) + self.enter_frame(with_frame) + for target, expr in izip(node.targets, node.values): + self.newline() + self.visit(target, with_frame) + self.write(" = ") + self.visit(expr, frame) + self.blockvisit(node.body, with_frame) + self.leave_frame(with_frame) + + def visit_ExprStmt(self, node, frame): + self.newline(node) + self.visit(node.node, frame) + + _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src")) + #: The default finalize function if the environment isn't configured + #: with one. Or if the environment has one, this is called on that + #: function's output for constants. + _default_finalize = text_type + _finalize = None + + def _make_finalize(self): + """Build the finalize function to be used on constants and at + runtime. Cached so it's only created once for all output nodes. + + Returns a ``namedtuple`` with the following attributes: + + ``const`` + A function to finalize constant data at compile time. + + ``src`` + Source code to output around nodes to be evaluated at + runtime. + """ + if self._finalize is not None: + return self._finalize + + finalize = default = self._default_finalize + src = None + + if self.environment.finalize: + src = "environment.finalize(" + env_finalize = self.environment.finalize + + def finalize(value): + return default(env_finalize(value)) + + if getattr(env_finalize, "contextfunction", False) is True: + src += "context, " + finalize = None # noqa: F811 + elif getattr(env_finalize, "evalcontextfunction", False) is True: + src += "context.eval_ctx, " + finalize = None + elif getattr(env_finalize, "environmentfunction", False) is True: + src += "environment, " + + def finalize(value): + return default(env_finalize(self.environment, value)) + + self._finalize = self._FinalizeInfo(finalize, src) + return self._finalize + + def _output_const_repr(self, group): + """Given a group of constant values converted from ``Output`` + child nodes, produce a string to write to the template module + source. + """ + return repr(concat(group)) + + def _output_child_to_const(self, node, frame, finalize): + """Try to optimize a child of an ``Output`` node by trying to + convert it to constant, finalized data at compile time. + + If :exc:`Impossible` is raised, the node is not constant and + will be evaluated at runtime. Any other exception will also be + evaluated at runtime for easier debugging. + """ + const = node.as_const(frame.eval_ctx) + + if frame.eval_ctx.autoescape: + const = escape(const) + + # Template data doesn't go through finalize. + if isinstance(node, nodes.TemplateData): + return text_type(const) + + return finalize.const(const) + + def _output_child_pre(self, node, frame, finalize): + """Output extra source code before visiting a child of an + ``Output`` node. 
+ """ + if frame.eval_ctx.volatile: + self.write("(escape if context.eval_ctx.autoescape else to_string)(") + elif frame.eval_ctx.autoescape: + self.write("escape(") + else: + self.write("to_string(") + + if finalize.src is not None: + self.write(finalize.src) + + def _output_child_post(self, node, frame, finalize): + """Output extra source code after visiting a child of an + ``Output`` node. + """ + self.write(")") + + if finalize.src is not None: + self.write(")") + + def visit_Output(self, node, frame): + # If an extends is active, don't render outside a block. + if frame.require_output_check: + # A top-level extends is known to exist at compile time. + if self.has_known_extends: + return + + self.writeline("if parent_template is None:") + self.indent() + + finalize = self._make_finalize() + body = [] + + # Evaluate constants at compile time if possible. Each item in + # body will be either a list of static data or a node to be + # evaluated at runtime. + for child in node.nodes: + try: + if not ( + # If the finalize function requires runtime context, + # constants can't be evaluated at compile time. + finalize.const + # Unless it's basic template data that won't be + # finalized anyway. + or isinstance(child, nodes.TemplateData) + ): + raise nodes.Impossible() + + const = self._output_child_to_const(child, frame, finalize) + except (nodes.Impossible, Exception): + # The node was not constant and needs to be evaluated at + # runtime. Or another error was raised, which is easier + # to debug at runtime. + body.append(child) + continue + + if body and isinstance(body[-1], list): + body[-1].append(const) + else: + body.append([const]) + + if frame.buffer is not None: + if len(body) == 1: + self.writeline("%s.append(" % frame.buffer) + else: + self.writeline("%s.extend((" % frame.buffer) + + self.indent() + + for item in body: + if isinstance(item, list): + # A group of constant data to join and output. + val = self._output_const_repr(item) + + if frame.buffer is None: + self.writeline("yield " + val) + else: + self.writeline(val + ",") + else: + if frame.buffer is None: + self.writeline("yield ", item) + else: + self.newline(item) + + # A node to be evaluated at runtime. + self._output_child_pre(item, frame, finalize) + self.visit(item, frame) + self._output_child_post(item, frame, finalize) + + if frame.buffer is not None: + self.write(",") + + if frame.buffer is not None: + self.outdent() + self.writeline(")" if len(body) == 1 else "))") + + if frame.require_output_check: + self.outdent() + + def visit_Assign(self, node, frame): + self.push_assign_tracking() + self.newline(node) + self.visit(node.target, frame) + self.write(" = ") + self.visit(node.node, frame) + self.pop_assign_tracking(frame) + + def visit_AssignBlock(self, node, frame): + self.push_assign_tracking() + block_frame = frame.inner() + # This is a special case. Since a set block always captures we + # will disable output checks. This way one can use set blocks + # toplevel even in extended templates. 
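+        # e.g. {% set title %}Hello{% endset %} buffers its body and assigns
+        # the captured text, optionally passed through a filter, to "title".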
+ block_frame.require_output_check = False + block_frame.symbols.analyze_node(node) + self.enter_frame(block_frame) + self.buffer(block_frame) + self.blockvisit(node.body, block_frame) + self.newline(node) + self.visit(node.target, frame) + self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") + if node.filter is not None: + self.visit_Filter(node.filter, block_frame) + else: + self.write("concat(%s)" % block_frame.buffer) + self.write(")") + self.pop_assign_tracking(frame) + self.leave_frame(block_frame) + + # -- Expression Visitors + + def visit_Name(self, node, frame): + if node.ctx == "store" and frame.toplevel: + if self._assign_stack: + self._assign_stack[-1].add(node.name) + ref = frame.symbols.ref(node.name) + + # If we are looking up a variable we might have to deal with the + # case where it's undefined. We can skip that case if the load + # instruction indicates a parameter which are always defined. + if node.ctx == "load": + load = frame.symbols.find_load(ref) + if not ( + load is not None + and load[0] == VAR_LOAD_PARAMETER + and not self.parameter_is_undeclared(ref) + ): + self.write( + "(undefined(name=%r) if %s is missing else %s)" + % (node.name, ref, ref) + ) + return + + self.write(ref) + + def visit_NSRef(self, node, frame): + # NSRefs can only be used to store values; since they use the normal + # `foo.bar` notation they will be parsed as a normal attribute access + # when used anywhere but in a `set` context + ref = frame.symbols.ref(node.name) + self.writeline("if not isinstance(%s, Namespace):" % ref) + self.indent() + self.writeline( + "raise TemplateRuntimeError(%r)" + % "cannot assign attribute on non-namespace object" + ) + self.outdent() + self.writeline("%s[%r]" % (ref, node.attr)) + + def visit_Const(self, node, frame): + val = node.as_const(frame.eval_ctx) + if isinstance(val, float): + self.write(str(val)) + else: + self.write(repr(val)) + + def visit_TemplateData(self, node, frame): + try: + self.write(repr(node.as_const(frame.eval_ctx))) + except nodes.Impossible: + self.write( + "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data + ) + + def visit_Tuple(self, node, frame): + self.write("(") + idx = -1 + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write(idx == 0 and ",)" or ")") + + def visit_List(self, node, frame): + self.write("[") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item, frame) + self.write("]") + + def visit_Dict(self, node, frame): + self.write("{") + for idx, item in enumerate(node.items): + if idx: + self.write(", ") + self.visit(item.key, frame) + self.write(": ") + self.visit(item.value, frame) + self.write("}") + + def binop(operator, interceptable=True): # noqa: B902 + @optimizeconst + def visitor(self, node, frame): + if ( + self.environment.sandboxed + and operator in self.environment.intercepted_binops + ): + self.write("environment.call_binop(context, %r, " % operator) + self.visit(node.left, frame) + self.write(", ") + self.visit(node.right, frame) + else: + self.write("(") + self.visit(node.left, frame) + self.write(" %s " % operator) + self.visit(node.right, frame) + self.write(")") + + return visitor + + def uaop(operator, interceptable=True): # noqa: B902 + @optimizeconst + def visitor(self, node, frame): + if ( + self.environment.sandboxed + and operator in self.environment.intercepted_unops + ): + self.write("environment.call_unop(context, %r, " % operator) + self.visit(node.node, frame) + 
else: + self.write("(" + operator) + self.visit(node.node, frame) + self.write(")") + + return visitor + + visit_Add = binop("+") + visit_Sub = binop("-") + visit_Mul = binop("*") + visit_Div = binop("/") + visit_FloorDiv = binop("//") + visit_Pow = binop("**") + visit_Mod = binop("%") + visit_And = binop("and", interceptable=False) + visit_Or = binop("or", interceptable=False) + visit_Pos = uaop("+") + visit_Neg = uaop("-") + visit_Not = uaop("not ", interceptable=False) + del binop, uaop + + @optimizeconst + def visit_Concat(self, node, frame): + if frame.eval_ctx.volatile: + func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)" + elif frame.eval_ctx.autoescape: + func_name = "markup_join" + else: + func_name = "unicode_join" + self.write("%s((" % func_name) + for arg in node.nodes: + self.visit(arg, frame) + self.write(", ") + self.write("))") + + @optimizeconst + def visit_Compare(self, node, frame): + self.write("(") + self.visit(node.expr, frame) + for op in node.ops: + self.visit(op, frame) + self.write(")") + + def visit_Operand(self, node, frame): + self.write(" %s " % operators[node.op]) + self.visit(node.expr, frame) + + @optimizeconst + def visit_Getattr(self, node, frame): + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getattr(") + self.visit(node.node, frame) + self.write(", %r)" % node.attr) + + if self.environment.is_async: + self.write("))") + + @optimizeconst + def visit_Getitem(self, node, frame): + # slices bypass the environment getitem method. + if isinstance(node.arg, nodes.Slice): + self.visit(node.node, frame) + self.write("[") + self.visit(node.arg, frame) + self.write("]") + else: + if self.environment.is_async: + self.write("(await auto_await(") + + self.write("environment.getitem(") + self.visit(node.node, frame) + self.write(", ") + self.visit(node.arg, frame) + self.write(")") + + if self.environment.is_async: + self.write("))") + + def visit_Slice(self, node, frame): + if node.start is not None: + self.visit(node.start, frame) + self.write(":") + if node.stop is not None: + self.visit(node.stop, frame) + if node.step is not None: + self.write(":") + self.visit(node.step, frame) + + @optimizeconst + def visit_Filter(self, node, frame): + if self.environment.is_async: + self.write("await auto_await(") + self.write(self.filters[node.name] + "(") + func = self.environment.filters.get(node.name) + if func is None: + self.fail("no filter named %r" % node.name, node.lineno) + if getattr(func, "contextfilter", False) is True: + self.write("context, ") + elif getattr(func, "evalcontextfilter", False) is True: + self.write("context.eval_ctx, ") + elif getattr(func, "environmentfilter", False) is True: + self.write("environment, ") + + # if the filter node is None we are inside a filter block + # and want to write to the current buffer + if node.node is not None: + self.visit(node.node, frame) + elif frame.eval_ctx.volatile: + self.write( + "(context.eval_ctx.autoescape and" + " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer) + ) + elif frame.eval_ctx.autoescape: + self.write("Markup(concat(%s))" % frame.buffer) + else: + self.write("concat(%s)" % frame.buffer) + self.signature(node, frame) + self.write(")") + if self.environment.is_async: + self.write(")") + + @optimizeconst + def visit_Test(self, node, frame): + self.write(self.tests[node.name] + "(") + if node.name not in self.environment.tests: + self.fail("no test named %r" % node.name, node.lineno) + self.visit(node.node, frame) 
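+        # e.g. {{ 9 is divisibleby 3 }} compiles to a call like t_1(9, 3),
+        # where t_1 was bound from environment.tests["divisibleby"] by
+        # pull_dependencies() above.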
+ self.signature(node, frame) + self.write(")") + + @optimizeconst + def visit_CondExpr(self, node, frame): + def write_expr2(): + if node.expr2 is not None: + return self.visit(node.expr2, frame) + self.write( + "cond_expr_undefined(%r)" + % ( + "the inline if-" + "expression on %s evaluated to false and " + "no else section was defined." % self.position(node) + ) + ) + + self.write("(") + self.visit(node.expr1, frame) + self.write(" if ") + self.visit(node.test, frame) + self.write(" else ") + write_expr2() + self.write(")") + + @optimizeconst + def visit_Call(self, node, frame, forward_caller=False): + if self.environment.is_async: + self.write("await auto_await(") + if self.environment.sandboxed: + self.write("environment.call(context, ") + else: + self.write("context.call(") + self.visit(node.node, frame) + extra_kwargs = forward_caller and {"caller": "caller"} or None + self.signature(node, frame, extra_kwargs) + self.write(")") + if self.environment.is_async: + self.write(")") + + def visit_Keyword(self, node, frame): + self.write(node.key + "=") + self.visit(node.value, frame) + + # -- Unused nodes for extensions + + def visit_MarkSafe(self, node, frame): + self.write("Markup(") + self.visit(node.expr, frame) + self.write(")") + + def visit_MarkSafeIfAutoescape(self, node, frame): + self.write("(context.eval_ctx.autoescape and Markup or identity)(") + self.visit(node.expr, frame) + self.write(")") + + def visit_EnvironmentAttribute(self, node, frame): + self.write("environment." + node.name) + + def visit_ExtensionAttribute(self, node, frame): + self.write("environment.extensions[%r].%s" % (node.identifier, node.name)) + + def visit_ImportedName(self, node, frame): + self.write(self.import_aliases[node.importname]) + + def visit_InternalName(self, node, frame): + self.write(node.name) + + def visit_ContextReference(self, node, frame): + self.write("context") + + def visit_DerivedContextReference(self, node, frame): + self.write(self.derive_context(frame)) + + def visit_Continue(self, node, frame): + self.writeline("continue", node) + + def visit_Break(self, node, frame): + self.writeline("break", node) + + def visit_Scope(self, node, frame): + scope_frame = frame.inner() + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + + def visit_OverlayScope(self, node, frame): + ctx = self.temporary_identifier() + self.writeline("%s = %s" % (ctx, self.derive_context(frame))) + self.writeline("%s.vars = " % ctx) + self.visit(node.context, frame) + self.push_context_reference(ctx) + + scope_frame = frame.inner(isolated=True) + scope_frame.symbols.analyze_node(node) + self.enter_frame(scope_frame) + self.blockvisit(node.body, scope_frame) + self.leave_frame(scope_frame) + self.pop_context_reference() + + def visit_EvalContextModifier(self, node, frame): + for keyword in node.options: + self.writeline("context.eval_ctx.%s = " % keyword.key) + self.visit(keyword.value, frame) + try: + val = keyword.value.as_const(frame.eval_ctx) + except nodes.Impossible: + frame.eval_ctx.volatile = True + else: + setattr(frame.eval_ctx, keyword.key, val) + + def visit_ScopedEvalContextModifier(self, node, frame): + old_ctx_name = self.temporary_identifier() + saved_ctx = frame.eval_ctx.save() + self.writeline("%s = context.eval_ctx.save()" % old_ctx_name) + self.visit_EvalContextModifier(node, frame) + for child in node.body: + self.visit(child, frame) + frame.eval_ctx.revert(saved_ctx) + 
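# e.g. {% autoescape true %}...{% endautoescape %} saves the eval
+        # context, modifies it for the body, and emits the revert below.
+        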
self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name) diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/constants.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..bf7f2ca721789052f1e227c5e3432e7712134c55 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/constants.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +#: list of lorem ipsum words used by the lipsum() helper function +LOREM_IPSUM_WORDS = u"""\ +a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at +auctor augue bibendum blandit class commodo condimentum congue consectetuer +consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus +diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend +elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames +faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac +hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum +justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem +luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie +mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non +nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque +penatibus per pharetra phasellus placerat platea porta porttitor posuere +potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus +ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit +sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor +tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices +ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus +viverra volutpat vulputate""" diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/debug.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/debug.py new file mode 100644 index 0000000000000000000000000000000000000000..5d8aec31d05dab0b01b192b9dfe74f2656dc3077 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/debug.py @@ -0,0 +1,268 @@ +import sys +from types import CodeType + +from . import TemplateSyntaxError +from ._compat import PYPY +from .utils import internal_code +from .utils import missing + + +def rewrite_traceback_stack(source=None): + """Rewrite the current exception to replace any tracebacks from + within compiled template code with tracebacks that look like they + came from the template source. + + This must be called within an ``except`` block. + + :param exc_info: A :meth:`sys.exc_info` tuple. If not provided, + the current ``exc_info`` is used. + :param source: For ``TemplateSyntaxError``, the original source if + known. + :return: A :meth:`sys.exc_info` tuple that can be re-raised. + """ + exc_type, exc_value, tb = sys.exc_info() + + if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: + exc_value.translated = True + exc_value.source = source + + try: + # Remove the old traceback on Python 3, otherwise the frames + # from the compiler still show up. 
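+            # On Python 2 exceptions have no with_traceback(); the
+            # AttributeError fallback below simply keeps the old frames.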
+            exc_value.with_traceback(None)
+        except AttributeError:
+            pass
+
+        # Outside of runtime, so the frame isn't executing template
+        # code, but it still needs to point at the template.
+        tb = fake_traceback(
+            exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
+        )
+    else:
+        # Skip the frame for the render function.
+        tb = tb.tb_next
+
+    stack = []
+
+    # Build the stack of traceback objects, replacing any in template
+    # code with the source file and line information.
+    while tb is not None:
+        # Skip frames decorated with @internalcode. These are internal
+        # calls that aren't useful in template debugging output.
+        if tb.tb_frame.f_code in internal_code:
+            tb = tb.tb_next
+            continue
+
+        template = tb.tb_frame.f_globals.get("__jinja_template__")
+
+        if template is not None:
+            lineno = template.get_corresponding_lineno(tb.tb_lineno)
+            fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+            stack.append(fake_tb)
+        else:
+            stack.append(tb)
+
+        tb = tb.tb_next
+
+    tb_next = None
+
+    # Assign tb_next in reverse to avoid circular references.
+    for tb in reversed(stack):
+        tb_next = tb_set_next(tb, tb_next)
+
+    return exc_type, exc_value, tb_next
+
+
+def fake_traceback(exc_value, tb, filename, lineno):
+    """Produce a new traceback object that looks like it came from the
+    template source instead of the compiled code. The filename, line
+    number, and location name will point to the template, and the local
+    variables will be the current template context.
+
+    :param exc_value: The original exception to be re-raised to create
+        the new traceback.
+    :param tb: The original traceback to get the local variables and
+        code info from.
+    :param filename: The template filename.
+    :param lineno: The line number in the template source.
+    """
+    if tb is not None:
+        # Replace the real locals with the context that would be
+        # available at that point in the template.
+        locals = get_template_locals(tb.tb_frame.f_locals)
+        locals.pop("__jinja_exception__", None)
+    else:
+        locals = {}
+
+    globals = {
+        "__name__": filename,
+        "__file__": filename,
+        "__jinja_exception__": exc_value,
+    }
+    # Raise an exception at the correct line number.
+    code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
+
+    # Build a new code object that points to the template file and
+    # replaces the location with a block name.
+    try:
+        location = "template"
+
+        if tb is not None:
+            function = tb.tb_frame.f_code.co_name
+
+            if function == "root":
+                location = "top-level template code"
+            elif function.startswith("block_"):
+                location = 'block "%s"' % function[6:]
+
+        # Collect arguments for the new code object. CodeType only
+        # accepts positional arguments, and arguments were inserted in
+        # new Python versions.
+        code_args = []
+
+        for attr in (
+            "argcount",
+            "posonlyargcount",  # Python 3.8
+            "kwonlyargcount",  # Python 3
+            "nlocals",
+            "stacksize",
+            "flags",
+            "code",  # codestring
+            "consts",  # constants
+            "names",
+            "varnames",
+            ("filename", filename),
+            ("name", location),
+            "firstlineno",
+            "lnotab",
+            "freevars",
+            "cellvars",
+        ):
+            if isinstance(attr, tuple):
+                # Replace with given value.
+                code_args.append(attr[1])
+                continue
+
+            try:
+                # Copy original value if it exists.
+                code_args.append(getattr(code, "co_" + attr))
+            except AttributeError:
+                # Some arguments were added later.
+                continue
+
+        code = CodeType(*code_args)
+    except Exception:
+        # Some environments such as Google App Engine don't support
+        # modifying code objects.
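+        # If that happens we fall back to the unmodified code object; the
+        # fake traceback still points at the template filename and line.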
+ pass + + # Execute the new code, which is guaranteed to raise, and return + # the new traceback without this frame. + try: + exec(code, globals, locals) + except BaseException: + return sys.exc_info()[2].tb_next + + +def get_template_locals(real_locals): + """Based on the runtime locals, get the context that would be + available at that point in the template. + """ + # Start with the current template context. + ctx = real_locals.get("context") + + if ctx: + data = ctx.get_all().copy() + else: + data = {} + + # Might be in a derived context that only sets local variables + # rather than pushing a context. Local variables follow the scheme + # l_depth_name. Find the highest-depth local that has a value for + # each name. + local_overrides = {} + + for name, value in real_locals.items(): + if not name.startswith("l_") or value is missing: + # Not a template variable, or no longer relevant. + continue + + try: + _, depth, name = name.split("_", 2) + depth = int(depth) + except ValueError: + continue + + cur_depth = local_overrides.get(name, (-1,))[0] + + if cur_depth < depth: + local_overrides[name] = (depth, value) + + # Modify the context with any derived context. + for name, (_, value) in local_overrides.items(): + if value is missing: + data.pop(name, None) + else: + data[name] = value + + return data + + +if sys.version_info >= (3, 7): + # tb_next is directly assignable as of Python 3.7 + def tb_set_next(tb, tb_next): + tb.tb_next = tb_next + return tb + + +elif PYPY: + # PyPy might have special support, and won't work with ctypes. + try: + import tputil + except ImportError: + # Without tproxy support, use the original traceback. + def tb_set_next(tb, tb_next): + return tb + + else: + # With tproxy support, create a proxy around the traceback that + # returns the new tb_next. + def tb_set_next(tb, tb_next): + def controller(op): + if op.opname == "__getattribute__" and op.args[0] == "tb_next": + return tb_next + + return op.delegate() + + return tputil.make_proxy(controller, obj=tb) + + +else: + # Use ctypes to assign tb_next at the C level since it's read-only + # from Python. + import ctypes + + class _CTraceback(ctypes.Structure): + _fields_ = [ + # Extra PyObject slots when compiled with Py_TRACE_REFS. + ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()), + # Only care about tb_next as an object, not a traceback. + ("tb_next", ctypes.py_object), + ] + + def tb_set_next(tb, tb_next): + c_tb = _CTraceback.from_address(id(tb)) + + # Clear out the old tb_next. + if tb.tb_next is not None: + c_tb_next = ctypes.py_object(tb.tb_next) + c_tb.tb_next = ctypes.py_object() + ctypes.pythonapi.Py_DecRef(c_tb_next) + + # Assign the new tb_next. 
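+        # Mirrors the decref above: take a strong reference on the new
+        # traceback before storing it in the C-level struct.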
+ if tb_next is not None: + c_tb_next = ctypes.py_object(tb_next) + ctypes.pythonapi.Py_IncRef(c_tb_next) + c_tb.tb_next = c_tb_next + + return tb diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/defaults.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..8e0e7d771076a8fe0dc71584018338d2caa35fb5 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/defaults.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +from ._compat import range_type +from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 +from .tests import TESTS as DEFAULT_TESTS # noqa: F401 +from .utils import Cycler +from .utils import generate_lorem_ipsum +from .utils import Joiner +from .utils import Namespace + +# defaults for the parser / lexer +BLOCK_START_STRING = "{%" +BLOCK_END_STRING = "%}" +VARIABLE_START_STRING = "{{" +VARIABLE_END_STRING = "}}" +COMMENT_START_STRING = "{#" +COMMENT_END_STRING = "#}" +LINE_STATEMENT_PREFIX = None +LINE_COMMENT_PREFIX = None +TRIM_BLOCKS = False +LSTRIP_BLOCKS = False +NEWLINE_SEQUENCE = "\n" +KEEP_TRAILING_NEWLINE = False + +# default filters, tests and namespace + +DEFAULT_NAMESPACE = { + "range": range_type, + "dict": dict, + "lipsum": generate_lorem_ipsum, + "cycler": Cycler, + "joiner": Joiner, + "namespace": Namespace, +} + +# default policies +DEFAULT_POLICIES = { + "compiler.ascii_str": True, + "urlize.rel": "noopener", + "urlize.target": None, + "truncate.leeway": 5, + "json.dumps_function": None, + "json.dumps_kwargs": {"sort_keys": True}, + "ext.i18n.trimmed": False, +} diff --git a/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/environment.py b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/environment.py new file mode 100644 index 0000000000000000000000000000000000000000..8430390eeab412b7f3b92d88a4bdecbe1de5f538 --- /dev/null +++ b/Hackfest_Demos/OSM-MR10/HD2.6-Juju-Relations/hackfest_squid_metrics_cnf/charms/squid-operator/venv/jinja2/environment.py @@ -0,0 +1,1362 @@ +# -*- coding: utf-8 -*- +"""Classes for managing templates and their runtime and compile time +options. +""" +import os +import sys +import weakref +from functools import partial +from functools import reduce + +from markupsafe import Markup + +from . 
import nodes
+from ._compat import encode_filename
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import PYPY
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import generate
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import DEFAULT_FILTERS
+from .defaults import DEFAULT_NAMESPACE
+from .defaults import DEFAULT_POLICIES
+from .defaults import DEFAULT_TESTS
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .lexer import get_lexer
+from .lexer import TokenStream
+from .nodes import EvalContext
+from .parser import Parser
+from .runtime import Context
+from .runtime import new_context
+from .runtime import Undefined
+from .utils import concat
+from .utils import consume
+from .utils import have_async_gen
+from .utils import import_string
+from .utils import internalcode
+from .utils import LRUCache
+from .utils import missing
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+
+def get_spontaneous_environment(cls, *args):
+    """Return a new spontaneous environment. A spontaneous environment
+    is used for templates created directly rather than through an
+    existing environment.
+
+    :param cls: Environment class to create.
+    :param args: Positional arguments passed to environment.
+    """
+    key = (cls, args)
+
+    try:
+        return _spontaneous_environments[key]
+    except KeyError:
+        _spontaneous_environments[key] = env = cls(*args)
+        env.shared = True
+        return env
+
+
+def create_cache(size):
+    """Return the cache class for the given size."""
+    if size == 0:
+        return None
+    if size < 0:
+        return {}
+    return LRUCache(size)
+
+
+def copy_cache(cache):
+    """Create an empty copy of the given cache."""
+    if cache is None:
+        return None
+    elif type(cache) is dict:
+        return {}
+    return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+    """Load the extensions from the list and bind it to the environment.
+    Returns a dict of instantiated extensions.
+    """
+    result = {}
+    for extension in extensions:
+        if isinstance(extension, string_types):
+            extension = import_string(extension)
+        result[extension.identifier] = extension(environment)
+    return result
+
+
+def fail_for_missing_callable(string, name):
+    msg = string % name
+    if isinstance(name, Undefined):
+        try:
+            name._fail_with_undefined_error()
+        except Exception as e:
+            msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
+    raise TemplateRuntimeError(msg)
+
+
+def _environment_sanity_check(environment):
+    """Perform a sanity check on the environment."""
+    assert issubclass(
+        environment.undefined, Undefined
+    ), "undefined must be a subclass of Undefined because filters depend on it."
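+    # e.g. Environment(block_start_string="{{") would trip the next assert,
+    # because the lexer could no longer tell blocks and variables apart.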
+ assert ( + environment.block_start_string + != environment.variable_start_string + != environment.comment_start_string + ), "block, variable and comment start strings must be different" + assert environment.newline_sequence in ( + "\r", + "\r\n", + "\n", + ), "newline_sequence set to unknown line ending string." + return environment + + +class Environment(object): + r"""The core component of Jinja is the `Environment`. It contains + important shared variables like configuration, filters, tests, + globals and others. Instances of this class may be modified if + they are not shared and if no template was loaded so far. + Modifications on environments after the first template was loaded + will lead to surprising effects and undefined behavior. + + Here are the possible initialization parameters: + + `block_start_string` + The string marking the beginning of a block. Defaults to ``'{%'``. + + `block_end_string` + The string marking the end of a block. Defaults to ``'%}'``. + + `variable_start_string` + The string marking the beginning of a print statement. + Defaults to ``'{{'``. + + `variable_end_string` + The string marking the end of a print statement. Defaults to + ``'}}'``. + + `comment_start_string` + The string marking the beginning of a comment. Defaults to ``'{#'``. + + `comment_end_string` + The string marking the end of a comment. Defaults to ``'#}'``. + + `line_statement_prefix` + If given and a string, this will be used as prefix for line based + statements. See also :ref:`line-statements`. + + `line_comment_prefix` + If given and a string, this will be used as prefix for line based + comments. See also :ref:`line-statements`. + + .. versionadded:: 2.2 + + `trim_blocks` + If this is set to ``True`` the first newline after a block is + removed (block, not variable tag!). Defaults to `False`. + + `lstrip_blocks` + If this is set to ``True`` leading spaces and tabs are stripped + from the start of a line to a block. Defaults to `False`. + + `newline_sequence` + The sequence that starts a newline. Must be one of ``'\r'``, + ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a + useful default for Linux and OS X systems as well as web + applications. + + `keep_trailing_newline` + Preserve the trailing newline when rendering templates. + The default is ``False``, which causes a single newline, + if present, to be stripped from the end of the template. + + .. versionadded:: 2.7 + + `extensions` + List of Jinja extensions to use. This can either be import paths + as strings or extension classes. For more information have a + look at :ref:`the extensions documentation `. + + `optimized` + should the optimizer be enabled? Default is ``True``. + + `undefined` + :class:`Undefined` or a subclass of it that is used to represent + undefined values in the template. + + `finalize` + A callable that can be used to process the result of a variable + expression before it is output. For example one can convert + ``None`` implicitly into an empty string here. + + `autoescape` + If set to ``True`` the XML/HTML autoescaping feature is enabled by + default. For more details about autoescaping see + :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also + be a callable that is passed the template name and has to + return ``True`` or ``False`` depending on autoescape should be + enabled by default. + + .. versionchanged:: 2.4 + `autoescape` can now be a function + + `loader` + The template loader for this environment. + + `cache_size` + The size of the cache. 
By default this is ``400`` which means
+        that if more than 400 templates are loaded the loader will clean
+        out the least recently used template.  If the cache size is set to
+        ``0`` templates are recompiled all the time, if the cache size is
+        ``-1`` the cache will not be cleaned.
+
+        .. versionchanged:: 2.8
+           The cache size was increased to 400 from a low 50.
+
+    `auto_reload`
+        Some loaders load templates from locations where the template
+        sources may change (i.e. file system or database).  If
+        ``auto_reload`` is set to ``True`` (default) every time a template is
+        requested the loader checks if the source changed and if yes, it
+        will reload the template.  For higher performance it's possible to
+        disable that.
+
+    `bytecode_cache`
+        If set to a bytecode cache object, this object will provide a
+        cache for the internal Jinja bytecode so that templates don't
+        have to be parsed if they were not changed.
+
+        See :ref:`bytecode-cache` for more information.
+
+    `enable_async`
+        If set to true this enables async template execution which allows
+        you to take advantage of newer Python features. This requires
+        Python 3.6 or later.
+    """
+
+    #: if this environment is sandboxed.  Modifying this variable won't make
+    #: the environment sandboxed though.  For a real sandboxed environment
+    #: have a look at jinja2.sandbox.  This flag alone controls the code
+    #: generation by the compiler.
+    sandboxed = False
+
+    #: True if the environment is just an overlay
+    overlayed = False
+
+    #: the environment this environment is linked to if it is an overlay
+    linked_to = None
+
+    #: shared environments have this set to `True`.  A shared environment
+    #: must not be modified
+    shared = False
+
+    #: the class that is used for code generation.  See
+    #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+    code_generator_class = CodeGenerator
+
+    #: the context class that is used for templates.  See
+    #: :class:`~jinja2.runtime.Context` for more information.
+    context_class = Context
+
+    def __init__(
+        self,
+        block_start_string=BLOCK_START_STRING,
+        block_end_string=BLOCK_END_STRING,
+        variable_start_string=VARIABLE_START_STRING,
+        variable_end_string=VARIABLE_END_STRING,
+        comment_start_string=COMMENT_START_STRING,
+        comment_end_string=COMMENT_END_STRING,
+        line_statement_prefix=LINE_STATEMENT_PREFIX,
+        line_comment_prefix=LINE_COMMENT_PREFIX,
+        trim_blocks=TRIM_BLOCKS,
+        lstrip_blocks=LSTRIP_BLOCKS,
+        newline_sequence=NEWLINE_SEQUENCE,
+        keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+        extensions=(),
+        optimized=True,
+        undefined=Undefined,
+        finalize=None,
+        autoescape=False,
+        loader=None,
+        cache_size=400,
+        auto_reload=True,
+        bytecode_cache=None,
+        enable_async=False,
+    ):
+        # !!Important notice!!
+        #   The constructor accepts quite a few arguments that should be
+        #   passed by keyword rather than position.  However it's important to
+        #   not change the order of arguments because it's used at least
+        #   internally in those cases:
+        #       -   spontaneous environments (i18n extension and Template)
+        #       -   unittests
+        #   If parameter changes are required only add parameters at the end
+        #   and don't change the arguments (or the defaults!) of the arguments
+        #   existing already.
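+        # In practice this means environments should be constructed with
+        # keyword arguments, e.g. Environment(loader=..., trim_blocks=True),
+        # never positionally.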
+
+        # lexer / parser information
+        self.block_start_string = block_start_string
+        self.block_end_string = block_end_string
+        self.variable_start_string = variable_start_string
+        self.variable_end_string = variable_end_string
+        self.comment_start_string = comment_start_string
+        self.comment_end_string = comment_end_string
+        self.line_statement_prefix = line_statement_prefix
+        self.line_comment_prefix = line_comment_prefix
+        self.trim_blocks = trim_blocks
+        self.lstrip_blocks = lstrip_blocks
+        self.newline_sequence = newline_sequence
+        self.keep_trailing_newline = keep_trailing_newline
+
+        # runtime information
+        self.undefined = undefined
+        self.optimized = optimized
+        self.finalize = finalize
+        self.autoescape = autoescape
+
+        # defaults
+        self.filters = DEFAULT_FILTERS.copy()
+        self.tests = DEFAULT_TESTS.copy()
+        self.globals = DEFAULT_NAMESPACE.copy()
+
+        # set the loader provided
+        self.loader = loader
+        self.cache = create_cache(cache_size)
+        self.bytecode_cache = bytecode_cache
+        self.auto_reload = auto_reload
+
+        # configurable policies
+        self.policies = DEFAULT_POLICIES.copy()
+
+        # load extensions
+        self.extensions = load_extensions(self, extensions)
+
+        self.enable_async = enable_async
+        self.is_async = self.enable_async and have_async_gen
+        if self.is_async:
+            # runs patch_all() to enable async support
+            from . import asyncsupport  # noqa: F401
+
+        _environment_sanity_check(self)
+
+    def add_extension(self, extension):
+        """Adds an extension after the environment was created.
+
+        .. versionadded:: 2.5
+        """
+        self.extensions.update(load_extensions(self, [extension]))
+
+    def extend(self, **attributes):
+        """Add the items to the instance of the environment if they do not
+        exist yet. This is used by :ref:`extensions <writing-extensions>` to
+        register callbacks and configuration values without breaking
+        inheritance.
+        """
+        for key, value in iteritems(attributes):
+            if not hasattr(self, key):
+                setattr(self, key, value)
+
+    def overlay(
+        self,
+        block_start_string=missing,
+        block_end_string=missing,
+        variable_start_string=missing,
+        variable_end_string=missing,
+        comment_start_string=missing,
+        comment_end_string=missing,
+        line_statement_prefix=missing,
+        line_comment_prefix=missing,
+        trim_blocks=missing,
+        lstrip_blocks=missing,
+        extensions=missing,
+        optimized=missing,
+        undefined=missing,
+        finalize=missing,
+        autoescape=missing,
+        loader=missing,
+        cache_size=missing,
+        auto_reload=missing,
+        bytecode_cache=missing,
+    ):
+        """Create a new overlay environment that shares all the data with the
+        current environment except for cache and the overridden attributes.
+        Extensions cannot be removed for an overlayed environment. An
+        overlayed environment automatically gets all the extensions of the
+        environment it is linked to plus optional extra extensions.
+
+        Creating overlays should happen after the initial environment was set
+        up completely. Not all attributes are truly linked; some are just
+        copied over, so modifications on the original environment may not
+        shine through.
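+
+        A minimal sketch (assuming ``env`` is an already configured
+        environment)::
+
+            # autoescaping enabled, everything else shared with ``env``
+            html_env = env.overlay(autoescape=True)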
+        """
+        args = dict(locals())
+        del args["self"], args["cache_size"], args["extensions"]
+
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.overlayed = True
+        rv.linked_to = self
+
+        for key, value in iteritems(args):
+            if value is not missing:
+                setattr(rv, key, value)
+
+        if cache_size is not missing:
+            rv.cache = create_cache(cache_size)
+        else:
+            rv.cache = copy_cache(self.cache)
+
+        rv.extensions = {}
+        for key, value in iteritems(self.extensions):
+            rv.extensions[key] = value.bind(rv)
+        if extensions is not missing:
+            rv.extensions.update(load_extensions(rv, extensions))
+
+        return _environment_sanity_check(rv)
+
+    lexer = property(get_lexer, doc="The lexer for this environment.")
+
+    def iter_extensions(self):
+        """Iterates over the extensions by priority."""
+        return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+
+    def getitem(self, obj, argument):
+        """Get an item or attribute of an object but prefer the item."""
+        try:
+            return obj[argument]
+        except (AttributeError, TypeError, LookupError):
+            if isinstance(argument, string_types):
+                try:
+                    attr = str(argument)
+                except Exception:
+                    pass
+                else:
+                    try:
+                        return getattr(obj, attr)
+                    except AttributeError:
+                        pass
+            return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj, attribute):
+        """Get an item or attribute of an object but prefer the attribute.
+        Unlike :meth:`getitem` the attribute *must* be a string.
+        """
+        try:
+            return getattr(obj, attribute)
+        except AttributeError:
+            pass
+        try:
+            return obj[attribute]
+        except (TypeError, LookupError, AttributeError):
+            return self.undefined(obj=obj, name=attribute)
+
+    def call_filter(
+        self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
+    ):
+        """Invokes a filter on a value the same way the compiler does it.
+
+        Note that on Python 3 this might return a coroutine in case the
+        filter is running from an environment in async mode and the filter
+        supports async execution. It's your responsibility to await this
+        if needed.
+
+        .. versionadded:: 2.7
+        """
+        func = self.filters.get(name)
+        if func is None:
+            fail_for_missing_callable("no filter named %r", name)
+        args = [value] + list(args or ())
+        if getattr(func, "contextfilter", False) is True:
+            if context is None:
+                raise TemplateRuntimeError(
+                    "Attempted to invoke context filter without context"
+                )
+            args.insert(0, context)
+        elif getattr(func, "evalcontextfilter", False) is True:
+            if eval_ctx is None:
+                if context is not None:
+                    eval_ctx = context.eval_ctx
+                else:
+                    eval_ctx = EvalContext(self)
+            args.insert(0, eval_ctx)
+        elif getattr(func, "environmentfilter", False) is True:
+            args.insert(0, self)
+        return func(*args, **(kwargs or {}))
+
+    def call_test(self, name, value, args=None, kwargs=None):
+        """Invokes a test on a value the same way the compiler does it.
+
+        .. versionadded:: 2.7
+        """
+        func = self.tests.get(name)
+        if func is None:
+            fail_for_missing_callable("no test named %r", name)
+        return func(value, *(args or ()), **(kwargs or {}))
+
+    @internalcode
+    def parse(self, source, name=None, filename=None):
+        """Parse the source code and return the abstract syntax tree. This
+        tree of nodes is used by the compiler to convert the template into
+        executable source- or bytecode. This is useful for debugging or to
+        extract information from templates.
+
+        If you are :ref:`developing Jinja extensions <writing-extensions>`
+        this gives you a good overview of the node tree generated.
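+
+        For example (an illustrative sketch; assumes an existing ``env``
+        and that :mod:`jinja2.meta` is imported as ``meta``)::
+
+            ast = env.parse("Hello {{ name }}!")
+            meta.find_undeclared_variables(ast)  # -> {'name'}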
+        """
+        try:
+            return self._parse(source, name, filename)
+        except TemplateSyntaxError:
+            self.handle_exception(source=source)
+
+    def _parse(self, source, name, filename):
+        """Internal parsing function used by `parse` and `compile`."""
+        return Parser(self, source, name, encode_filename(filename)).parse()
+
+    def lex(self, source, name=None, filename=None):
+        """Lex the given source code and return a generator that yields
+        tokens as tuples in the form ``(lineno, token_type, value)``.
+        This can be useful for :ref:`extension development <writing-extensions>`
+        and debugging templates.
+
+        This does not perform preprocessing. If you want the preprocessing
+        of the extensions to be applied you have to filter source through
+        the :meth:`preprocess` method.
+        """
+        source = text_type(source)
+        try:
+            return self.lexer.tokeniter(source, name, filename)
+        except TemplateSyntaxError:
+            self.handle_exception(source=source)
+
+    def preprocess(self, source, name=None, filename=None):
+        """Preprocesses the source with all extensions. This is automatically
+        called for all parsing and compiling methods but *not* for :meth:`lex`
+        because there you usually only want the actual source tokenized.
+        """
+        return reduce(
+            lambda s, e: e.preprocess(s, name, filename),
+            self.iter_extensions(),
+            text_type(source),
+        )
+
+    def _tokenize(self, source, name, filename=None, state=None):
+        """Called by the parser to do the preprocessing and filtering
+        for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+        """
+        source = self.preprocess(source, name, filename)
+        stream = self.lexer.tokenize(source, name, filename, state)
+
+        for ext in self.iter_extensions():
+            stream = ext.filter_stream(stream)
+
+            if not isinstance(stream, TokenStream):
+                stream = TokenStream(stream, name, filename)
+
+        return stream
+
+    def _generate(self, source, name, filename, defer_init=False):
+        """Internal hook that can be overridden to hook a different generate
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return generate(
+            source,
+            self,
+            name,
+            filename,
+            defer_init=defer_init,
+            optimized=self.optimized,
+        )
+
+    def _compile(self, source, filename):
+        """Internal hook that can be overridden to hook a different compile
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return compile(source, filename, "exec")
+
+    @internalcode
+    def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
+        """Compile a node or template source code. The `name` parameter is
+        the load name of the template after it was joined using
+        :meth:`join_path` if necessary, not the filename on the file system.
+        The `filename` parameter is the estimated filename of the template on
+        the file system. If the template came from a database or memory this
+        can be omitted.
+
+        The return value of this method is a Python code object. If the
+        `raw` parameter is ``True`` the return value will instead be a string
+        of Python source code equivalent to that code object. This method is
+        mainly used internally.
+
+        `defer_init` is used internally to aid the module code generator.
+        It allows the generated code to be imported without the global
+        environment variable being set.
+
+        .. versionadded:: 2.4
+           `defer_init` parameter added.
+        """
+        source_hint = None
+        try:
+            if isinstance(source, string_types):
+                source_hint = source
+                source = self._parse(source, name, filename)
+            source = self._generate(source, name, filename, defer_init=defer_init)
+            if raw:
+                return source
+            if filename is None:
+                filename = "