From abea6e4dea58e7bbad8b013342ce4ad4c29c8c29 Mon Sep 17 00:00:00 2001
From: beierlm <mark.beierl@canonical.com>
Date: Fri, 5 Mar 2021 07:35:08 -0500
Subject: [PATCH] Added missing source

Signed-off-by: beierlm <mark.beierl@canonical.com>
---
 .../lib/charmhelpers/__init__.py | 97 +
 .../lib/charmhelpers/cli/README.rst | 57 +
 .../lib/charmhelpers/cli/__init__.py | 189 +
 .../lib/charmhelpers/cli/benchmark.py | 34 +
 .../lib/charmhelpers/cli/commands.py | 30 +
 .../lib/charmhelpers/cli/hookenv.py | 21 +
 .../lib/charmhelpers/cli/host.py | 29 +
 .../lib/charmhelpers/cli/unitdata.py | 46 +
 .../lib/charmhelpers/context.py | 205 ++
 .../lib/charmhelpers/contrib/__init__.py | 13 +
 .../charmhelpers/contrib/amulet/__init__.py | 13 +
 .../charmhelpers/contrib/amulet/deployment.py | 99 +
 .../lib/charmhelpers/contrib/amulet/utils.py | 820 +++++
 .../charmhelpers/contrib/ansible/__init__.py | 303 ++
 .../contrib/benchmark/__init__.py | 124 +
 .../charmhelpers/contrib/charmhelpers/IMPORT | 4 +
 .../contrib/charmhelpers/__init__.py | 203 ++
 .../charmhelpers/contrib/charmsupport/IMPORT | 14 +
 .../contrib/charmsupport/__init__.py | 13 +
 .../charmhelpers/contrib/charmsupport/nrpe.py | 500 +++
 .../contrib/charmsupport/volumes.py | 173 +
 .../charmhelpers/contrib/database/__init__.py | 11 +
 .../charmhelpers/contrib/database/mysql.py | 821 +++++
 .../contrib/hahelpers/__init__.py | 13 +
 .../charmhelpers/contrib/hahelpers/apache.py | 86 +
 .../charmhelpers/contrib/hahelpers/cluster.py | 451 +++
 .../contrib/hardening/README.hardening.md | 38 +
 .../contrib/hardening/__init__.py | 13 +
 .../contrib/hardening/apache/__init__.py | 17 +
 .../hardening/apache/checks/__init__.py | 29 +
 .../contrib/hardening/apache/checks/config.py | 104 +
 .../apache/templates/99-hardening.conf | 32 +
 .../hardening/apache/templates/alias.conf | 31 +
 .../contrib/hardening/audits/__init__.py | 54 +
 .../contrib/hardening/audits/apache.py | 100 +
 .../contrib/hardening/audits/apt.py | 104 +
 .../contrib/hardening/audits/file.py | 550 +++
 .../contrib/hardening/defaults/apache.yaml | 16 +
 .../hardening/defaults/apache.yaml.schema | 12 +
 .../contrib/hardening/defaults/mysql.yaml | 38 +
 .../hardening/defaults/mysql.yaml.schema | 15 +
 .../contrib/hardening/defaults/os.yaml | 68 +
 .../contrib/hardening/defaults/os.yaml.schema | 43 +
 .../contrib/hardening/defaults/ssh.yaml | 49 +
 .../hardening/defaults/ssh.yaml.schema | 42 +
 .../charmhelpers/contrib/hardening/harden.py | 96 +
 .../contrib/hardening/host/__init__.py | 17 +
 .../contrib/hardening/host/checks/__init__.py | 48 +
 .../contrib/hardening/host/checks/apt.py | 37 +
 .../contrib/hardening/host/checks/limits.py | 53 +
 .../contrib/hardening/host/checks/login.py | 65 +
 .../hardening/host/checks/minimize_access.py | 50 +
 .../contrib/hardening/host/checks/pam.py | 132 +
 .../contrib/hardening/host/checks/profile.py | 49 +
 .../hardening/host/checks/securetty.py | 37 +
 .../hardening/host/checks/suid_sgid.py | 129 +
 .../contrib/hardening/host/checks/sysctl.py | 209 ++
 .../hardening/host/templates/10.hardcore.conf | 8 +
 .../hardening/host/templates/99-hardening.sh | 5 +
 .../host/templates/99-juju-hardening.conf | 7 +
 .../hardening/host/templates/login.defs | 349 ++
 .../contrib/hardening/host/templates/modules | 117 +
 .../hardening/host/templates/passwdqc.conf | 11 +
 .../host/templates/pinerolo_profile.sh | 8 +
 .../hardening/host/templates/securetty | 11 +
 .../contrib/hardening/host/templates/tally2 | 14 +
 .../contrib/hardening/mysql/__init__.py | 17 +
 .../hardening/mysql/checks/__init__.py | 29 +
 .../contrib/hardening/mysql/checks/config.py | 87 +
 .../hardening/mysql/templates/hardening.cnf | 12 +
 .../contrib/hardening/ssh/__init__.py | 17 +
 .../contrib/hardening/ssh/checks/__init__.py | 29 +
 .../contrib/hardening/ssh/checks/config.py | 435 +++
 .../hardening/ssh/templates/ssh_config | 70 +
 .../hardening/ssh/templates/sshd_config | 159 +
 .../contrib/hardening/templating.py | 73 +
 .../charmhelpers/contrib/hardening/utils.py | 155 +
 .../charmhelpers/contrib/mellanox/__init__.py | 13 +
 .../contrib/mellanox/infiniband.py | 153 +
 .../charmhelpers/contrib/network/__init__.py | 13 +
 .../lib/charmhelpers/contrib/network/ip.py | 602 ++++
 .../contrib/network/ovs/__init__.py | 541 +++
 .../charmhelpers/contrib/network/ovs/ovn.py | 233 ++
 .../charmhelpers/contrib/network/ovs/ovsdb.py | 206 ++
 .../charmhelpers/contrib/network/ovs/utils.py | 26 +
 .../lib/charmhelpers/contrib/network/ufw.py | 386 ++
 .../contrib/openstack/__init__.py | 13 +
 .../contrib/openstack/alternatives.py | 44 +
 .../contrib/openstack/amulet/__init__.py | 13 +
 .../contrib/openstack/amulet/deployment.py | 384 ++
 .../contrib/openstack/amulet/utils.py | 1593 +++++++++
 .../contrib/openstack/audits/__init__.py | 212 ++
 .../audits/openstack_security_guide.py | 270 ++
 .../contrib/openstack/cert_utils.py | 289 ++
 .../charmhelpers/contrib/openstack/context.py | 3177 +++++++++++++++++
 .../contrib/openstack/exceptions.py | 21 +
 .../contrib/openstack/files/__init__.py | 16 +
 .../contrib/openstack/files/check_haproxy.sh | 34 +
 .../files/check_haproxy_queue_depth.sh | 30 +
 .../contrib/openstack/ha/__init__.py | 13 +
 .../contrib/openstack/ha/utils.py | 348 ++
 .../lib/charmhelpers/contrib/openstack/ip.py | 197 +
 .../contrib/openstack/keystone.py | 178 +
 .../charmhelpers/contrib/openstack/neutron.py | 359 ++
 .../charmhelpers/contrib/openstack/policyd.py | 801 +++++
 .../contrib/openstack/ssh_migrations.py | 412 +++
 .../contrib/openstack/templates/__init__.py | 16 +
 .../contrib/openstack/templates/ceph.conf | 24 +
 .../contrib/openstack/templates/git.upstart | 17 +
 .../contrib/openstack/templates/haproxy.cfg | 77 +
 .../contrib/openstack/templates/logrotate | 9 +
 .../openstack/templates/memcached.conf | 53 +
 .../templates/openstack_https_frontend | 29 +
 .../templates/openstack_https_frontend.conf | 29 +
 .../templates/section-keystone-authtoken | 12 +
 .../section-keystone-authtoken-legacy | 10 +
 .../section-keystone-authtoken-mitaka | 22 +
 .../section-keystone-authtoken-v3only | 9 +
 .../openstack/templates/section-oslo-cache | 6 +
 .../templates/section-oslo-messaging-rabbit | 10 +
 .../section-oslo-messaging-rabbit-ocata | 10 +
 .../templates/section-oslo-middleware | 5 +
 .../templates/section-oslo-notifications | 15 +
 .../openstack/templates/section-placement | 19 +
 .../openstack/templates/section-rabbitmq-oslo | 22 +
 .../openstack/templates/section-zeromq | 14 +
 .../openstack/templates/vendor_data.json | 1 +
 .../templates/wsgi-openstack-api.conf | 91 +
 .../templates/wsgi-openstack-metadata.conf | 91 +
 .../contrib/openstack/templating.py | 379 ++
 .../charmhelpers/contrib/openstack/utils.py | 2352 ++++++++++++
 .../contrib/openstack/vaultlocker.py | 179 +
 .../contrib/peerstorage/__init__.py | 267 ++
 .../lib/charmhelpers/contrib/python.py | 21 +
 .../contrib/saltstack/__init__.py | 116 +
 .../lib/charmhelpers/contrib/ssl/__init__.py | 92 +
 .../lib/charmhelpers/contrib/ssl/service.py | 277 ++
 .../charmhelpers/contrib/storage/__init__.py | 13 +
 .../contrib/storage/linux/__init__.py | 13 +
 .../contrib/storage/linux/bcache.py | 74 +
 .../contrib/storage/linux/ceph.py | 1810 ++++++++++
 .../contrib/storage/linux/loopback.py | 92 +
 .../charmhelpers/contrib/storage/linux/lvm.py | 182 +
 .../contrib/storage/linux/utils.py | 128 +
 .../contrib/templating/__init__.py | 13 +
 .../contrib/templating/contexts.py | 137 +
 .../charmhelpers/contrib/templating/jinja.py | 38 +
 .../contrib/templating/pyformat.py | 27 +
 .../charmhelpers/contrib/unison/__init__.py | 314 ++
 .../lib/charmhelpers/coordinator.py | 606 ++++
 .../lib/charmhelpers/core/__init__.py | 13 +
 .../lib/charmhelpers/core/decorators.py | 55 +
 .../lib/charmhelpers/core/files.py | 43 +
 .../lib/charmhelpers/core/fstab.py | 132 +
 .../lib/charmhelpers/core/hookenv.py | 1613 +++++++++
 .../lib/charmhelpers/core/host.py | 1104 ++++++
 .../core/host_factory/__init__.py | 0
 .../charmhelpers/core/host_factory/centos.py | 72 +
 .../charmhelpers/core/host_factory/ubuntu.py | 116 +
 .../lib/charmhelpers/core/hugepage.py | 69 +
 .../lib/charmhelpers/core/kernel.py | 72 +
 .../core/kernel_factory/__init__.py | 0
 .../core/kernel_factory/centos.py | 17 +
 .../core/kernel_factory/ubuntu.py | 13 +
 .../charmhelpers/core/services/__init__.py | 16 +
 .../lib/charmhelpers/core/services/base.py | 362 ++
 .../lib/charmhelpers/core/services/helpers.py | 290 ++
 .../lib/charmhelpers/core/strutils.py | 129 +
 .../lib/charmhelpers/core/sysctl.py | 75 +
 .../lib/charmhelpers/core/templating.py | 93 +
 .../lib/charmhelpers/core/unitdata.py | 525 +++
 .../lib/charmhelpers/fetch/__init__.py | 209 ++
 .../lib/charmhelpers/fetch/archiveurl.py | 165 +
 .../lib/charmhelpers/fetch/bzrurl.py | 76 +
 .../lib/charmhelpers/fetch/centos.py | 171 +
 .../lib/charmhelpers/fetch/giturl.py | 69 +
 .../lib/charmhelpers/fetch/python/__init__.py | 13 +
 .../lib/charmhelpers/fetch/python/debug.py | 54 +
 .../lib/charmhelpers/fetch/python/packages.py | 154 +
 .../lib/charmhelpers/fetch/python/rpdb.py | 56 +
 .../lib/charmhelpers/fetch/python/version.py | 32 +
 .../lib/charmhelpers/fetch/snap.py | 150 +
 .../lib/charmhelpers/fetch/ubuntu.py | 805 +++++
 .../lib/charmhelpers/fetch/ubuntu_apt_pkg.py | 267 ++
 .../lib/charmhelpers/osplatform.py | 46 +
 .../lib/charmhelpers/payload/__init__.py | 15 +
 .../lib/charmhelpers/payload/archive.py | 71 +
 .../lib/charmhelpers/payload/execd.py | 65 +
 .../lib/charms/osm/libansible.py | 108 +
 .../vyos-config-src/lib/charms/osm/ns.py | 301 ++
 .../lib/charms/osm/proxy_cluster.py | 59 +
 .../lib/charms/osm/sshproxy.py | 375 ++
 .../vyos-config-src/lib/ops/__init__.py | 20 +
 .../charms/vyos-config-src/lib/ops/charm.py | 575 +++
 .../vyos-config-src/lib/ops/framework.py | 1067 ++++++
 .../vyos-config-src/lib/ops/jujuversion.py | 98 +
 .../vyos-config-src/lib/ops/lib/__init__.py | 194 +
 .../charms/vyos-config-src/lib/ops/log.py | 51 +
 .../charms/vyos-config-src/lib/ops/main.py | 348 ++
 .../charms/vyos-config-src/lib/ops/model.py | 1237 +++++++
 .../charms/vyos-config-src/lib/ops/storage.py | 318 ++
 .../charms/vyos-config-src/lib/ops/testing.py | 586 +++
 .../charms/vyos-config-src/lib/ops/version.py | 50 +
 203 files changed, 39171 insertions(+)
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/README.rst
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/benchmark.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/commands.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/hookenv.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/host.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/unitdata.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/context.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/deployment.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ansible/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/benchmark/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/IMPORT
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/IMPORT
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/nrpe.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/volumes.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/mysql.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/apache.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/cluster.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/README.hardening.md
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/config.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apache.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apt.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/file.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/harden.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/apt.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/limits.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/login.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/pam.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/profile.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/securetty.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/login.defs
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/modules
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/securetty
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/tally2
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/config.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/config.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/templating.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/infiniband.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ip.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovn.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovsdb.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ufw.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/alternatives.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/deployment.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/cert_utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/context.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/exceptions.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/__init__.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ip.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/keystone.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/neutron.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/policyd.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ssh_migrations.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/ceph.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/git.upstart
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/logrotate
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/memcached.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-placement
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-zeromq
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/vendor_data.json
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templating.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/vaultlocker.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/peerstorage/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/python.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/saltstack/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/service.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/bcache.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/ceph.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/lvm.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/utils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/contexts.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/jinja.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/pyformat.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/unison/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/coordinator.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/decorators.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/files.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/fstab.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hookenv.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/centos.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/ubuntu.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hugepage.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/centos.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/ubuntu.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/base.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/helpers.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/strutils.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/sysctl.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/templating.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/unitdata.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/archiveurl.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/bzrurl.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/centos.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/giturl.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/debug.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/packages.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/rpdb.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/version.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/snap.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu_apt_pkg.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/osplatform.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/archive.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/execd.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/libansible.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/ns.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/proxy_cluster.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/sshproxy.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/__init__.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/charm.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/framework.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/jujuversion.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/lib/__init__.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/log.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/main.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/model.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/storage.py
 create mode 100755 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/testing.py
 create mode 100644 hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/version.py

diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/__init__.py
new file mode 100644
index 00000000..61ef9071
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
+import subprocess
+import sys
+
+try:
+    import six  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # NOQA:F401
+
+try:
+    import yaml  # NOQA:F401
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # NOQA:F401
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below. This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+    The date, which is a string in semi-ISO 8601 format, indicates the
+    year-month that the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call in order to log. If None, logs to
+        stdout.
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
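
For illustration, a minimal usage sketch of the decorator above (the wrapped function name is hypothetical; hookenv.log is the optional logger):

    from charmhelpers import deprecate
    from charmhelpers.core import hookenv

    @deprecate('use core/fetch/add_source() instead', date='2021-12',
               log=hookenv.log)
    def old_add_source(source):
        """Legacy wrapper kept only for API compatibility (hypothetical)."""
        ...

    # The first call logs "DEPRECATION WARNING: ..." once; later calls of the
    # same function stay quiet because of the __deprecated_functions cache.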
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/README.rst b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/README.rst
new file mode 100644
index 00000000..f7901c09
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/README.rst
@@ -0,0 +1,57 @@
+==========
+Commandant
+==========
+
+-----------------------------------------------------
+Automatic command-line interfaces to Python functions
+-----------------------------------------------------
+
+One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
+
+Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
+
+Goals
+=====
+
+* Single decorator to expose a function as a command.
+  * now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
+* Automatic analysis of function signature through ``inspect.getargspec()``
+* Command argument parser built automatically with ``argparse``
+* Interactive interpreter loop object made with ``Cmd``
+* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
+
+Other Important Features that need writing
+------------------------------------------
+
+* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
+* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
+  - Filename arguments are important, as good practice is for functions to accept file objects as parameters.
+  - choices arguments help to limit bad input before the function is called
+* Some automatic behaviour could make for better defaults, as long as the user can override them.
+  - We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
+  - We could automatically support hyphens as alternates for underscores
+  - Arguments defaulting to sequence types could support the ``append`` action.
+
+
+-----------------------------------------------------
+Implementing subcommands
+-----------------------------------------------------
+
+(WIP)
+
+So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
+
+Some examples::
+
+    from charmhelpers.cli import CommandLine
+    from charmhelpers.payload import execd
+    from charmhelpers.foo import bar
+
+    cli = CommandLine()
+
+    cli.subcommand(execd.execd_run)
+
+    @cli.subcommand_builder("bar", description="Bar baz qux")
+    def barcmd_builder(subparser):
+        subparser.add_argument('argument1', help="yackety")
+        return bar
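
To make the README's "automatic" goal concrete, here is a minimal sketch (the function is hypothetical, not part of the patch) of how a plain function becomes a subcommand:

    from charmhelpers.cli import cmdline

    @cmdline.subcommand()
    def greet(name, shout=False):
        """Print a greeting for NAME."""
        msg = 'Hello, {}!'.format(name)
        return msg.upper() if shout else msg

    # argparse now exposes a 'greet' subcommand with a positional 'name'
    # argument and a '--shout' option whose default is False.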
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/__init__.py
new file mode 100644
index 00000000..389b490f
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/__init__.py
@@ -0,0 +1,189 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import argparse
+import sys
+
+from six.moves import zip
+
+import charmhelpers.core.unitdata
+
+
+class OutputFormatter(object):
+    def __init__(self, outfile=sys.stdout):
+        self.formats = (
+            "raw",
+            "json",
+            "py",
+            "yaml",
+            "csv",
+            "tab",
+        )
+        self.outfile = outfile
+
+    def add_arguments(self, argument_parser):
+        formatgroup = argument_parser.add_mutually_exclusive_group()
+        choices = self.supported_formats
+        formatgroup.add_argument("--format", metavar='FMT',
+                                 help="Select output format for returned data, "
+                                      "where FMT is one of: {}".format(choices),
+                                 choices=choices, default='raw')
+        for fmt in self.formats:
+            fmtfunc = getattr(self, fmt)
+            formatgroup.add_argument("-{}".format(fmt[0]),
+                                     "--{}".format(fmt), action='store_const',
+                                     const=fmt, dest='format',
+                                     help=fmtfunc.__doc__)
+
+    @property
+    def supported_formats(self):
+        return self.formats
+
+    def raw(self, output):
+        """Output data as raw string (default)"""
+        if isinstance(output, (list, tuple)):
+            output = '\n'.join(map(str, output))
+        self.outfile.write(str(output))
+
+    def py(self, output):
+        """Output data as a nicely-formatted python data structure"""
+        import pprint
+        pprint.pprint(output, stream=self.outfile)
+
+    def json(self, output):
+        """Output data in JSON format"""
+        import json
+        json.dump(output, self.outfile)
+
+    def yaml(self, output):
+        """Output data in YAML format"""
+        import yaml
+        yaml.safe_dump(output, self.outfile)
+
+    def csv(self, output):
+        """Output data as excel-compatible CSV"""
+        import csv
+        csvwriter = csv.writer(self.outfile)
+        csvwriter.writerows(output)
+
+    def tab(self, output):
+        """Output data in excel-compatible tab-delimited format"""
+        import csv
+        csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
+        csvwriter.writerows(output)
+
+    def format_output(self, output, fmt='raw'):
+        fmtfunc = getattr(self, fmt)
+        fmtfunc(output)
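
A short sketch of the formatter in isolation (assumes PyYAML is installed; expected output shown in comments):

    import sys
    from charmhelpers.cli import OutputFormatter

    fmt = OutputFormatter(outfile=sys.stdout)
    fmt.format_output({'unit': 'vyos/0', 'ready': True}, 'yaml')
    # ready: true
    # unit: vyos/0
    fmt.format_output(['a', 'b'], 'raw')  # writes "a\nb", no trailing newline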
+
+
+class CommandLine(object):
+    argument_parser = None
+    subparsers = None
+    formatter = None
+    exit_code = 0
+
+    def __init__(self):
+        if not self.argument_parser:
+            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
+        if not self.formatter:
+            self.formatter = OutputFormatter()
+            self.formatter.add_arguments(self.argument_parser)
+        if not self.subparsers:
+            self.subparsers = self.argument_parser.add_subparsers(help='Commands')
+
+    def subcommand(self, command_name=None):
+        """
+        Decorate a function as a subcommand. Use its arguments as the
+        command-line arguments."""
+        def wrapper(decorated):
+            cmd_name = command_name or decorated.__name__
+            subparser = self.subparsers.add_parser(cmd_name,
+                                                   description=decorated.__doc__)
+            for args, kwargs in describe_arguments(decorated):
+                subparser.add_argument(*args, **kwargs)
+            subparser.set_defaults(func=decorated)
+            return decorated
+        return wrapper
+
+    def test_command(self, decorated):
+        """
+        Subcommand is a boolean test function, so bool return values should be
+        converted to a 0/1 exit code.
+        """
+        decorated._cli_test_command = True
+        return decorated
+
+    def no_output(self, decorated):
+        """
+        Subcommand is not expected to return a value, so don't print a spurious None.
+        """
+        decorated._cli_no_output = True
+        return decorated
+
+    def subcommand_builder(self, command_name, description=None):
+        """
+        Decorate a function that builds a subcommand. Builders should accept a
+        single argument (the subparser instance) and return the function to be
+        run as the command."""
+        def wrapper(decorated):
+            subparser = self.subparsers.add_parser(command_name)
+            func = decorated(subparser)
+            subparser.set_defaults(func=func)
+            subparser.description = description or func.__doc__
+        return wrapper
+
+    def run(self):
+        "Run cli, processing arguments and executing subcommands."
+        arguments = self.argument_parser.parse_args()
+        argspec = inspect.getargspec(arguments.func)
+        vargs = []
+        for arg in argspec.args:
+            vargs.append(getattr(arguments, arg))
+        if argspec.varargs:
+            vargs.extend(getattr(arguments, argspec.varargs))
+        output = arguments.func(*vargs)
+        if getattr(arguments.func, '_cli_test_command', False):
+            self.exit_code = 0 if output else 1
+            output = ''
+        if getattr(arguments.func, '_cli_no_output', False):
+            output = ''
+        self.formatter.format_output(output, arguments.format)
+        if charmhelpers.core.unitdata._KV:
+            charmhelpers.core.unitdata._KV.flush()
+
+
+cmdline = CommandLine()
+
+
+def describe_arguments(func):
+    """
+    Analyze a function's signature and return a data structure suitable for
+    passing in as arguments to an argparse parser's add_argument() method."""
+
+    argspec = inspect.getargspec(func)
+    # we should probably raise an exception somewhere if func includes **kwargs
+    if argspec.defaults:
+        positional_args = argspec.args[:-len(argspec.defaults)]
+        keyword_names = argspec.args[-len(argspec.defaults):]
+        for arg, default in zip(keyword_names, argspec.defaults):
+            yield ('--{}'.format(arg),), {'default': default}
+    else:
+        positional_args = argspec.args
+
+    for arg in positional_args:
+        yield (arg,), {}
+    if argspec.varargs:
+        yield (argspec.varargs,), {'nargs': '*'}
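
And a sketch of what describe_arguments() yields for a mixed signature (the function is hypothetical; results shown in comments):

    from charmhelpers.cli import describe_arguments

    def sample(host, port=8080, *extra):
        pass

    print(list(describe_arguments(sample)))
    # [(('--port',), {'default': 8080}),   # keyword arg -> option
    #  (('host',), {}),                    # positional arg
    #  (('extra',), {'nargs': '*'})]       # *varargs -> zero-or-more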
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/benchmark.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/benchmark.py
new file mode 100644
index 00000000..303af14b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/benchmark.py
@@ -0,0 +1,34 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.contrib.benchmark import Benchmark
+
+
+@cmdline.subcommand(command_name='benchmark-start')
+def start():
+    Benchmark.start()
+
+
+@cmdline.subcommand(command_name='benchmark-finish')
+def finish():
+    Benchmark.finish()
+
+
+@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
+def service(subparser):
+    subparser.add_argument("value", help="The composite score.")
+    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
+    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
+    return Benchmark.set_composite_score

diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/commands.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/commands.py
new file mode 100644
index 00000000..b9310565
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/commands.py
@@ -0,0 +1,30 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+This module loads sub-modules into the python runtime so they can be
+discovered via the inspect module. In order to prevent flake8 from (rightfully)
+telling us these are unused modules, throw a ' # noqa' at the end of each import
+so that the warning is suppressed.
+"""
+
+from . import CommandLine  # noqa
+
+"""
+Import the sub-modules which have decorated subcommands to register with chlp.
+"""
+from . import host  # noqa
+from . import benchmark  # noqa
+from . import unitdata  # noqa
+from . import hookenv  # noqa
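
Taken together, an entry-point script for the "chlp" tool mentioned in the docstring above presumably looks something like this sketch (the script itself is not part of the patch):

    # Importing cli.commands registers every decorated subcommand as a side
    # effect; run() then parses sys.argv and dispatches to the right function.
    from charmhelpers.cli import cmdline
    import charmhelpers.cli.commands  # noqa: registers host, benchmark, ...

    if __name__ == '__main__':
        cmdline.run()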
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/hookenv.py
new file mode 100644
index 00000000..bd72f448
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/hookenv.py
@@ -0,0 +1,21 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import hookenv
+
+
+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
+cmdline.subcommand('service-name')(hookenv.service_name)
+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)

diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/host.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/host.py
new file mode 100644
index 00000000..40396849
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/host.py
@@ -0,0 +1,29 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import host
+
+
+@cmdline.subcommand()
+def mounts():
+    "List mounts"
+    return host.mounts()
+
+
+@cmdline.subcommand_builder('service', description="Control system services")
+def service(subparser):
+    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
+    subparser.add_argument("service_name", help="Name of the service to control")
+    return host.service
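
For the 'service' builder above, argparse collects 'action' and 'service_name', and run() passes them positionally to the returned function, so the command line maps onto the helper roughly like this (sketch; the 'chlp' command name is an assumption):

    # chlp service restart ssh
    # ...is equivalent to:
    from charmhelpers.core import host

    host.service('restart', 'ssh')  # returns True when the command succeeds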
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/unitdata.py
new file mode 100644
index 00000000..acce846f
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/cli/unitdata.py
@@ -0,0 +1,46 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from . import cmdline
+from charmhelpers.core import unitdata
+
+
+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
+def unitdata_cmd(subparser):
+    nested = subparser.add_subparsers()
+
+    get_cmd = nested.add_parser('get', help='Retrieve data')
+    get_cmd.add_argument('key', help='Key to retrieve the value of')
+    get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
+    set_cmd = nested.add_parser('set', help='Store data')
+    set_cmd.add_argument('key', help='Key to set')
+    set_cmd.add_argument('value', help='Value to store')
+    set_cmd.set_defaults(action='set')
+
+    def _unitdata_cmd(action, key, value):
+        if action == 'get':
+            return unitdata.kv().get(key)
+        elif action == 'getrange':
+            return unitdata.kv().getrange(key)
+        elif action == 'set':
+            unitdata.kv().set(key, value)
+            unitdata.kv().flush()
+        return ''
+    return _unitdata_cmd
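
The Python equivalent of the three nested subcommands above, for reference (a sketch; kv() persists to the unit's local key/value store):

    from charmhelpers.core import unitdata

    kv = unitdata.kv()
    kv.set('build', '42')        # chlp unitdata set build 42
    kv.flush()                   # commit (run() also flushes after a command)
    print(kv.get('build'))       # chlp unitdata get build       -> 42
    print(kv.getrange('build'))  # chlp unitdata getrange build  -> matching keys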
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/context.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/context.py
new file mode 100644
index 00000000..01864740
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/context.py
@@ -0,0 +1,205 @@
+# Copyright 2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+A Pythonic API to interact with the charm hook environment.
+
+:author: Stuart Bishop <stuart.bishop@canonical.com>
+'''
+
+import six
+
+from charmhelpers.core import hookenv
+
+from collections import OrderedDict
+if six.PY3:
+    from collections import UserDict  # pragma: nocover
+else:
+    from UserDict import IterableUserDict as UserDict  # pragma: nocover
+
+
+class Relations(OrderedDict):
+    '''Mapping relation name -> relation id -> Relation.
+
+    >>> rels = Relations()
+    >>> rels['sprog']['sprog:12']['client/6']['widget']
+    'remote widget'
+    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
+    >>> rels['sprog']['sprog:12'].local['widget']
+    'local widget'
+    >>> rels.peer.local['widget']
+    'local widget on the peer relation'
+    '''
+    def __init__(self):
+        super(Relations, self).__init__()
+        for relname in sorted(hookenv.relation_types()):
+            self[relname] = OrderedDict()
+            relids = hookenv.relation_ids(relname)
+            relids.sort(key=lambda x: int(x.split(':', 1)[-1]))
+            for relid in relids:
+                self[relname][relid] = Relation(relid)
+
+    @property
+    def peer(self):
+        peer_relid = hookenv.peer_relation_id()
+        for rels in self.values():
+            if peer_relid in rels:
+                return rels[peer_relid]
+
+
+class Relation(OrderedDict):
+    '''Mapping of unit -> remote RelationInfo for a relation.
+
+    This is an OrderedDict mapping, ordered numerically by unit number.
+
+    Also provides access to the local RelationInfo, and peer RelationInfo
+    instances by the 'local' and 'peers' attributes.
+
+    >>> r = Relation('sprog:12')
+    >>> r.keys()
+    ['client/9', 'client/10']  # Ordered numerically
+    >>> r['client/10']['widget']  # A remote RelationInfo setting
+    'remote widget'
+    >>> r.local['widget']  # The local RelationInfo setting
+    'local widget'
+    '''
+    relid = None    # The relation id.
+    relname = None  # The relation name (also known as relation type).
+    service = None  # The remote service name, if known.
+    local = None    # The local end's RelationInfo.
+    peers = None    # Map of peer -> RelationInfo. None if no peer relation.
+
+    def __init__(self, relid):
+        remote_units = hookenv.related_units(relid)
+        remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
+        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
+                                       for unit in remote_units)
+
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.local = RelationInfo(relid, hookenv.local_unit())
+
+        for relinfo in self.values():
+            self.service = relinfo.service
+            break
+
+        # If we have peers, and they have joined both the provided peer
+        # relation and this relation, we can peek at their data too.
+        # This is useful for creating consensus without leadership.
+        peer_relid = hookenv.peer_relation_id()
+        if peer_relid and peer_relid != relid:
+            peers = hookenv.related_units(peer_relid)
+            if peers:
+                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
+                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
+                                         for peer in peers)
+            else:
+                self.peers = OrderedDict()
+        else:
+            self.peers = None
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.service)
+
+
+class RelationInfo(UserDict):
+    '''The bag of data at an end of a relation.
+
+    Every unit participating in a relation has a single bag of
+    data associated with that relation. This is that bag.
+
+    The bag of data for the local unit may be updated. Remote data
+    is immutable and will remain static for the duration of the hook.
+
+    Changes made to the local unit's relation data only become visible
+    to other units after the hook completes successfully. If the hook
+    does not complete successfully, the changes are rolled back.
+
+    Unlike standard Python mappings, setting an item to None is the
+    same as deleting it.
+
+    >>> relinfo = RelationInfo('db:12')  # Default is the local unit.
+    >>> relinfo['user'] = 'fred'
+    >>> relinfo['user']
+    'fred'
+    >>> relinfo['user'] = None
+    >>> 'fred' in relinfo
+    False
+
+    This class wraps hookenv.relation_get and hookenv.relation_set.
+    All caching is left up to these two methods to avoid synchronization
+    issues. Data is only loaded on demand.
+    '''
+    relid = None    # The relation id.
+    relname = None  # The relation name (also known as the relation type).
+    unit = None     # The unit id.
+    number = None   # The unit number (integer).
+    service = None  # The service name.
+
+    def __init__(self, relid, unit):
+        self.relname = relid.split(':', 1)[0]
+        self.relid = relid
+        self.unit = unit
+        self.service, num = self.unit.split('/', 1)
+        self.number = int(num)
+
+    def __str__(self):
+        return '{} ({})'.format(self.relid, self.unit)
+
+    @property
+    def data(self):
+        return hookenv.relation_get(rid=self.relid, unit=self.unit)
+
+    def __setitem__(self, key, value):
+        if self.unit != hookenv.local_unit():
+            raise TypeError('Attempting to set {} on remote unit {}'
+                            ''.format(key, self.unit))
+        if value is not None and not isinstance(value, six.string_types):
+            # We don't do implicit casting. This would cause simple
+            # types like integers to be read back as strings in subsequent
+            # hooks, and mutable types would require a lot of wrapping
+            # to ensure relation-set gets called when they are mutated.
+            raise ValueError('Only string values allowed')
+        hookenv.relation_set(self.relid, {key: value})
+
+    def __delitem__(self, key):
+        # Deleting a key and setting it to null is the same thing in
+        # Juju relations.
+        self[key] = None
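
A sketch of the string-only contract in practice, as it might appear inside a hypothetical relation-changed hook (relation name and id are made up):

    from charmhelpers.context import Relations

    rels = Relations()
    rel = rels['db']['db:12']      # hypothetical relation id
    rel.local['port'] = '5432'     # allowed: strings only
    # rel.local['port'] = 5432     # would raise ValueError('Only string values allowed')
    del rel.local['port']          # same as rel.local['port'] = None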
+            # We don't do implicit casting. This would cause simple
+            # types like integers to be read back as strings in subsequent
+            # hooks, and mutable types would require a lot of wrapping
+            # to ensure relation-set gets called when they are mutated.
+            raise ValueError('Only string values allowed')
+        hookenv.relation_set(self.relid, {key: value})
+
+    def __delitem__(self, key):
+        # Deleting a key and setting it to null is the same thing in
+        # Juju relations.
+        self[key] = None
+
+
+class Leader(UserDict):
+    def __init__(self):
+        pass  # Don't call superclass initializer, as it will nuke self.data
+
+    @property
+    def data(self):
+        return hookenv.leader_get()
+
+    def __setitem__(self, key, value):
+        if not hookenv.is_leader():
+            raise TypeError('Not the leader. Cannot change leader settings.')
+        if value is not None and not isinstance(value, six.string_types):
+            # We don't do implicit casting. This would cause simple
+            # types like integers to be read back as strings in subsequent
+            # hooks, and mutable types would require a lot of wrapping
+            # to ensure leader-set gets called when they are mutated.
+            raise ValueError('Only string values allowed')
+        hookenv.leader_set({key: value})
+
+    def __delitem__(self, key):
+        # Deleting a key and setting it to null is the same thing in
+        # Juju leadership settings.
+        self[key] = None
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
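As a reading aid (not part of the patch itself), here is a minimal sketch of how the context.py API above might be used from a hook. The 'database' relation name and the 'dsn', 'client-id' and 'epoch' keys are hypothetical; per the semantics above, only string values may be published, and only on the local end (or through leader settings, on the leader unit):

    from charmhelpers import context
    from charmhelpers.core import hookenv

    def database_relation_changed():
        rels = context.Relations()
        for relid, rel in rels['database'].items():
            for unit, relinfo in rel.items():
                # Remote data is read-only for the duration of the hook.
                hookenv.log('{} sent dsn={}'.format(unit, relinfo.get('dsn')))
            # Only the local bag may be written, and only with strings.
            rel.local['client-id'] = hookenv.local_unit()

        if hookenv.is_leader():
            leader = context.Leader()
            leader['epoch'] = str(int(leader.get('epoch', '0')) + 1)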
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/deployment.py
new file mode 100644
index 00000000..d21d01d8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/deployment.py
@@ -0,0 +1,99 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import amulet
+import os
+import six
+
+
+class AmuletDeployment(object):
+    """Amulet deployment.
+
+       This class provides generic Amulet deployment and test runner
+       methods.
+    """
+
+    def __init__(self, series=None):
+        """Initialize the deployment environment."""
+        self.series = None
+
+        if series:
+            self.series = series
+            self.d = amulet.Deployment(series=self.series)
+        else:
+            self.d = amulet.Deployment()
+
+    def _add_services(self, this_service, other_services):
+        """Add services.
+
+           Add services to the deployment where this_service is the local charm
+           that we're testing and other_services are the other services that
+           are being used in the local amulet tests.
+        """
+        if this_service['name'] != os.path.basename(os.getcwd()):
+            s = this_service['name']
+            msg = "The charm's root directory name needs to be {}".format(s)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        if 'units' not in this_service:
+            this_service['units'] = 1
+
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'),
+                   storage=this_service.get('storage'))
+
+        for svc in other_services:
+            if 'location' in svc:
+                branch_location = svc['location']
+            elif self.series:
+                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
+            else:
+                branch_location = None
+
+            if 'units' not in svc:
+                svc['units'] = 1
+
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'),
+                       storage=svc.get('storage'))
+
+    def _add_relations(self, relations):
+        """Add all of the relations for the services."""
+        for k, v in six.iteritems(relations):
+            self.d.relate(k, v)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        for service, config in six.iteritems(configs):
+            self.d.configure(service, config)
+
+    def _deploy(self):
+        """Deploy environment and wait for all hooks to finish executing."""
+        timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
+        try:
+            self.d.setup(timeout=timeout)
+            self.d.sentry.wait(timeout=timeout)
+        except amulet.helpers.TimeoutError:
+            amulet.raise_status(
+                amulet.FAIL,
+                msg="Deployment timed out ({}s)".format(timeout)
+            )
+        except Exception:
+            raise
+
+    def run_tests(self):
+        """Run all of the methods that are prefixed with 'test_'."""
+        for test in dir(self):
+            if test.startswith('test_'):
+                getattr(self, test)()
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/utils.py
new file mode 100644
index
00000000..54283088 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,820 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import json +import logging +import os +import re +import socket +import subprocess +import sys +import time +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. + + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.ssh(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. 
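+        # A hypothetical migration of a single check (the sentry and
+        # service names below are placeholders, not from this codebase):
+        #
+        #     # old style, raw status commands per sentry:
+        #     u.validate_services({mysql_sentry: ['status mysql']})
+        #     # init-system-aware replacement, service names only:
+        #     u.validate_services_by_name({mysql_sentry: ['mysql']})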
+ self.log.warn('DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. + + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name in ['rabbitmq-server', 'apache2', + 'memcached']): + # init is systemd (or regular sysv) + cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output + + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = configparser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
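+
+        Example (a sketch only; the file, section and option names are
+        illustrative, not taken from a real deployment)::
+
+            expected = {'use_syslog': 'False',
+                        'bind_port': u.not_null}
+            ret = u.validate_config_data(sentry, '/etc/foo/foo.conf',
+                                         'DEFAULT', expected)
+            if ret:
+                amulet.raise_status(amulet.FAIL, msg=ret)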
+ """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. + """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. 
+
+        :sentry_unit: The sentry unit to check for the service on
+        :service: service name to look for in process table
+        :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+        :returns: epoch time of service process start
+        """
+        pid_list = self.get_process_id_list(
+            sentry_unit, service, pgrep_full=pgrep_full)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)
+
+    def service_restarted(self, sentry_unit, service, filename,
+                          pgrep_full=None, sleep_time=20):
+        """Check if service was restarted.
+
+        Compare a service's start time vs a file's last modification time
+        (such as a config file for that service) to determine if the service
+        has been restarted.
+        """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+        self.log.warn('DEPRECATION WARNING: use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
+        time.sleep(sleep_time)
+        if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
+                self._get_file_mtime(sentry_unit, filename)):
+            return True
+        else:
+            return False
+
+    def service_restarted_since(self, sentry_unit, mtime, service,
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
+        """Check if service was started after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if service found and its start time is newer than mtime,
+                False if service is older than mtime or if service was
+                not found.
+        """
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
+        time.sleep(sleep_time)
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not proc_start_time:
+            self.log.warn('No proc start time found, assuming service did '
+                          'not start')
+            return False
+        if proc_start_time >= mtime:
+            self.log.debug('Proc start time is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
+            return False
+
+    def config_updated_since(self, sentry_unit, filename, mtime,
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
+        """Check if file was modified after a given time.
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check the file mtime on
+          filename (string): The file to check mtime of
+          mtime (float): The epoch time to check against
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry
+
+        Returns:
+          bool: True if file was modified more recently than mtime, False if
+                file was modified before mtime, or if file not found.
+        """
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
+        time.sleep(sleep_time)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
+        if file_mtime >= mtime:
+            self.log.debug('File mtime is newer than provided mtime '
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
+            return True
+        else:
+            self.log.warn('File mtime is older than provided mtime '
+                          '(%s < %s) on %s' % (file_mtime,
+                                               mtime, unit_name))
+            return False
+
+    def validate_service_config_changed(self, sentry_unit, mtime, service,
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
+        """Check service and file were updated after mtime
+
+        Args:
+          sentry_unit (sentry): The sentry unit to check for the service on
+          mtime (float): The epoch time to check against
+          service (string): service name to look for in process table
+          filename (string): The file to check mtime of
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
+          retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries
+
+        Typical Usage:
+            u = OpenStackAmuletUtils(ERROR)
+            ...
+            mtime = u.get_sentry_time(self.cinder_sentry)
+            self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
+            if not u.validate_service_config_changed(self.cinder_sentry,
+                                                     mtime,
+                                                     'cinder-api',
+                                                     '/etc/cinder/cinder.conf'):
+                amulet.raise_status(amulet.FAIL, msg='update failed')
+
+        Returns:
+          bool: True if both service and file were updated/restarted after
+                mtime, False if service is older than mtime or if service was
+                not found or if filename was modified before mtime.
+        """
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep. pgrep_full is still passed through to ensure
+        # deprecation WARNS. lp1474030
+
+        service_restart = self.service_restarted_since(
+            sentry_unit, mtime,
+            service,
+            pgrep_full=pgrep_full,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        config_update = self.config_updated_since(
+            sentry_unit,
+            filename,
+            mtime,
+            sleep_time=sleep_time,
+            retry_count=retry_count,
+            retry_sleep_time=retry_sleep_time)
+
+        return service_restart and config_update
+
+    def get_sentry_time(self, sentry_unit):
+        """Return current epoch time on a sentry"""
+        cmd = "date +'%s'"
+        return float(sentry_unit.run(cmd)[0])
+
+    def relation_error(self, name, data):
+        return 'unexpected relation data in {} - {}'.format(name, data)
+
+    def endpoint_error(self, name, data):
+        return 'unexpected endpoint data in {} - {}'.format(name, data)
+
+    def get_ubuntu_releases(self):
+        """Return a list of all Ubuntu releases in order of release."""
+        _d = distro_info.UbuntuDistroInfo()
+        _release_list = _d.all
+        return _release_list
+
+    def file_to_url(self, file_rel_path):
+        """Convert a relative file path to a file URL."""
+        _abs_path = os.path.abspath(file_rel_path)
+        return urlparse.urlparse(_abs_path, scheme='file').geturl()
+
+    def check_commands_on_units(self, commands, sentry_units):
+        """Check that all commands in a list exit zero on all
+        sentry units in a list.
+
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        self.log.debug('Checking exit codes for {} commands on {} '
+                       'sentry units...'.format(len(commands),
+                                                len(sentry_units)))
+        for sentry_unit in sentry_units:
+            for cmd in commands:
+                output, code = sentry_unit.run(cmd)
+                if code == 0:
+                    self.log.debug('{} `{}` returned {} '
+                                   '(OK)'.format(sentry_unit.info['unit_name'],
+                                                 cmd, code))
+                else:
+                    return ('{} `{}` returned {} '
+                            '{}'.format(sentry_unit.info['unit_name'],
+                                        cmd, code, output))
+        return None
+
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True, pgrep_full=False):
+        """Get a list of process ID(s) from a single sentry juju unit
+        for a single process name.
+
+        :param sentry_unit: Amulet sentry instance (juju unit)
+        :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
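+        :param pgrep_full: Use full command line search mode with pgrep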
+ :returns: List of process IDs + """ + if pgrep_full: + cmd = 'pgrep -f "{}"'.format(process_name) + else: + cmd = 'pidof -x "{}"'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids( + self, unit_processes, expect_success=True, pgrep_full=False): + """Construct a dict containing unit sentries, process names, and + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ + pid_dict = {} + for sentry_unit, process_list in six.iteritems(unit_processes): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success, + pgrep_full=pgrep_full) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in six.iteritems(expected): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + fail_msg = ('PID count mismatch. 
{} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids, a_pids_length, + a_pids)) + + # If expected is a list, ensure at least one PID quantity match + if isinstance(e_pids, list) and \ + a_pids_length not in e_pids: + return fail_msg + # If expected is not bool and not list, + # ensure PID quantities match + elif not isinstance(e_pids, bool) and \ + not isinstance(e_pids, list) and \ + a_pids_length != e_pids: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids, bool) and \ + e_pids is False and a_pids_length != 0: + return fail_msg + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. 
Optionally raise if fatal is True."""
+        unit_name = sentry_unit.info['unit_name']
+        file_contents = False
+        tries = 0
+        while not file_contents and tries < (max_wait / 4):
+            try:
+                file_contents = sentry_unit.file_contents(file_name)
+            except IOError:
+                self.log.debug('Attempt {} to open file {} from {} '
+                               'failed'.format(tries, file_name,
+                                               unit_name))
+                time.sleep(4)
+                tries += 1
+
+        if file_contents:
+            return file_contents
+        elif not fatal:
+            return None
+        elif fatal:
+            msg = 'Failed to get file contents from unit.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def port_knock_tcp(self, host="localhost", port=22, timeout=15):
+        """Open a TCP socket to check for a listening service on a host.
+
+        :param host: host name or IP address, default to localhost
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :returns: True if successful, False if connect failed
+        """
+
+        # Resolve host name if possible
+        try:
+            connect_host = socket.gethostbyname(host)
+            host_human = "{} ({})".format(connect_host, host)
+        except socket.error as e:
+            self.log.warn('Unable to resolve address: '
+                          '{} ({}) Trying anyway!'.format(host, e))
+            connect_host = host
+            host_human = connect_host
+
+        # Attempt socket connection
+        try:
+            knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            knock.settimeout(timeout)
+            knock.connect((connect_host, port))
+            knock.close()
+            self.log.debug('Socket connect OK for host '
+                           '{} on port {}.'.format(host_human, port))
+            return True
+        except socket.error as e:
+            self.log.debug('Socket connect FAIL for'
+                           ' {} port {} ({})'.format(host_human, port, e))
+            return False
+
+    def port_knock_units(self, sentry_units, port=22,
+                         timeout=15, expect_success=True):
+        """Open a TCP socket to check for a listening service on each
+        listed juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param port: TCP port number, default to 22
+        :param timeout: Connect timeout, default to 15 seconds
+        :param expect_success: True by default, set False to invert logic
+        :returns: None if successful, Failure message otherwise
+        """
+        for unit in sentry_units:
+            host = unit.info['public-address']
+            connected = self.port_knock_tcp(host, port, timeout)
+            if not connected and expect_success:
+                return 'Socket connect failed.'
+            elif connected and not expect_success:
+                return 'Socket connected unexpectedly.'
+
+    def get_uuid_epoch_stamp(self):
+        """Returns a stamp string based on uuid4 and epoch time. Useful in
+        generating test messages which need to be unique-ish."""
+        return '[{}-{}]'.format(uuid.uuid4(), time.time())
+
+    # amulet juju action helpers:
+    def run_action(self, unit_sentry, action,
+                   _check_output=subprocess.check_output,
+                   params=None):
+        """Translate to amulet's built in run_action(). Deprecated.
+
+        Run the named action on a given unit sentry.
+
+        params a dict of parameters to use
+        _check_output parameter is no longer used
+
+        @return action_id.
+        """
+        self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
+                      'deprecated for amulet.run_action')
+        return unit_sentry.run_action(action, action_args=params)
+
+    def wait_on_action(self, action_id, _check_output=subprocess.check_output):
+        """Wait for a given action, returning if it completed or not.
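+
+        Example (a sketch; ``u`` is an AmuletUtils instance and
+        ``unit_sentry`` a deployed unit's sentry, both assumed from
+        test context, and 'pause' is a hypothetical action name)::
+
+            action_id = u.run_action(unit_sentry, 'pause')
+            assert u.wait_on_action(action_id), 'pause did not complete'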
+ + action_id a string action uuid + _check_output parameter is no longer used + """ + data = amulet.actions.get_action_output(action_id, full_output=True) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ansible/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ansible/__init__.py new file mode 100644 index 00000000..824780ac --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ansible/__init__.py @@ -0,0 +1,303 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers <juju@lists.ubuntu.com> +""" +The ansible package enables you to easily use the configuration management +tool `Ansible`_ to setup and configure your charm. All of your charm +configuration options and relation-data are available as regular Ansible +variables which can be used in your playbooks and templates. + +.. _Ansible: https://www.ansible.com/ + +Usage +===== + +Here is an example directory structure for a charm to get you started:: + + charm-ansible-example/ + |-- ansible + | |-- playbook.yaml + | `-- templates + | `-- example.j2 + |-- config.yaml + |-- copyright + |-- icon.svg + |-- layer.yaml + |-- metadata.yaml + |-- reactive + | `-- example.py + |-- README.md + +Running a playbook called ``playbook.yaml`` when the ``install`` hook is run +can be as simple as:: + + from charmhelpers.contrib import ansible + from charms.reactive import hook + + @hook('install') + def install(): + ansible.install_ansible_support() + ansible.apply_playbook('ansible/playbook.yaml') + +Here is an example playbook that uses the ``template`` module to template the +file ``example.j2`` to the charm host and then uses the ``debug`` module to +print out all the host and Juju variables that you can use in your playbooks. +Note that you must target ``localhost`` as the playbook is run locally on the +charm host:: + + --- + - hosts: localhost + tasks: + - name: Template a file + template: + src: templates/example.j2 + dest: /tmp/example.j2 + + - name: Print all variables available to Ansible + debug: + var: vars + +Read more online about `playbooks`_ and standard Ansible `modules`_. + +.. _playbooks: https://docs.ansible.com/ansible/latest/user_guide/playbooks.html +.. _modules: https://docs.ansible.com/ansible/latest/user_guide/modules.html + +A further feature of the Ansible hooks is to provide a light weight "action" +scripting tool. 
This is a decorator that you apply to a function, and that +function can now receive cli args, and can pass extra args to the playbook:: + + @hooks.action() + def some_action(amount, force="False"): + "Usage: some-action AMOUNT [force=True]" # <-- shown on error + # process the arguments + # do some calls + # return extra-vars to be passed to ansible-playbook + return { + 'amount': int(amount), + 'type': force, + } + +You can now create a symlink to hooks.py that can be invoked like a hook, but +with cli params:: + + # link actions/some-action to hooks/hooks.py + + actions/some-action amount=10 force=true + +Install Ansible via pip +======================= + +If you want to install a specific version of Ansible via pip instead of +``install_ansible_support`` which uses APT, consider using the layer options +of `layer-basic`_ to install Ansible in a virtualenv:: + + options: + basic: + python_packages: ['ansible==2.9.0'] + include_system_packages: true + use_venv: true + +.. _layer-basic: https://charmsreactive.readthedocs.io/en/latest/layer-basic.html#layer-configuration + +""" +import os +import json +import stat +import subprocess +import functools + +import charmhelpers.contrib.templating.contexts +import charmhelpers.core.host +import charmhelpers.core.hookenv +import charmhelpers.fetch + + +charm_dir = os.environ.get('CHARM_DIR', '') +ansible_hosts_path = '/etc/ansible/hosts' +# Ansible will automatically include any vars in the following +# file in its inventory when run locally. +ansible_vars_path = '/etc/ansible/host_vars/localhost' + + +def install_ansible_support(from_ppa=True, ppa_location='ppa:ansible/ansible'): + """Installs Ansible via APT. + + By default this installs Ansible from the `PPA`_ linked from + the Ansible `website`_ or from a PPA set in ``ppa_location``. + + .. _PPA: https://launchpad.net/~ansible/+archive/ubuntu/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If ``from_ppa`` is ``False``, then Ansible will be installed from + Ubuntu's Universe repositories. + """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install('ansible') + with open(ansible_hosts_path, 'w+') as hosts_file: + hosts_file.write('localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp') + + +def apply_playbook(playbook, tags=None, extra_vars=None): + """Run a playbook. + + This helper runs a playbook with juju state variables as context, + therefore variables set in application config can be used directly. + List of tags (--tags) and dictionary with extra_vars (--extra-vars) + can be passed as additional parameters. + + Read more about playbook `_variables`_ online. + + .. 
_variables: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html + + Example:: + + # Run ansible/playbook.yaml with tag install and pass extra + # variables var_a and var_b + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['install'], + extra_vars={'var_a': 'val_a', 'var_b': 'val_b'} + ) + + # Run ansible/playbook.yaml with tag config and extra variable nested, + # which is passed as json and can be used as dictionary in playbook + apply_playbook( + playbook='ansible/playbook.yaml', + tags=['config'], + extra_vars={'nested': {'a': 'value1', 'b': 'value2'}} + ) + + # Custom config file can be passed within extra_vars + apply_playbook( + playbook='ansible/playbook.yaml', + extra_vars="@some_file.json" + ) + + """ + tags = tags or [] + tags = ",".join(tags) + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + ansible_vars_path, namespace_separator='__', + allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) + + # we want ansible's log output to be unbuffered + env = os.environ.copy() + env['PYTHONUNBUFFERED'] = "1" + call = [ + 'ansible-playbook', + '-c', + 'local', + playbook, + ] + if tags: + call.extend(['--tags', '{}'.format(tags)]) + if extra_vars: + call.extend(['--extra-vars', json.dumps(extra_vars)]) + subprocess.check_call(call, env=env) + + +class AnsibleHooks(charmhelpers.core.hookenv.Hooks): + """Run a playbook with the hook-name as the tag. + + This helper builds on the standard hookenv.Hooks helper, + but additionally runs the playbook with the hook-name specified + using --tags (ie. running all the tasks tagged with the hook-name). + + Example:: + + hooks = AnsibleHooks(playbook_path='ansible/my_machine_state.yaml') + + # All the tasks within my_machine_state.yaml tagged with 'install' + # will be run automatically after do_custom_work() + @hooks.hook() + def install(): + do_custom_work() + + # For most of your hooks, you won't need to do anything other + # than run the tagged tasks for the hook: + @hooks.hook('config-changed', 'start', 'stop') + def just_use_playbook(): + pass + + # As a convenience, you can avoid the above noop function by specifying + # the hooks which are handled by ansible-only and they'll be registered + # for you: + # hooks = AnsibleHooks( + # 'ansible/my_machine_state.yaml', + # default_hooks=['config-changed', 'start', 'stop']) + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + """ + + def __init__(self, playbook_path, default_hooks=None): + """Register any hooks handled by ansible.""" + super(AnsibleHooks, self).__init__() + + self._actions = {} + self.playbook_path = playbook_path + + default_hooks = default_hooks or [] + + def noop(*args, **kwargs): + pass + + for hook in default_hooks: + self.register(hook, noop) + + def register_action(self, name, function): + """Register a hook""" + self._actions[name] = function + + def execute(self, args): + """Execute the hook followed by the playbook using the hook as tag.""" + hook_name = os.path.basename(args[0]) + extra_vars = None + if hook_name in self._actions: + extra_vars = self._actions[hook_name](args[1:]) + else: + super(AnsibleHooks, self).execute(args) + + charmhelpers.contrib.ansible.apply_playbook( + self.playbook_path, tags=[hook_name], extra_vars=extra_vars) + + def action(self, *action_names): + """Decorator, registering them as actions""" + def action_wrapper(decorated): + + @functools.wraps(decorated) + def wrapper(argv): + kwargs = dict(arg.split('=') for arg in 
argv) + try: + return decorated(**kwargs) + except TypeError as e: + if decorated.__doc__: + e.args += (decorated.__doc__,) + raise + + self.register_action(decorated.__name__, wrapper) + if '_' in decorated.__name__: + self.register_action( + decorated.__name__.replace('_', '-'), wrapper) + + return wrapper + + return action_wrapper diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/benchmark/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/benchmark/__init__.py new file mode 100644 index 00000000..c35f7fe7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/benchmark/__init__.py @@ -0,0 +1,124 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import time +import os +from distutils.spawn import find_executable + +from charmhelpers.core.hookenv import ( + in_relation_hook, + relation_ids, + relation_set, + relation_get, +) + + +def action_set(key, val): + if find_executable('action-set'): + action_cmd = ['action-set'] + + if isinstance(val, dict): + for k, v in iter(val.items()): + action_set('%s.%s' % (key, k), v) + return True + + action_cmd.append('%s=%s' % (key, val)) + subprocess.check_call(action_cmd) + return True + return False + + +class Benchmark(): + """ + Helper class for the `benchmark` interface. + + :param list actions: Define the actions that are also benchmarks + + From inside the benchmark-relation-changed hook, you would + Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) + + Examples: + + siege = Benchmark(['siege']) + siege.start() + [... run siege ...] + # The higher the score, the better the benchmark + siege.set_composite_score(16.70, 'trans/sec', 'desc') + siege.finish() + + + """ + + BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing + + required_keys = [ + 'hostname', + 'port', + 'graphite_port', + 'graphite_endpoint', + 'api_port' + ] + + def __init__(self, benchmarks=None): + if in_relation_hook(): + if benchmarks is not None: + for rid in sorted(relation_ids('benchmark')): + relation_set(relation_id=rid, relation_settings={ + 'benchmarks': ",".join(benchmarks) + }) + + # Check the relation data + config = {} + for key in self.required_keys: + val = relation_get(key) + if val is not None: + config[key] = val + else: + # We don't have all of the required keys + config = {} + break + + if len(config): + with open(self.BENCHMARK_CONF, 'w') as f: + for key, val in iter(config.items()): + f.write("%s=%s\n" % (key, val)) + + @staticmethod + def start(): + action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + """ + If the collectd charm is also installed, tell it to send a snapshot + of the current profile data. 
+ """ + COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' + if os.path.exists(COLLECT_PROFILE_DATA): + subprocess.check_output([COLLECT_PROFILE_DATA]) + + @staticmethod + def finish(): + action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + @staticmethod + def set_composite_score(value, units, direction='asc'): + """ + Set the composite score for a benchmark run. This is a single number + representative of the benchmark results. This could be the most + important metric, or an amalgamation of metric scores. + """ + return action_set( + "meta.composite", + {'value': value, 'units': units, 'direction': direction} + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/IMPORT b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/IMPORT new file mode 100644 index 00000000..d41cb041 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/IMPORT @@ -0,0 +1,4 @@ +Source lp:charm-tools/trunk + +charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py +charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/__init__.py new file mode 100644 index 00000000..ed63e812 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmhelpers/__init__.py @@ -0,0 +1,203 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa + +import operator +import tempfile +import time +import yaml +import subprocess + +import six +if six.PY3: + from urllib.request import urlopen + from urllib.error import (HTTPError, URLError) +else: + from urllib2 import (urlopen, HTTPError, URLError) + +"""Helper functions for writing Juju charms in Python.""" + +__metaclass__ = type +__all__ = [ + # 'get_config', # core.hookenv.config() + # 'log', # core.hookenv.log() + # 'log_entry', # core.hookenv.log() + # 'log_exit', # core.hookenv.log() + # 'relation_get', # core.hookenv.relation_get() + # 'relation_set', # core.hookenv.relation_set() + # 'relation_ids', # core.hookenv.relation_ids() + # 'relation_list', # core.hookenv.relation_units() + # 'config_get', # core.hookenv.config() + # 'unit_get', # core.hookenv.unit_get() + # 'open_port', # core.hookenv.open_port() + # 'close_port', # core.hookenv.close_port() + # 'service_control', # core.host.service() + 'unit_info', # client-side, NOT IMPLEMENTED + 'wait_for_machine', # client-side, NOT IMPLEMENTED + 'wait_for_page_contents', # client-side, NOT IMPLEMENTED + 'wait_for_relation', # client-side, NOT IMPLEMENTED + 'wait_for_unit', # client-side, NOT IMPLEMENTED +] + + +SLEEP_AMOUNT = 0.1 + + +# We create a juju_status Command here because it makes testing much, +# much easier. +def juju_status(): + subprocess.check_call(['juju', 'status']) + +# re-implemented as charmhelpers.fetch.configure_sources() +# def configure_source(update=False): +# source = config_get('source') +# if ((source.startswith('ppa:') or +# source.startswith('cloud:') or +# source.startswith('http:'))): +# run('add-apt-repository', source) +# if source.startswith("http:"): +# run('apt-key', 'import', config_get('key')) +# if update: +# run('apt-get', 'update') + + +# DEPRECATED: client-side only +def make_charm_config_file(charm_config): + charm_config_file = tempfile.NamedTemporaryFile(mode='w+') + charm_config_file.write(yaml.dump(charm_config)) + charm_config_file.flush() + # The NamedTemporaryFile instance is returned instead of just the name + # because we want to take advantage of garbage collection-triggered + # deletion of the temp file when it goes out of scope in the caller. + return charm_config_file + + +# DEPRECATED: client-side only +def unit_info(service_name, item_name, data=None, unit=None): + if data is None: + data = yaml.safe_load(juju_status()) + service = data['services'].get(service_name) + if service is None: + # XXX 2012-02-08 gmb: + # This allows us to cope with the race condition that we + # have between deploying a service and having it come up in + # `juju status`. We could probably do with cleaning it up so + # that it fails a bit more noisily after a while. + return '' + units = service['units'] + if unit is not None: + item = units[unit][item_name] + else: + # It might seem odd to sort the units here, but we do it to + # ensure that when no unit is specified, the first unit for the + # service (or at least the one with the lowest number) is the + # one whose data gets returned. + sorted_unit_names = sorted(units.keys()) + item = units[sorted_unit_names[0]][item_name] + return item + + +# DEPRECATED: client-side only +def get_machine_data(): + return yaml.safe_load(juju_status())['machines'] + + +# DEPRECATED: client-side only +def wait_for_machine(num_machines=1, timeout=300): + """Wait `timeout` seconds for `num_machines` machines to come up. + + This wait_for... 
function can be called by other wait_for functions + whose timeouts might be too short in situations where only a bare + Juju setup has been bootstrapped. + + :return: A tuple of (num_machines, time_taken). This is used for + testing. + """ + # You may think this is a hack, and you'd be right. The easiest way + # to tell what environment we're working in (LXC vs EC2) is to check + # the dns-name of the first machine. If it's localhost we're in LXC + # and we can just return here. + if get_machine_data()[0]['dns-name'] == 'localhost': + return 1, 0 + start_time = time.time() + while True: + # Drop the first machine, since it's the Zookeeper and that's + # not a machine that we need to wait for. This will only work + # for EC2 environments, which is why we return early above if + # we're in LXC. + machine_data = get_machine_data() + non_zookeeper_machines = [ + machine_data[key] for key in list(machine_data.keys())[1:]] + if len(non_zookeeper_machines) >= num_machines: + all_machines_running = True + for machine in non_zookeeper_machines: + if machine.get('instance-state') != 'running': + all_machines_running = False + break + if all_machines_running: + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + return num_machines, time.time() - start_time + + +# DEPRECATED: client-side only +def wait_for_unit(service_name, timeout=480): + """Wait `timeout` seconds for a given service name to come up.""" + wait_for_machine(num_machines=1) + start_time = time.time() + while True: + state = unit_info(service_name, 'agent-state') + if 'error' in state or state == 'started': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + if state != 'started': + raise RuntimeError('unit did not start, agent-state: ' + state) + + +# DEPRECATED: client-side only +def wait_for_relation(service_name, relation_name, timeout=120): + """Wait `timeout` seconds for a given relation to come up.""" + start_time = time.time() + while True: + relation = unit_info(service_name, 'relations').get(relation_name) + if relation is not None and relation['state'] == 'up': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for relation to be up') + time.sleep(SLEEP_AMOUNT) + + +# DEPRECATED: client-side only +def wait_for_page_contents(url, contents, timeout=120, validate=None): + if validate is None: + validate = operator.contains + start_time = time.time() + while True: + try: + stream = urlopen(url) + except (HTTPError, URLError): + pass + else: + page = stream.read() + if validate(page, contents): + return page + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for contents of ' + url) + time.sleep(SLEEP_AMOUNT) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/IMPORT b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/IMPORT new file mode 100644 index 00000000..554fddda --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/IMPORT @@ -0,0 +1,14 @@ +Source: lp:charmsupport/trunk + +charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py +charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py +charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py 
+charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py +charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py + +charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py +charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py +charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py +charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py + +charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/nrpe.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 00000000..d775861b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,500 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood <matthew.wedgwood@canonical.com> + +import subprocess +import pwd +import grp +import os +import glob +import shutil +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + hook_name, + local_unit, + log, + relation_get, + relation_ids, + relation_set, + relations_of_type, +) + +from charmhelpers.core.host import service +from charmhelpers.core import host + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) +# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. 
Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 4.a If your charm is a subordinate charm set primary=False +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE(primary=False) +# +# 5. ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_.@]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _get_check_filename(self): + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) + + def _get_service_filename(self, hostname): + return os.path.join(NRPE.nagios_exportdir, + 'service__{}_{}.cfg'.format(hostname, self.command)) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def _remove_service_files(self): + if not os.path.exists(NRPE.nagios_exportdir): + return + for f in 
os.listdir(NRPE.nagios_exportdir): + if f.endswith('_{}.cfg'.format(self.command)): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + def remove(self, hostname): + nrpe_check_file = self._get_check_filename() + if os.path.exists(nrpe_check_file): + os.remove(nrpe_check_file) + self._remove_service_files() + + def write(self, nagios_context, hostname, nagios_servicegroups): + nrpe_check_file = self._get_check_filename() + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + if nagios_servicegroups: + nrpe_check_config.write( + "# The following header was added automatically by juju\n") + nrpe_check_config.write( + "# Modifying it will affect nagios monitoring and alerting\n") + nrpe_check_config.write( + "# servicegroups: {}\n".format(nagios_servicegroups)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) + + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups): + self._remove_service_files() + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_servicegroups, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = self._get_service_filename(hostname) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server + + def __init__(self, hostname=None, primary=True): + super(NRPE, self).__init__() + self.config = config() + self.primary = primary + self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = self.nagios_context + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + nagios_hostname = get_nagios_hostname() + if nagios_hostname: + self.hostname = nagios_hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + # Iff in an nrpe-external-master relation hook, set primary status + relation = relation_ids('nrpe-external-master') + if relation: + log("Setting charm primary status {}".format(primary)) + for rid in relation: + relation_set(relation_id=rid, relation_settings={'primary': self.primary}) + self.remove_check_queue = set() + + def add_check(self, *args, **kwargs): + shortname = None + if kwargs.get('shortname') is None: + if len(args) > 0: + shortname = args[0] + else: + shortname = kwargs['shortname'] + + self.checks.append(Check(*args, **kwargs)) + try: + self.remove_check_queue.remove(shortname) + except KeyError: + pass + + def remove_check(self, *args, **kwargs): + if kwargs.get('shortname') is None: + raise ValueError('shortname of check must be specified') + + # Use sensible defaults if they're not specified - these are not + # actually used during removal, but they're 
required for constructing
+        # the Check object; check_disk is chosen because it's part of the
+        # nagios-plugins-basic package.
+        if kwargs.get('check_cmd') is None:
+            kwargs['check_cmd'] = 'check_disk'
+        if kwargs.get('description') is None:
+            kwargs['description'] = ''
+
+        check = Check(*args, **kwargs)
+        check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])
+
+    def write(self):
+        try:
+            nagios_uid = pwd.getpwnam('nagios').pw_uid
+            nagios_gid = grp.getgrnam('nagios').gr_gid
+        except Exception:
+            log("Nagios user not set up, nrpe checks not updated")
+            return
+
+        if not os.path.exists(NRPE.nagios_logdir):
+            os.mkdir(NRPE.nagios_logdir)
+            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
+
+        nrpe_monitors = {}
+        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
+        for nrpecheck in self.checks:
+            nrpecheck.write(self.nagios_context, self.hostname,
+                            self.nagios_servicegroups)
+            nrpe_monitors[nrpecheck.shortname] = {
+                "command": nrpecheck.command,
+            }
+
+        # update-status hooks are configured to fire every 5 minutes by
+        # default. When nagios-nrpe-server is restarted, the nagios server
+        # reports checks as failing, causing unnecessary alerts. Let's not
+        # restart on update-status hooks.
+        if hook_name() != 'update-status':
+            service('restart', 'nagios-nrpe-server')
+
+        monitor_ids = relation_ids("local-monitors") + \
+            relation_ids("nrpe-external-master")
+        for rid in monitor_ids:
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, since none exist yet
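+                # A sketch of what lands on the relation (the check name is
+                # hypothetical; the shape mirrors the `monitors` dict
+                # assembled above):
+                #
+                #   monitors:
+                #     remote:
+                #       nrpe:
+                #         myservice: {command: check_myservice}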
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_host_context
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_host_context' in rel:
+            return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_hostname
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+    """
+    Return the nagios unit name prepended with host_context if needed
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    host_context = get_nagios_hostcontext(relation_name)
+    if host_context:
+        unit = "%s:%s" % (host_context, local_unit())
+    else:
+        unit = local_unit()
+    return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param list services: List of services to check
+    :param str unit_name: Unit name to use in check description
+    :param bool immediate_check: For sysv init, run the service check immediately
+    """
+    for svc in services:
+        # Don't add a check for these services from neutron-gateway
+        if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
+            continue
+
+        upstart_init = '/etc/init/%s.conf' % svc
+        sysv_init = '/etc/init.d/%s' % svc
+
+        if host.init_is_systemd():
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_systemd.py %s' % svc
+            )
+        elif os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
+        elif os.path.exists(sysv_init):
+            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+            checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
+            croncmd = (
+                '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                '-e -s /etc/init.d/%s status' % svc
+            )
+            cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
+            with open(cronpath, 'w') as f:
+                f.write(cron_file)
+            nrpe.add_check(
+                shortname=svc,
+                description='service check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f %s' % checkpath,
+            )
+            # if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
+            # (LP: #1670223).
+            if immediate_check and os.path.isdir(nrpe.homedir):
+                with open(checkpath, 'w') as f:
+                    subprocess.call(
+                        croncmd.split(),
+                        stdout=f,
+                        stderr=subprocess.STDOUT
+                    )
+                os.chmod(checkpath, 0o644)
+
+
+def copy_nrpe_checks(nrpe_files_dir=None):
+    """
+    Copy the nrpe checks into place
+
+    """
+    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
+    if not os.path.exists(NAGIOS_PLUGINS):
+        os.makedirs(NAGIOS_PLUGINS)
+    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
+        if os.path.isfile(fname):
+            shutil.copy2(fname,
+                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
+
+
+def add_haproxy_checks(nrpe, unit_name):
+    """
+    Add checks for the HAProxy service
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param str unit_name: Unit name to use in check description
+    """
+    nrpe.add_check(
+        shortname='haproxy_servers',
+        description='Check HAProxy {%s}' % unit_name,
+        check_cmd='check_haproxy.sh')
+    nrpe.add_check(
+        shortname='haproxy_queue',
+        description='Check HAProxy queue depth {%s}' % unit_name,
+        check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+    """
+    Remove checks for deprecated services in list
+
+    :param nrpe: NRPE object to remove check from
+    :type nrpe: NRPE
+    :param deprecated_services: List of deprecated services that are removed
+    :type deprecated_services: list
+    """
+    for dep_svc in deprecated_services:
+        log('Deprecated service: {}'.format(dep_svc))
+        nrpe.remove_check(shortname=dep_svc)
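+
+
+# A minimal end-to-end sketch of the helpers above (shortnames and
+# commands are hypothetical). NRPE() gathers context from charm config,
+# add_check()/remove_check() queue changes, and write() persists them and
+# refreshes any monitors relations:
+#
+#     nrpe_compat = NRPE()
+#     nrpe_compat.add_check(
+#         shortname='myservice',
+#         description='Check MyService',
+#         check_cmd='check_http -w 2 -c 10 http://localhost')
+#     remove_deprecated_check(nrpe_compat, ['my_old_service'])
+#     nrpe_compat.write()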
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/volumes.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/volumes.py
new file mode 100644
index 00000000..7ea43f08
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/charmsupport/volumes.py
@@ -0,0 +1,173 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+Functions for managing volumes in juju units. One volume is supported per unit.
+Subordinates may have their own storage, provided it is on its own partition.
+
+Configuration stanzas::
+
+  volume-ephemeral:
+    type: boolean
+    default: true
+    description: >
+      If false, a volume is mounted as specified in "volume-map"
+      If true, ephemeral storage will be used, meaning that log data
+      will only exist as long as the machine does. YOU HAVE BEEN WARNED.
+  volume-map:
+    type: string
+    default: {}
+    description: >
+      YAML map of units to device names, e.g:
+        "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
+      Service units will raise a configure-error if volume-ephemeral
+      is 'false' and no volume-map value is set. Use 'juju set' to set a
+      value and 'juju resolved' to complete configuration.
+
+Usage::
+
+    from charmsupport.volumes import configure_volume, VolumeConfigurationError
+    from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+        stop_service('myservice')
+    def post_mount_hook():
+        start_service('myservice')
+
+    if __name__ == '__main__':
+        try:
+            configure_volume(before_change=pre_mount_hook,
+                             after_change=post_mount_hook)
+        except VolumeConfigurationError:
+            log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+    '''Volume configuration data is missing or invalid'''
+    pass
+
+
+def get_config():
+    '''Gather and sanity-check volume configuration data'''
+    volume_config = {}
+    config = hookenv.config()
+
+    errors = False
+
+    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+        volume_config['ephemeral'] = True
+    else:
+        volume_config['ephemeral'] = False
+
+    try:
+        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+    except yaml.YAMLError as e:
+        hookenv.log("Error parsing YAML volume-map: {}".format(e),
+                    hookenv.ERROR)
+        # keep volume_map defined so the checks below don't raise NameError
+        volume_map = None
+        errors = True
+    if volume_map is None:
+        # probably an empty string
+        volume_map = {}
+    elif not isinstance(volume_map, dict):
+        hookenv.log("Volume-map should be a dictionary, not {}".format(
+            type(volume_map)))
+        errors = True
+
+    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+    if volume_config['device'] and volume_config['ephemeral']:
+        # asked for ephemeral storage but also defined a volume ID
+        hookenv.log('A volume is defined for this unit, but ephemeral '
+                    'storage was requested', hookenv.ERROR)
+        errors = True
+    elif not volume_config['device'] and not volume_config['ephemeral']:
+        # asked for permanent storage but did not define volume ID
+        hookenv.log('Persistent storage was requested, but there is no volume '
+                    'defined for this unit.', hookenv.ERROR)
+        errors = True
+
+    unit_mount_name = hookenv.local_unit().replace('/', '-')
+    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+    if errors:
+        return None
+    return volume_config
+
+
+def mount_volume(config):
+    if os.path.exists(config['mountpoint']):
+        if not os.path.isdir(config['mountpoint']):
+            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+            raise VolumeConfigurationError()
+    else:
+        host.mkdir(config['mountpoint'])
+    if os.path.ismount(config['mountpoint']):
+        unmount_volume(config)
+    if not host.mount(config['device'], config['mountpoint'], persist=True):
+        raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+    if os.path.ismount(config['mountpoint']):
+        if not host.umount(config['mountpoint'], persist=True):
+            raise VolumeConfigurationError()
+
+
+def managed_mounts():
+    '''List of all mounted managed volumes'''
+    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+    '''Set up storage (or don't) according to the charm's volume configuration.
+ Returns the mount point or "ephemeral". before_change and after_change + are optional functions to be called if the volume configuration changes. + ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/__init__.py new file mode 100644 index 00000000..64fac9de --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/__init__.py @@ -0,0 +1,11 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/mysql.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/mysql.py new file mode 100644 index 00000000..c9ecce5f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/database/mysql.py @@ -0,0 +1,821 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Helper for working with a MySQL database""" +import collections +import copy +import json +import re +import sys +import platform +import os +import glob +import six + +# from string import upper + +from charmhelpers.core.host import ( + CompareHostReleases, + lsb_release, + mkdir, + pwgen, + write_file +) +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + related_units, + unit_get, + log, + DEBUG, + INFO, + WARNING, + leader_get, + leader_set, + is_leader, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, + filter_installed_packages, +) +from charmhelpers.contrib.network.ip import get_host_ip + +try: + import MySQLdb +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) + else: + apt_install(filter_installed_packages(['python3-mysqldb']), fatal=True) + import MySQLdb + + +class MySQLSetPasswordError(Exception): + pass + + +class MySQLHelper(object): + + def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', + migrate_passwd_to_leader_storage=True, + delete_ondisk_passwd_file=True, user="root", password=None, port=None): + self.user = user + self.host = host + self.password = password + self.port = port + + # Password file path templates + self.root_passwd_file_template = rpasswdf_template + self.user_passwd_file_template = upasswdf_template + + self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage + # If we migrate we have the option to delete local copy of root passwd + self.delete_ondisk_passwd_file = delete_ondisk_passwd_file + self.connection = None + + def connect(self, user='root', password=None, host=None, port=None): + _connection_info = { + "user": user or self.user, + "passwd": password or self.password, + "host": host or self.host + } + # port cannot be None but we also do not want to specify it unless it + # has been explicit set. 
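+        # Illustration (hypothetical values): helper.connect() leaves the
+        # port to the MySQLdb default, while helper.connect(port=3306)
+        # includes it in the connection arguments assembled below.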
+        port = port or self.port
+        if port is not None:
+            _connection_info["port"] = port
+
+        log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
+        self.connection = MySQLdb.connect(**_connection_info)
+
+    def database_exists(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("SHOW DATABASES")
+            databases = [i[0] for i in cursor.fetchall()]
+        finally:
+            cursor.close()
+
+        return db_name in databases
+
+    def create_database(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
+                           .format(db_name))
+        finally:
+            cursor.close()
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
+                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
+        try:
+            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # TODO: review for different grants
+        return priv_string in grants
+
+    def create_grant(self, db_name, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            # TODO: review for different grants
+            cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_name,
+                                                       db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def create_admin_grant(self, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def cleanup_grant(self, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        try:
+            # DELETE (not DROP) removes the user's row from mysql.user
+            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
+                           "AND HOST='{}'".format(db_user,
+                                                  remote_ip))
+        finally:
+            cursor.close()
+
+    def flush_priviledges(self):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("FLUSH PRIVILEGES")
+        finally:
+            cursor.close()
+
+    def execute(self, sql):
+        """Execute arbitrary SQL against the database."""
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(sql)
+        finally:
+            cursor.close()
+
+    def select(self, sql):
+        """
+        Execute arbitrary SQL select query against the database
+        and return the results.
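+
+        For example (hypothetical query and helper instance)::
+
+            rows = helper.select("SELECT user, host FROM mysql.user")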
+ + :param sql: SQL select query to execute + :type sql: string + :returns: SQL select query result + :rtype: list of lists + :raises: MySQLdb.Error + """ + cursor = self.connection.cursor() + try: + cursor.execute(sql) + results = [list(i) for i in cursor.fetchall()] + finally: + cursor.close() + return results + + def migrate_passwords_to_leader_storage(self, excludes=None): + """Migrate any passwords storage on disk to leader storage.""" + if not is_leader(): + log("Skipping password migration as not the lead unit", + level=DEBUG) + return + dirname = os.path.dirname(self.root_passwd_file_template) + path = os.path.join(dirname, '*.passwd') + for f in glob.glob(path): + if excludes and f in excludes: + log("Excluding %s from leader storage migration" % (f), + level=DEBUG) + continue + + key = os.path.basename(f) + with open(f, 'r') as passwd: + _value = passwd.read().strip() + + try: + leader_set(settings={key: _value}) + + if self.delete_ondisk_passwd_file: + os.unlink(f) + except ValueError: + # NOTE cluster relation not yet ready - skip for now + pass + + def get_mysql_password_on_disk(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username on disk.""" + if username: + template = self.user_passwd_file_template + passwd_file = template.format(username) + else: + passwd_file = self.root_passwd_file_template + + _password = None + if os.path.exists(passwd_file): + log("Using existing password file '%s'" % passwd_file, level=DEBUG) + with open(passwd_file, 'r') as passwd: + _password = passwd.read().strip() + else: + log("Generating new password file '%s'" % passwd_file, level=DEBUG) + if not os.path.isdir(os.path.dirname(passwd_file)): + # NOTE: need to ensure this is not mysql root dir (which needs + # to be mysql readable) + mkdir(os.path.dirname(passwd_file), owner='root', group='root', + perms=0o770) + # Force permissions - for some reason the chmod in makedirs + # fails + os.chmod(os.path.dirname(passwd_file), 0o770) + + _password = password or pwgen(length=32) + write_file(passwd_file, _password, owner='root', group='root', + perms=0o660) + + return _password + + def passwd_keys(self, username): + """Generator to return keys used to store passwords in peer store. + + NOTE: we support both legacy and new format to support mysql + charm prior to refactor. This is necessary to avoid LP 1451890. + """ + keys = [] + if username == 'mysql': + log("Bad username '%s'" % (username), level=WARNING) + + if username: + # IMPORTANT: *newer* format must be returned first + keys.append('mysql-%s.passwd' % (username)) + keys.append('%s.passwd' % (username)) + else: + keys.append('mysql.passwd') + + for key in keys: + yield key + + def get_mysql_password(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username using peer relation cluster.""" + excludes = [] + + # First check peer relation. 
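+        # For a hypothetical username 'nova', passwd_keys() above yields
+        # 'mysql-nova.passwd' and then 'nova.passwd' (newer format first),
+        # so leader storage is consulted in that order.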
+ try: + for key in self.passwd_keys(username): + _password = leader_get(key) + if _password: + break + + # If root password available don't update peer relation from local + if _password and not username: + excludes.append(self.root_passwd_file_template) + + except ValueError: + # cluster relation is not yet started; use on-disk + _password = None + + # If none available, generate new one + if not _password: + _password = self.get_mysql_password_on_disk(username, password) + + # Put on wire if required + if self.migrate_passwd_to_leader_storage: + self.migrate_passwords_to_leader_storage(excludes=excludes) + + return _password + + def get_mysql_root_password(self, password=None): + """Retrieve or generate mysql root password for service units.""" + return self.get_mysql_password(username=None, password=password) + + def set_mysql_password(self, username, password, current_password=None): + """Update a mysql password for the provided username changing the + leader settings + + To update root's password pass `None` in the username + + :param username: Username to change password of + :type username: str + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + + if username is None: + username = 'root' + + # get root password via leader-get, it may be that in the past (when + # changes to root-password were not supported) the user changed the + # password, so leader-get is more reliable source than + # config.previous('root-password'). + rel_username = None if username == 'root' else username + if not current_password: + current_password = self.get_mysql_password(rel_username) + + # password that needs to be set + new_passwd = password + + # update password for all users (e.g. root@localhost, root@::1, etc) + try: + self.connect(user=username, password=current_password) + cursor = self.connection.cursor() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using password in ' + 'leader settings (%s)') % ex, ex) + + try: + # NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account + # fails when using SET PASSWORD so using UPDATE against the + # mysql.user table is needed, but changes to this table are not + # replicated across the cluster, so this update needs to run in + # all the nodes. 
More info at + # http://galeracluster.com/documentation-webpages/userchanges.html + release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) + if release < 'bionic': + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = " + "PASSWORD( %s ) WHERE user = %s;") + else: + # PXC 5.7 (introduced in Bionic) uses authentication_string + SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET " + "authentication_string = " + "PASSWORD( %s ) WHERE user = %s;") + cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username)) + cursor.execute('FLUSH PRIVILEGES;') + self.connection.commit() + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError('Cannot update password: %s' % str(ex), + ex) + finally: + cursor.close() + + # check the password was changed + try: + self.connect(user=username, password=new_passwd) + self.execute('select 1;') + except MySQLdb.OperationalError as ex: + raise MySQLSetPasswordError(('Cannot connect using new password: ' + '%s') % str(ex), ex) + + if not is_leader(): + log('Only the leader can set a new password in the relation', + level=DEBUG) + return + + for key in self.passwd_keys(rel_username): + _password = leader_get(key) + if _password: + log('Updating password for %s (%s)' % (key, rel_username), + level=DEBUG) + leader_set(settings={key: new_passwd}) + + def set_mysql_root_password(self, password, current_password=None): + """Update mysql root password changing the leader settings + + :param password: New password for user. + :type password: str + :param current_password: Existing password for user. + :type current_password: str + """ + self.set_mysql_password( + 'root', + password, + current_password=current_password) + + def normalize_address(self, hostname): + """Ensure that address returned is an IP address (i.e. not fqdn)""" + if config_get('prefer-ipv6'): + # TODO: add support for ipv6 dns + return hostname + + if hostname != unit_get('private-address'): + return get_host_ip(hostname, fallback=hostname) + + # Otherwise assume localhost + return '127.0.0.1' + + def get_allowed_units(self, database, username, relation_id=None): + """Get list of units with access grants for database with username. + + This is typically used to provide shared-db relations with a list of + which units have been granted access to the given database. 
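+
+        For example, a hypothetical return value::
+
+            {'nova-cloud-controller/0', 'nova-cloud-controller/1'}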
+ """ + self.connect(password=self.get_mysql_root_password()) + allowed_units = set() + for unit in related_units(relation_id): + settings = relation_get(rid=relation_id, unit=unit) + # First check for setting with prefix, then without + for attr in ["%s_hostname" % (database), 'hostname']: + hosts = settings.get(attr, None) + if hosts: + break + + if hosts: + # hostname can be json-encoded list of hostnames + try: + hosts = json.loads(hosts) + except ValueError: + hosts = [hosts] + else: + hosts = [settings['private-address']] + + if hosts: + for host in hosts: + host = self.normalize_address(host) + if self.grant_exists(database, username, host): + log("Grant exists for host '%s' on db '%s'" % + (host, database), level=DEBUG) + if unit not in allowed_units: + allowed_units.add(unit) + else: + log("Grant does NOT exist for host '%s' on db '%s'" % + (host, database), level=DEBUG) + else: + log("No hosts found for grant check", level=INFO) + + return allowed_units + + def configure_db(self, hostname, database, username, admin=False): + """Configure access to database for username from hostname.""" + self.connect(password=self.get_mysql_root_password()) + if not self.database_exists(database): + self.create_database(database) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + if not self.grant_exists(database, username, remote_ip): + if not admin: + self.create_grant(database, username, remote_ip, password) + else: + self.create_admin_grant(username, remote_ip, password) + self.flush_priviledges() + + return password + + +# `_singleton_config_helper` stores the instance of the helper class that is +# being used during a hook invocation. +_singleton_config_helper = None + + +def get_mysql_config_helper(): + global _singleton_config_helper + if _singleton_config_helper is None: + _singleton_config_helper = MySQLConfigHelper() + return _singleton_config_helper + + +class MySQLConfigHelper(object): + """Base configuration helper for MySQL.""" + + # Going for the biggest page size to avoid wasted bytes. 
+ # InnoDB page size is 16MB + + DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 + DEFAULT_INNODB_BUFFER_FACTOR = 0.50 + DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024 + + # Validation and lookups for InnoDB configuration + INNODB_VALID_BUFFERING_VALUES = [ + 'none', + 'inserts', + 'deletes', + 'changes', + 'purges', + 'all' + ] + INNODB_FLUSH_CONFIG_VALUES = { + 'fast': 2, + 'safest': 1, + 'unsafe': 0, + } + + def human_to_bytes(self, human): + """Convert human readable configuration options to bytes.""" + num_re = re.compile('^[0-9]+$') + if num_re.match(human): + return human + + factors = { + 'K': 1024, + 'M': 1048576, + 'G': 1073741824, + 'T': 1099511627776 + } + modifier = human[-1] + if modifier in factors: + return int(human[:-1]) * factors[modifier] + + if modifier == '%': + total_ram = self.human_to_bytes(self.get_mem_total()) + if self.is_32bit_system() and total_ram > self.sys_mem_limit(): + total_ram = self.sys_mem_limit() + factor = int(human[:-1]) * 0.01 + pctram = total_ram * factor + return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) + + raise ValueError("Can only convert K,M,G, or T") + + def is_32bit_system(self): + """Determine whether system is 32 or 64 bit.""" + try: + return sys.maxsize < 2 ** 32 + except OverflowError: + return False + + def sys_mem_limit(self): + """Determine the default memory limit for the current service unit.""" + if platform.machine() in ['armv7l']: + _mem_limit = self.human_to_bytes('2700M') # experimentally determined + else: + # Limit for x86 based 32bit systems + _mem_limit = self.human_to_bytes('4G') + + return _mem_limit + + def get_mem_total(self): + """Calculate the total memory in the current service unit.""" + with open('/proc/meminfo') as meminfo_file: + for line in meminfo_file: + key, mem = line.split(':', 2) + if key == 'MemTotal': + mtot, modifier = mem.strip().split(' ') + return '%s%s' % (mtot, modifier[0].upper()) + + def get_innodb_flush_log_at_trx_commit(self): + """Get value for innodb_flush_log_at_trx_commit. + + Use the innodb-flush-log-at-trx-commit or the tunning-level setting + translated by INNODB_FLUSH_CONFIG_VALUES to get the + innodb_flush_log_at_trx_commit value. + + :returns: Numeric value for innodb_flush_log_at_trx_commit + :rtype: Union[None, int] + """ + _iflatc = config_get('innodb-flush-log-at-trx-commit') + _tuning_level = config_get('tuning-level') + if _iflatc: + return _iflatc + elif _tuning_level: + return self.INNODB_FLUSH_CONFIG_VALUES.get(_tuning_level, 1) + + def get_innodb_change_buffering(self): + """Get value for innodb_change_buffering. + + Use the innodb-change-buffering validated against + INNODB_VALID_BUFFERING_VALUES to get the innodb_change_buffering value. + + :returns: String value for innodb_change_buffering. + :rtype: Union[None, str] + """ + _icb = config_get('innodb-change-buffering') + if _icb and _icb in self.INNODB_VALID_BUFFERING_VALUES: + return _icb + + def get_innodb_buffer_pool_size(self): + """Get value for innodb_buffer_pool_size. + + Return the number value of innodb-buffer-pool-size or dataset-size. If + neither is set, calculate a sane default based on total memory. + + :returns: Numeric value for innodb_buffer_pool_size. 
+        :rtype: int
+        """
+        total_memory = self.human_to_bytes(self.get_mem_total())
+
+        dataset_bytes = config_get('dataset-size')
+        innodb_buffer_pool_size = config_get('innodb-buffer-pool-size')
+
+        if innodb_buffer_pool_size:
+            innodb_buffer_pool_size = self.human_to_bytes(
+                innodb_buffer_pool_size)
+        elif dataset_bytes:
+            log("Option 'dataset-size' has been deprecated, please use "
+                "the 'innodb-buffer-pool-size' option instead", level="WARN")
+            innodb_buffer_pool_size = self.human_to_bytes(
+                dataset_bytes)
+        else:
+            # NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
+            #                  to ensure that deployments in containers
+            #                  without constraints don't try to consume
+            #                  silly amounts of memory.
+            innodb_buffer_pool_size = min(
+                int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
+                self.DEFAULT_INNODB_BUFFER_SIZE_MAX
+            )
+
+        if innodb_buffer_pool_size > total_memory:
+            log("innodb_buffer_pool_size {} is greater than system available "
+                "memory {}".format(
+                    innodb_buffer_pool_size,
+                    total_memory), level='WARN')
+
+        return innodb_buffer_pool_size
+
+
+class PerconaClusterHelper(MySQLConfigHelper):
+    """Percona-cluster specific configuration helper."""
+
+    def parse_config(self):
+        """Parse charm configuration and calculate values for config files."""
+        config = config_get()
+        mysql_config = {}
+        if 'max-connections' in config:
+            mysql_config['max_connections'] = config['max-connections']
+
+        if 'wait-timeout' in config:
+            mysql_config['wait_timeout'] = config['wait-timeout']
+
+        if self.get_innodb_flush_log_at_trx_commit() is not None:
+            mysql_config['innodb_flush_log_at_trx_commit'] = \
+                self.get_innodb_flush_log_at_trx_commit()
+
+        if self.get_innodb_change_buffering() is not None:
+            mysql_config['innodb_change_buffering'] = \
+                self.get_innodb_change_buffering()
+
+        if 'innodb-io-capacity' in config:
+            mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']
+
+        # Set a sane default key_buffer size
+        mysql_config['key_buffer'] = self.human_to_bytes('32M')
+        mysql_config['innodb_buffer_pool_size'] = self.get_innodb_buffer_pool_size()
+        return mysql_config
+
+
+class MySQL8Helper(MySQLHelper):
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = ("GRANT ALL PRIVILEGES ON {}.* "
+                       "TO {}@{}".format(db_name, db_user, remote_ip))
+        try:
+            cursor.execute("SHOW GRANTS FOR '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # Different versions of MySQL use ' or `. Ignore these in the check.
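+        # e.g. "GRANT ALL PRIVILEGES ON `mydb`.* TO `myuser`@`10.0.0.1`" is
+        # compared as "GRANT ALL PRIVILEGES ON mydb.* TO myuser@10.0.0.1"
+        # (database and user names here are hypothetical).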
+ return priv_string in [ + i.replace("'", "").replace("`", "") for i in grants] + + def create_grant(self, db_name, db_user, remote_ip, password): + if self.grant_exists(db_name, db_user, remote_ip): + return + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + cursor = self.connection.cursor() + try: + cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}'" + .format(db_name, db_user, remote_ip)) + finally: + cursor.close() + + def create_user(self, db_user, remote_ip, password): + + SQL_USER_CREATE = ( + "CREATE USER '{db_user}'@'{remote_ip}' " + "IDENTIFIED BY '{password}'") + + cursor = self.connection.cursor() + try: + cursor.execute(SQL_USER_CREATE.format( + db_user=db_user, + remote_ip=remote_ip, + password=password) + ) + except MySQLdb._exceptions.OperationalError: + log("DB user {} already exists.".format(db_user), + "WARNING") + finally: + cursor.close() + + def create_router_grant(self, db_user, remote_ip, password): + + # Make sure the user exists + # MySQL8 must create the user before the grant + self.create_user(db_user, remote_ip, password) + + # Mysql-Router specific grants + cursor = self.connection.cursor() + try: + cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT " + "OPTION".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON " + "mysql_innodb_cluster_metadata.* TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'" + .format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_members " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.replication_group_member_stats " + "TO '{}'@'{}'".format(db_user, remote_ip)) + cursor.execute("GRANT SELECT ON " + "performance_schema.global_variables " + "TO '{}'@'{}'".format(db_user, remote_ip)) + finally: + cursor.close() + + def configure_router(self, hostname, username): + + if self.connection is None: + self.connect(password=self.get_mysql_root_password()) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + self.create_user(username, remote_ip, password) + self.create_router_grant(username, remote_ip, password) + + return password + + +def get_prefix(requested, keys=None): + """Return existing prefix or None. + + :param requested: Request string. i.e. novacell0_username + :type requested: str + :param keys: Keys to determine prefix. Defaults set in function. + :type keys: List of str keys + :returns: String prefix i.e. novacell0 + :rtype: Union[None, str] + """ + if keys is None: + # Shared-DB default keys + keys = ["_database", "_username", "_hostname"] + for key in keys: + if requested.endswith(key): + return requested[:-len(key)] + + +def get_db_data(relation_data, unprefixed): + """Organize database requests into a collections.OrderedDict + + :param relation_data: shared-db relation data + :type relation_data: dict + :param unprefixed: Prefix to use for requests without a prefix. This should + be unique for each side of the relation to avoid + conflicts. 
+ :type unprefixed: str + :returns: Order dict of databases and users + :rtype: collections.OrderedDict + """ + # Deep copy to avoid unintentionally changing relation data + settings = copy.deepcopy(relation_data) + databases = collections.OrderedDict() + + # Clear non-db related elements + if "egress-subnets" in settings.keys(): + settings.pop("egress-subnets") + if "ingress-address" in settings.keys(): + settings.pop("ingress-address") + if "private-address" in settings.keys(): + settings.pop("private-address") + + singleset = {"database", "username", "hostname"} + if singleset.issubset(settings): + settings["{}_{}".format(unprefixed, "hostname")] = ( + settings["hostname"]) + settings.pop("hostname") + settings["{}_{}".format(unprefixed, "database")] = ( + settings["database"]) + settings.pop("database") + settings["{}_{}".format(unprefixed, "username")] = ( + settings["username"]) + settings.pop("username") + + for k, v in settings.items(): + db = k.split("_")[0] + x = "_".join(k.split("_")[1:]) + if db not in databases: + databases[db] = collections.OrderedDict() + databases[db][x] = v + + return databases diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/apache.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 00000000..2c1e371e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,86 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. 
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page <james.page@ubuntu.com> +# Adam Gandelman <adamg@ubuntu.com> +# + +import os + +from charmhelpers.core import host +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(cn=None): + # TODO: deal with multiple https endpoints via charm config + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + if cn: + ssl_cert_attr = 'ssl_cert_{}'.format(cn) + ssl_key_attr = 'ssl_key_{}'.format(cn) + else: + ssl_cert_attr = 'ssl_cert' + ssl_key_attr = 'ssl_key' + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get(ssl_cert_attr, + rid=r_id, unit=unit) + if not key: + key = relation_get(ssl_key_attr, + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = config_get('ssl_ca') + if ca_cert is None: + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in (relation_ids('identity-service') + + relation_ids('identity-credentials')): + for unit in relation_list(r_id): + if ca_cert is None: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def retrieve_ca_cert(cert_file): + cert = None + if os.path.isfile(cert_file): + with open(cert_file, 'rb') as crt: + cert = crt.read() + return cert + + +def install_ca_cert(ca_cert): + host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert') diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/cluster.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 00000000..ba34fba0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,451 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page <james.page@ubuntu.com> +# Adam Gandelman <adamg@ubuntu.com> +# + +""" +Helpers for clustering and determining "cluster leadership" and other +clustering-related helpers. 
+""" + +import functools +import subprocess +import os +import time + +from socket import gethostname as get_unit_hostname + +import six + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + DEBUG, + WARNING, + unit_get, + is_leader as juju_is_leader, + status_set, +) +from charmhelpers.core.host import ( + modulo_distribution, +) +from charmhelpers.core.decorators import ( + retry_on_exception, +) +from charmhelpers.core.strutils import ( + bool_from_string, +) + +DC_RESOURCE_NAME = 'DC' + + +class HAIncompleteConfig(Exception): + pass + + +class HAIncorrectConfig(Exception): + pass + + +class CRMResourceNotFound(Exception): + pass + + +class CRMDCNotFound(Exception): + pass + + +def is_elected_leader(resource): + """ + Returns True if the charm executing this is the elected cluster leader. + + It relies on two mechanisms to determine leadership: + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to + determine leadership. + 3. If the charm is not part of a corosync cluster, the leader is + determined as being "the alive unit with the lowest unit numer". In + other words, the oldest surviving unit. + """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + + if is_clustered(): + if not is_crm_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_crm_dc(): + """ + Determine leadership by querying the pacemaker Designated Controller + """ + cmd = ['crm', 'status'] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + + current_dc = '' + for line in status.split('\n'): + if line.startswith('Current DC'): + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum + current_dc = line.split(':')[1].split()[0] + if current_dc == get_unit_hostname(): + return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + + return False + + +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) +def is_crm_leader(resource, retry=False): + """ + Returns True if the charm calling this is the elected corosync leader, + as returned by calling the external "crm" command. + + We allow this operation to be retried to avoid the possibility of getting a + false negative. See LP #1396246 for more info. 
+ """ + if resource == DC_RESOURCE_NAME: + return is_crm_dc() + cmd = ['crm', 'resource', 'show', resource] + try: + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + if not isinstance(status, six.text_type): + status = six.text_type(status, "utf-8") + except subprocess.CalledProcessError: + status = None + + if status and get_unit_hostname() in status: + return True + + if status and "resource %s is NOT running" % (resource) in status: + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) + + return False + + +def is_leader(resource): + log("is_leader is deprecated. Please consider using is_crm_leader " + "instead.", level=WARNING) + return is_crm_leader(resource) + + +def peer_units(peer_relation="cluster"): + peers = [] + for r_id in (relation_ids(peer_relation) or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def peer_ips(peer_relation='cluster', addr_key='private-address'): + '''Return a dict of peers and their private-address''' + peers = {} + for r_id in relation_ids(peer_relation): + for unit in relation_list(r_id): + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) + return peers + + +def oldest_peer(peers): + """Determines who the oldest peer is by comparing unit numbers.""" + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + log("eligible_leader is deprecated. Please consider using " + "is_elected_leader instead.", level=WARNING) + return is_elected_leader(resource) + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . + returns: boolean + ''' + use_https = config_get('use-https') + if use_https and bool_from_string(use_https): + return True + if config_get('ssl_cert') and config_get('ssl_key'): + return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN + rel_state = [ + relation_get('https_keystone', rid=r_id, unit=unit), + relation_get('ca_cert', rid=r_id, unit=unit), + ] + # NOTE: works around (LP: #1203241) + if (None not in rel_state) and ('' not in rel_state): + return True + return False + + +def determine_api_port(public_port, singlenode_mode=False): + ''' + Determine correct API server listening port based on + existence of HTTPS reverse proxy and/or haproxy. + + public_port: int: standard public port for given service + + singlenode_mode: boolean: Shuffle ports when only a single unit is present + + returns: int: the correct listening port for the API service + ''' + i = 0 + if singlenode_mode: + i += 1 + elif len(peer_units()) > 0 or is_clustered(): + i += 1 + if https(): + i += 1 + return public_port - (i * 10) + + +def determine_apache_port(public_port, singlenode_mode=False): + ''' + Description: Determine correct apache listening port based on public IP + + state of the cluster. 
+
+    public_port: int: standard public port for given service
+
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
+    returns: int: the correct listening port for the HAProxy service
+    '''
+    i = 0
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
+        i += 1
+    return public_port - (i * 10)
+
+
+determine_apache_port_single = functools.partial(
+    determine_apache_port, singlenode_mode=True)
+
+
+def get_hacluster_config(exclude_keys=None):
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname, os-access-hostname
+
+    param: exclude_keys: list of setting key(s) to be excluded.
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncorrectConfig if settings are missing or incorrect.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
+    conf = {}
+    for setting in settings:
+        if exclude_keys and setting in exclude_keys:
+            continue
+
+        conf[setting] = config_get(setting)
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
+    return conf
+
+
+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not (bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname', 'os-access-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSTemplateRenderer: A config templating object to inspect for
+                  a complete https context.
+
+    :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
+    '''
+    scheme = 'http'
+    if 'https' in configs.complete_contexts():
+        scheme = 'https'
+    if is_clustered():
+        addr = config_get(vip_setting)
+    else:
+        addr = unit_get('private-address')
+    return '%s://%s' % (scheme, addr)
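+
+
+# Worked example for distributed_wait() below (unit numbers hypothetical),
+# assuming modulo_distribution() returns (unit_number % modulo) * wait and
+# bumps a zero result to modulo * wait when non_zero_wait=True: with
+# modulo=3 and wait=30, unit foo/4 waits 30s, unit foo/5 waits 60s, unit
+# foo/3 waits 90s, and the leader does not wait at all.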
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/README.hardening.md b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/README.hardening.md
new file mode 100644
index 00000000..91280c03
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/README.hardening.md
@@ -0,0 +1,38 @@
+# Juju charm-helpers hardening library
+
+## Description
+
+This library provides multiple implementations of system and application
+hardening that conform to the standards of http://hardening.io/.
+
+Current implementations include:
+
+ * OS
+ * SSH
+ * MySQL
+ * Apache
+
+## Requirements
+
+* Juju Charms
+
+## Usage
+
+1. Synchronise this library into your charm and add the harden() decorator
+   (from contrib.hardening.harden) to any functions or methods you want to use
+   to trigger hardening of your application/system.
+
+2. Add a config option called 'harden' to your charm config.yaml and set it to
+   a space-delimited list of hardening modules you want to run e.g. "os ssh"
+
+3. Override any config defaults (contrib.hardening.defaults) by adding a file
+   called hardening.yaml to your charm root containing the name(s) of the
+   modules whose settings you want to override at root level and then any
+   settings with overrides e.g.
+
+   os:
+     general:
+       desktop_enable: True
+
+4. Now just run your charm as usual and hardening will be applied each time
+   the hook runs.
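For instance, step 1 above might look like the following sketch in a charm's hooks file (the hook name is illustrative; `harden` is the decorator added by this patch in contrib/hardening/harden.py):

```python
# Illustrative only: a charm hook wrapped with the harden() decorator.
# With config 'harden: os ssh', the os and ssh hardening modules run
# before the hook body each time the hook fires.
from charmhelpers.contrib.hardening.harden import harden


@harden()
def config_changed():
    # normal hook logic goes here
    pass
```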
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/__init__.py
new file mode 100644
index 00000000..30a3e943
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/__init__.py
new file mode 100644
index 00000000..58bebd84
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py
new file mode 100644
index 00000000..3bc2ebd4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.apache.checks import config + + +def run_apache_checks(): + log("Starting Apache hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("Apache hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/config.py new file mode 100644 index 00000000..341da9ee --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/checks/config.py @@ -0,0 +1,104 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +import six +import subprocess + + +from charmhelpers.core.hookenv import ( + log, + INFO, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + NoReadWriteForOther, + TemplatedFile, + DeletedFile +) +from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit +from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get Apache hardening config audits. + + :returns: dictionary of audits + """ + if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0: + log("Apache server does not appear to be installed on this node - " + "skipping apache hardening", level=INFO) + return [] + + context = ApacheConfContext() + settings = utils.get_settings('apache') + audits = [ + FilePermissionAudit(paths=os.path.join( + settings['common']['apache_dir'], 'apache2.conf'), + user='root', group='root', mode=0o0640), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'mods-available/alias.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + TemplatedFile(os.path.join(settings['common']['apache_dir'], + 'conf-enabled/99-hardening.conf'), + context, + TEMPLATES_DIR, + mode=0o0640, + user='root', + service_actions=[{'service': 'apache2', + 'actions': ['restart']}]), + + DirectoryPermissionAudit(settings['common']['apache_dir'], + user='root', + group='root', + mode=0o0750), + + DisabledModuleAudit(settings['hardening']['modules_to_disable']), + + NoReadWriteForOther(settings['common']['apache_dir']), + + DeletedFile(['/var/www/html/index.html']) + ] + + return audits + + +class ApacheConfContext(object): + """Defines the set of key/value pairs to set in a apache config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/apache/conf-enabled/hardening.conf file. 
+ """ + def __call__(self): + settings = utils.get_settings('apache') + ctxt = settings['hardening'] + + out = subprocess.check_output(['apache2', '-v']) + if six.PY3: + out = out.decode('utf-8') + ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+', + out).group(1) + ctxt['apache_icondir'] = '/usr/share/apache2/icons/' + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf new file mode 100644 index 00000000..22b68041 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/99-hardening.conf @@ -0,0 +1,32 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### + +<Location / > + <LimitExcept {{ allowed_http_methods }} > + # http://httpd.apache.org/docs/2.4/upgrading.html + {% if apache_version > '2.2' -%} + Require all granted + {% else -%} + Order Allow,Deny + Deny from all + {% endif %} + </LimitExcept> +</Location> + +<Directory /> + Options -Indexes -FollowSymLinks + AllowOverride None +</Directory> + +<Directory /var/www/> + Options -Indexes -FollowSymLinks + AllowOverride None +</Directory> + +TraceEnable {{ traceenable }} +ServerTokens {{ servertokens }} + +SSLHonorCipherOrder {{ honor_cipher_order }} +SSLCipherSuite {{ cipher_suite }} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf new file mode 100644 index 00000000..e46a58a3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/apache/templates/alias.conf @@ -0,0 +1,31 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +<IfModule alias_module> + # + # Aliases: Add here as many aliases as you need (with no limit). The format is + # Alias fakename realname + # + # Note that if you include a trailing / on fakename then the server will + # require it to be present in the URL. So "/icons" isn't aliased in this + # example, only "/icons/". If the fakename is slash-terminated, then the + # realname must also be slash terminated, and if the fakename omits the + # trailing slash, the realname must also omit it. + # + # We include the /icons/ alias for FancyIndexed directory listings. If + # you do not use FancyIndexing, you may comment this out. 
+  #
+  Alias /icons/ "{{ apache_icondir }}/"
+
+  <Directory "{{ apache_icondir }}">
+    Options -Indexes -MultiViews -FollowSymLinks
+    AllowOverride None
+{% if apache_version == '2.4' -%}
+    Require all granted
+{% else -%}
+    Order allow,deny
+    Allow from all
+{% endif %}
+  </Directory>
+</IfModule>
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/__init__.py
new file mode 100644
index 00000000..6dd5b05f
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/__init__.py
@@ -0,0 +1,54 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class BaseAudit(object):  # noqa
+    """Base class for hardening checks.
+
+    The lifecycle of a hardening check is to first check to see if the system
+    is in compliance for the specified check. If it is not in compliance,
+    corrective action is taken to bring the system into compliance.
+    """
+    def __init__(self, *args, **kwargs):
+        self.unless = kwargs.get('unless', None)
+        super(BaseAudit, self).__init__()
+
+    def ensure_compliance(self):
+        """Checks to see if the current hardening check is in compliance or
+        not.
+
+        If the check that is performed is not in compliance, then an exception
+        should be raised.
+        """
+        pass
+
+    def _take_action(self):
+        """Determines whether to perform the action or not.
+
+        Checks whether or not an action should be taken. This is determined by
+        the truthy value of the unless parameter. If unless is a callback
+        method, it will be invoked with no parameters in order to determine
+        whether or not the action should be taken. Otherwise, the truthy value
+        of the unless attribute will determine if the action should be
+        performed.
+        """
+        # Do the action if there isn't an unless override.
+        if self.unless is None:
+            return True
+
+        # Invoke the callback if there is one.
+        if hasattr(self.unless, '__call__'):
+            return not self.unless()
+
+        return not self.unless
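As a concrete illustration of this lifecycle, a minimal custom audit might look like the following (a hypothetical class, not part of the patch; the file path is arbitrary):

```python
# Hypothetical example of the BaseAudit lifecycle: ensure_compliance()
# checks state and applies a fix, while 'unless' can veto the action.
from charmhelpers.contrib.hardening.audits import BaseAudit


class MotdAudit(BaseAudit):
    """Toy audit ensuring /etc/motd exists."""

    def ensure_compliance(self):
        if not self._take_action():
            return  # vetoed by the 'unless' override
        open('/etc/motd', 'a').close()  # create the file if missing


# unless=False means the action is never vetoed.
MotdAudit(unless=False).ensure_compliance()
```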
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apache.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apache.py
new file mode 100644
index 00000000..04825f5a
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apache.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+import six
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+    ERROR,
+)
+
+from charmhelpers.contrib.hardening.audits import BaseAudit
+
+
+class DisabledModuleAudit(BaseAudit):
+    """Audits Apache2 modules.
+
+    Determines whether the given apache2 modules are enabled. Any module
+    found to be enabled is disabled by ensure_compliance().
+    """
+    def __init__(self, modules):
+        if modules is None:
+            self.modules = []
+        elif isinstance(modules, six.string_types):
+            self.modules = [modules]
+        else:
+            self.modules = modules
+
+    def ensure_compliance(self):
+        """Ensures that the modules are not loaded."""
+        if not self.modules:
+            return
+
+        try:
+            loaded_modules = self._get_loaded_modules()
+            non_compliant_modules = []
+            for module in self.modules:
+                if module in loaded_modules:
+                    log("Module '%s' is enabled but should not be." %
+                        (module), level=INFO)
+                    non_compliant_modules.append(module)
+
+            if len(non_compliant_modules) == 0:
+                return
+
+            for module in non_compliant_modules:
+                self._disable_module(module)
+            self._restart_apache()
+        except subprocess.CalledProcessError as e:
+            log('Error occurred auditing apache module compliance. '
+                'This may have been already reported. '
+                'Output is: %s' % e.output, level=ERROR)
+
+    @staticmethod
+    def _get_loaded_modules():
+        """Returns the modules which are enabled in Apache."""
+        output = subprocess.check_output(['apache2ctl', '-M'])
+        if six.PY3:
+            output = output.decode('utf-8')
+        modules = []
+        for line in output.splitlines():
+            # Each line of the enabled module output looks like:
+            #  module_name (static|shared)
+            # Plus a header line at the top of the output which is stripped
+            # out by the regex.
+            matcher = re.search(r'^ (\S*)_module (\S*)', line)
+            if matcher:
+                modules.append(matcher.group(1))
+        return modules
+
+    @staticmethod
+    def _disable_module(module):
+        """Disables the specified module in Apache."""
+        try:
+            subprocess.check_call(['a2dismod', module])
+        except subprocess.CalledProcessError as e:
+            # Note: catch error here to allow the attempt of disabling
+            # multiple modules in one go rather than failing after the
+            # first module fails.
+            log('Error occurred disabling module %s. '
+                'Output is: %s' % (module, e.output), level=ERROR)
+
+    @staticmethod
+    def _restart_apache():
+        """Restarts the apache process."""
+        subprocess.check_output(['service', 'apache2', 'restart'])
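Driven directly, the audit looks like this sketch (the module list mirrors the `modules_to_disable` default, `[cgi, cgid]`, shipped later in this patch; it assumes apache2 is installed):

```python
# Sketch: disable the cgi/cgid modules if they are currently enabled.
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit

audit = DisabledModuleAudit(['cgi', 'cgid'])
audit.ensure_compliance()  # runs a2dismod as needed, then restarts apache2
```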
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apt.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apt.py
new file mode 100644
index 00000000..cad7bf73
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/apt.py
@@ -0,0 +1,104 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import  # required for external apt import
+from six import string_types
+
+from charmhelpers.fetch import (
+    apt_cache,
+    apt_purge
+)
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg
+
+
+class AptConfig(BaseAudit):
+
+    def __init__(self, config, **kwargs):
+        # Initialise BaseAudit attributes (e.g. unless) before storing config.
+        super(AptConfig, self).__init__(**kwargs)
+        self.config = config
+
+    def verify_config(self):
+        apt_pkg.init()
+        for cfg in self.config:
+            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
+            if value and value != cfg['expected']:
+                log("APT config '%s' has unexpected value '%s' "
+                    "(expected='%s')" %
+                    (cfg['key'], value, cfg['expected']), level=WARNING)
+
+    def ensure_compliance(self):
+        self.verify_config()
+
+
+class RestrictedPackages(BaseAudit):
+    """Class used to audit restricted packages on the system."""
+
+    def __init__(self, pkgs, **kwargs):
+        super(RestrictedPackages, self).__init__(**kwargs)
+        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
+            self.pkgs = pkgs.split()
+        else:
+            self.pkgs = pkgs
+
+    def ensure_compliance(self):
+        cache = apt_cache()
+
+        for p in self.pkgs:
+            if p not in cache:
+                continue
+
+            pkg = cache[p]
+            if not self.is_virtual_package(pkg):
+                if not pkg.current_ver:
+                    log("Package '%s' is not installed." % pkg.name,
+                        level=DEBUG)
+                    continue
+                else:
+                    log("Restricted package '%s' is installed" % pkg.name,
+                        level=WARNING)
+                    self.delete_package(cache, pkg)
+            else:
+                log("Checking restricted virtual package '%s' provides" %
+                    pkg.name, level=DEBUG)
+                self.delete_package(cache, pkg)
+
+    def delete_package(self, cache, pkg):
+        """Deletes the package from the system.
+
+        Deletes the package from the system, properly handling virtual
+        packages.
+
+        :param cache: the apt cache
+        :param pkg: the package to remove
+        """
+        if self.is_virtual_package(pkg):
+            log("Package '%s' appears to be virtual - purging provides" %
+                pkg.name, level=DEBUG)
+            for _p in pkg.provides_list:
+                self.delete_package(cache, _p[2].parent_pkg)
+        elif not pkg.current_ver:
+            log("Package '%s' not installed" % pkg.name, level=DEBUG)
+            return
+        else:
+            log("Purging package '%s'" % pkg.name, level=DEBUG)
+            apt_purge(pkg.name)
+
+    def is_virtual_package(self, pkg):
+        return (pkg.get('has_provides', False) and
+                not pkg.get('has_versions', False))
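A hedged sketch of composing the two apt audits by hand; `get_audits()` in host/checks/apt.py (later in this patch) builds a similar list from the 'os' defaults:

```python
# Assemble and run apt audits directly; the key and package names mirror
# values used elsewhere in this patch.
from charmhelpers.contrib.hardening.audits.apt import (
    AptConfig,
    RestrictedPackages,
)

audits = [
    AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
                'expected': 'false'}]),
    RestrictedPackages(['telnet-server', 'rsh-server']),
]
for audit in audits:
    audit.ensure_compliance()
```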
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/file.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/file.py
new file mode 100644
index 00000000..257c6351
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/audits/file.py
@@ -0,0 +1,550 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grp
+import os
+import pwd
+import re
+
+from subprocess import (
+    CalledProcessError,
+    check_output,
+    check_call,
+)
+from traceback import format_exc
+from six import string_types
+from stat import (
+    S_ISGID,
+    S_ISUID
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    INFO,
+    WARNING,
+    ERROR,
+)
+from charmhelpers.core import unitdata
+from charmhelpers.core.host import file_hash
+from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.contrib.hardening.templating import (
+    get_template_path,
+    render_and_write,
+)
+from charmhelpers.contrib.hardening import utils
+
+
+class BaseFileAudit(BaseAudit):
+    """Base class for file audits.
+
+    Provides API stubs for the compliance check flow that must be implemented
+    by any class that inherits from this one.
+    """
+
+    def __init__(self, paths, always_comply=False, *args, **kwargs):
+        """
+        :param paths: string path or list of paths of files we want to apply
+                      compliance checks or criteria to.
+        :param always_comply: if True, compliance criteria are always applied;
+                              otherwise compliance is skipped for non-existent
+                              paths.
+        """
+        super(BaseFileAudit, self).__init__(*args, **kwargs)
+        self.always_comply = always_comply
+        if isinstance(paths, string_types) or not hasattr(paths, '__iter__'):
+            self.paths = [paths]
+        else:
+            self.paths = paths
+
+    def ensure_compliance(self):
+        """Ensure that all registered files comply with the registered
+        criteria.
+        """
+        for p in self.paths:
+            if os.path.exists(p):
+                if self.is_compliant(p):
+                    continue
+
+                log('File %s is not in compliance.' % p, level=INFO)
+            else:
+                if not self.always_comply:
+                    log("Non-existent path '%s' - skipping compliance check"
+                        % (p), level=INFO)
+                    continue
+
+            if self._take_action():
+                log("Applying compliance criteria to '%s'" % (p), level=INFO)
+                self.comply(p)
+
+    def is_compliant(self, path):
+        """Audits the path to see if it is in compliance.
+
+        :param path: the path to the file that should be checked.
+        """
+        raise NotImplementedError
+
+    def comply(self, path):
+        """Enforces the compliance of a path.
+
+        :param path: the path to the file that should be enforced.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def _get_stat(cls, path):
+        """Returns the POSIX st_stat information for the specified file path.
+
+        :param path: the path to get the st_stat information for.
+        :returns: an os.stat_result object for the path.
+        """
+        return os.stat(path)
+ """ + def __init__(self, paths, user, group=None, mode=0o600, **kwargs): + self.user = user + self.group = group + self.mode = mode + super(FilePermissionAudit, self).__init__(paths, user, group, mode, + **kwargs) + + @property + def user(self): + return self._user + + @user.setter + def user(self, name): + try: + user = pwd.getpwnam(name) + except KeyError: + log('Unknown user %s' % name, level=ERROR) + user = None + self._user = user + + @property + def group(self): + return self._group + + @group.setter + def group(self, name): + try: + group = None + if name: + group = grp.getgrnam(name) + else: + group = grp.getgrgid(self.user.pw_gid) + except KeyError: + log('Unknown group %s' % name, level=ERROR) + self._group = group + + def is_compliant(self, path): + """Checks if the path is in compliance. + + Used to determine if the path specified meets the necessary + requirements to be in compliance with the check itself. + + :param path: the file path to check + :returns: True if the path is compliant, False otherwise. + """ + stat = self._get_stat(path) + user = self.user + group = self.group + + compliant = True + if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid: + log('File %s is not owned by %s:%s.' % (path, user.pw_name, + group.gr_name), + level=INFO) + compliant = False + + # POSIX refers to the st_mode bits as corresponding to both the + # file type and file permission bits, where the least significant 12 + # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the + # file permission bits (8-0) + perms = stat.st_mode & 0o7777 + if perms != self.mode: + log('File %s has incorrect permissions, currently set to %s' % + (path, oct(stat.st_mode & 0o7777)), level=INFO) + compliant = False + + return compliant + + def comply(self, path): + """Issues a chown and chmod to the file paths specified.""" + utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name, + self.mode) + + +class DirectoryPermissionAudit(FilePermissionAudit): + """Performs a permission check for the specified directory path.""" + + def __init__(self, paths, user, group=None, mode=0o600, + recursive=True, **kwargs): + super(DirectoryPermissionAudit, self).__init__(paths, user, group, + mode, **kwargs) + self.recursive = recursive + + def is_compliant(self, path): + """Checks if the directory is compliant. + + Used to determine if the path specified and all of its children + directories are in compliance with the check itself. + + :param path: the directory path to check + :returns: True if the directory tree is compliant, otherwise False. + """ + if not os.path.isdir(path): + log('Path specified %s is not a directory.' % path, level=ERROR) + raise ValueError("%s is not a directory." 
+
+
+class ReadOnly(BaseFileAudit):
+    """Audits that files and folders are read only."""
+    def __init__(self, paths, *args, **kwargs):
+        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        try:
+            output = check_output(['find', path, '-perm', '-go+w',
+                                   '-type', 'f']).strip()
+
+            # The find above will find any files which have permission sets
+            # which allow too broad of write access. As such, the path is
+            # compliant if there is no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred checking for writable files for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', 'go-w', '-R', path])
+        except CalledProcessError as e:
+            log('Error occurred removing writable permissions for %s. '
+                'Error information is: command %s failed with returncode '
+                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
+                                           format_exc(e)), level=ERROR)
+
+
+class NoReadWriteForOther(BaseFileAudit):
+    """Ensures that the files found under the base path are not readable or
+    writable by anyone other than the owner or the group.
+    """
+    def __init__(self, paths):
+        super(NoReadWriteForOther, self).__init__(paths)
+
+    def is_compliant(self, path):
+        try:
+            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
+                   '-perm', '-o+w', '-type', 'f']
+            output = check_output(cmd).strip()
+
+            # The find above here will find any files which have read or
+            # write permissions for other, meaning there is too broad of access
+            # to read/write the file. As such, the path is compliant if there's
+            # no output.
+            if output:
+                return False
+
+            return True
+        except CalledProcessError as e:
+            log('Error occurred while finding files which are readable or '
+                'writable to the world in %s. '
+                'Command output is: %s.' % (path, e.output), level=ERROR)
+            return False
+
+    def comply(self, path):
+        try:
+            check_output(['chmod', '-R', 'o-rw', path])
+        except CalledProcessError as e:
+            log('Error occurred attempting to change modes of files under '
+                'path %s. Output of command is: %s' % (path, e.output),
+                level=ERROR)
+
+
+class NoSUIDSGIDAudit(BaseFileAudit):
+    """Audits that specified files do not have SUID/SGID bits set."""
+    def __init__(self, paths, *args, **kwargs):
+        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)
+
+    def is_compliant(self, path):
+        stat = self._get_stat(path)
+        if (stat.st_mode & (S_ISGID | S_ISUID)) != 0:
+            return False
+
+        return True
+
+    def comply(self, path):
+        try:
+            log('Removing suid/sgid from %s.' % path, level=DEBUG)
+            check_output(['chmod', '-s', path])
+        except CalledProcessError as e:
+            log('Error occurred removing suid/sgid from %s.'
+ 'Error information is: command %s failed with returncode ' + '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output, + format_exc(e)), level=ERROR) + + +class TemplatedFile(BaseFileAudit): + """The TemplatedFileAudit audits the contents of a templated file. + + This audit renders a file from a template, sets the appropriate file + permissions, then generates a hashsum with which to check the content + changed. + """ + def __init__(self, path, context, template_dir, mode, user='root', + group='root', service_actions=None, **kwargs): + self.context = context + self.user = user + self.group = group + self.mode = mode + self.template_dir = template_dir + self.service_actions = service_actions + super(TemplatedFile, self).__init__(paths=path, always_comply=True, + **kwargs) + + def is_compliant(self, path): + """Determines if the templated file is compliant. + + A templated file is only compliant if it has not changed (as + determined by its sha256 hashsum) AND its file permissions are set + appropriately. + + :param path: the path to check compliance. + """ + same_templates = self.templates_match(path) + same_content = self.contents_match(path) + same_permissions = self.permissions_match(path) + + if same_content and same_permissions and same_templates: + return True + + return False + + def run_service_actions(self): + """Run any actions on services requested.""" + if not self.service_actions: + return + + for svc_action in self.service_actions: + name = svc_action['service'] + actions = svc_action['actions'] + log("Running service '%s' actions '%s'" % (name, actions), + level=DEBUG) + for action in actions: + cmd = ['service', name, action] + try: + check_call(cmd) + except CalledProcessError as exc: + log("Service name='%s' action='%s' failed - %s" % + (name, action, exc), level=WARNING) + + def comply(self, path): + """Ensures the contents and the permissions of the file. + + :param path: the path to correct + """ + dirname = os.path.dirname(path) + if not os.path.exists(dirname): + os.makedirs(dirname) + + self.pre_write() + render_and_write(self.template_dir, path, self.context()) + utils.ensure_permissions(path, self.user, self.group, self.mode) + self.run_service_actions() + self.save_checksum(path) + self.post_write() + + def pre_write(self): + """Invoked prior to writing the template.""" + pass + + def post_write(self): + """Invoked after writing the template.""" + pass + + def templates_match(self, path): + """Determines if the template files are the same. + + The template file equality is determined by the hashsum of the + template files themselves. If there is no hashsum, then the content + cannot be sure to be the same so treat it as if they changed. + Otherwise, return whether or not the hashsums are the same. + + :param path: the path to check + :returns: boolean + """ + template_path = get_template_path(self.template_dir, path) + key = 'hardening:template:%s' % template_path + template_checksum = file_hash(template_path) + kv = unitdata.kv() + stored_tmplt_checksum = kv.get(key) + if not stored_tmplt_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Saved template checksum for %s.' % template_path, + level=DEBUG) + # Since we don't have a template checksum, then assume it doesn't + # match and return that the template is different. + return False + elif stored_tmplt_checksum != template_checksum: + kv.set(key, template_checksum) + kv.flush() + log('Updated template checksum for %s.' 
% template_path,
+                level=DEBUG)
+            return False
+
+        # Here the template hasn't changed based upon the calculated
+        # checksum of the template and what was previously stored.
+        return True
+
+    def contents_match(self, path):
+        """Determines if the file content is the same.
+
+        This is determined by comparing the hashsum of the file contents and
+        the saved hashsum. If there is no stored hashsum, we cannot be sure
+        the content is unchanged, so the contents are treated as differing.
+        Otherwise, return True if the hashsums are the same, False if they
+        are not the same.
+
+        :param path: the file to check.
+        """
+        checksum = file_hash(path)
+
+        kv = unitdata.kv()
+        stored_checksum = kv.get('hardening:%s' % path)
+        if not stored_checksum:
+            # If the checksum hasn't been generated, return False to ensure
+            # the file is written and the checksum stored.
+            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
+            return False
+        elif stored_checksum != checksum:
+            log('Checksum mismatch for %s.' % path, level=DEBUG)
+            return False
+
+        return True
+
+    def permissions_match(self, path):
+        """Determines if the file owner and permissions match.
+
+        :param path: the path to check.
+        """
+        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
+        return audit.is_compliant(path)
+
+    def save_checksum(self, path):
+        """Calculates and saves the checksum for the path specified.
+
+        :param path: the path of the file to save the checksum.
+        """
+        checksum = file_hash(path)
+        kv = unitdata.kv()
+        kv.set('hardening:%s' % path, checksum)
+        kv.flush()
+
+
+class DeletedFile(BaseFileAudit):
+    """Audit to ensure that a file is deleted."""
+    def __init__(self, paths):
+        super(DeletedFile, self).__init__(paths)
+
+    def is_compliant(self, path):
+        return not os.path.exists(path)
+
+    def comply(self, path):
+        os.remove(path)
+
+
+class FileContentAudit(BaseFileAudit):
+    """Audit the contents of a file."""
+    def __init__(self, paths, cases, **kwargs):
+        # Cases we expect to pass
+        self.pass_cases = cases.get('pass', [])
+        # Cases we expect to fail
+        self.fail_cases = cases.get('fail', [])
+        super(FileContentAudit, self).__init__(paths, **kwargs)
+
+    def is_compliant(self, path):
+        """
+        Given a set of content matching cases i.e. tuple(regex, bool) where
+        the bool value denotes whether or not the regex is expected to match,
+        check that all cases match as expected against the contents of the
+        file. Cases can be expected to pass or fail.
+
+        :param path: Path of file to check.
+        :returns: Boolean value representing whether or not all cases are
+                  found to be compliant.
+        """
+        log("Auditing contents of file '%s'" % (path), level=DEBUG)
+        with open(path, 'r') as fd:
+            contents = fd.read()
+
+        matches = 0
+        for pattern in self.pass_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to pass but instead it failed"
+                    % (pattern), level=WARNING)
+
+        for pattern in self.fail_cases:
+            key = re.compile(pattern, flags=re.MULTILINE)
+            results = re.search(key, contents)
+            if not results:
+                matches += 1
+            else:
+                log("Pattern '%s' was expected to fail but instead it passed"
+                    % (pattern), level=WARNING)
+
+        total = len(self.pass_cases) + len(self.fail_cases)
+        log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
+        return matches == total
+
+    def comply(self, *args, **kwargs):
+        """NOOP since we just issue warnings. This is to avoid the
+        NotImplementedError.
+        """
+        log("Not applying any compliance criteria, only checks.", level=INFO)
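A stand-alone illustration of the content audit (a hedged sketch; the ssh hardening checks later in this patch drive this class the same way, though the exact patterns here are invented for the example):

```python
# Check sshd_config contents: 'pass' patterns must match, 'fail' must not.
from charmhelpers.contrib.hardening.audits.file import FileContentAudit

audit = FileContentAudit('/etc/ssh/sshd_config',
                         {'pass': [r'^PermitRootLogin no$'],
                          'fail': [r'^PasswordAuthentication yes$']})
audit.ensure_compliance()  # logs warnings only; comply() is a no-op
```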
+ """ + log("Not applying any compliance criteria, only checks.", level=INFO) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml new file mode 100644 index 00000000..0f940d4c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml @@ -0,0 +1,16 @@ +# NOTE: this file contains the default configuration for the 'apache' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'apache' as the root key followed by any of the following with new +# values. + +common: + apache_dir: '/etc/apache2' + +hardening: + traceenable: 'off' + allowed_http_methods: "GET POST" + modules_to_disable: [ cgi, cgid ] + servertokens: 'Prod' + honor_cipher_order: 'on' + cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES' diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema new file mode 100644 index 00000000..c112137c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/apache.yaml.schema @@ -0,0 +1,12 @@ +# NOTE: this schema must contain all valid keys from it's associated defaults +# file. It is used to validate user-provided overrides. +common: + apache_dir: + traceenable: + +hardening: + allowed_http_methods: + modules_to_disable: + servertokens: + honor_cipher_order: + cipher_suite: diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml new file mode 100644 index 00000000..682d22bf --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml @@ -0,0 +1,38 @@ +# NOTE: this file contains the default configuration for the 'mysql' hardening +# code. If you want to override any settings you must add them to a file +# called hardening.yaml in the root directory of your charm using the +# name 'mysql' as the root key followed by any of the following with new +# values. 
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml
new file mode 100644
index 00000000..682d22bf
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml
@@ -0,0 +1,38 @@
+# NOTE: this file contains the default configuration for the 'mysql' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'mysql' as the root key followed by any of the following with new
+#       values.
+
+hardening:
+    mysql-conf: /etc/mysql/my.cnf
+    hardening-conf: /etc/mysql/conf.d/hardening.cnf
+
+security:
+    # @see http://www.symantec.com/connect/articles/securing-mysql-step-step
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
+    chroot: None
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
+    safe-user-create: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
+    secure-auth: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
+    skip-symbolic-links: 1
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
+    skip-show-database: True
+
+    # @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
+    local-infile: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
+    allow-suspicious-udfs: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
+    automatic-sp-privileges: 0
+
+    # @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
+    secure-file-priv: /tmp
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
new file mode 100644
index 00000000..2edf325c
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/mysql.yaml.schema
@@ -0,0 +1,15 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+hardening:
+    mysql-conf:
+    hardening-conf:
+security:
+    chroot:
+    safe-user-create:
+    secure-auth:
+    skip-symbolic-links:
+    skip-show-database:
+    local-infile:
+    allow-suspicious-udfs:
+    automatic-sp-privileges:
+    secure-file-priv:
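Overriding one of these defaults works per the README: a hypothetical hardening.yaml in the charm root (the override value below is invented for the example, and the key must exist in mysql.yaml.schema):

```python
# If the charm ships hardening.yaml containing:
#   mysql:
#     security:
#       secure-file-priv: /var/tmp
# the merged value is what the mysql checks see:
from charmhelpers.contrib.hardening.utils import get_settings

print(get_settings('mysql')['security']['secure-file-priv'])
```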
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml
new file mode 100644
index 00000000..9a8627b5
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml
@@ -0,0 +1,68 @@
+# NOTE: this file contains the default configuration for the 'os' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'os' as the root key followed by any of the following with new
+#       values.
+
+general:
+    desktop_enable: False # (type:boolean)
+
+environment:
+    extra_user_paths: []
+    umask: 027
+    root_path: /
+
+auth:
+    pw_max_age: 60
+    # discourage password cycling
+    pw_min_age: 7
+    retries: 5
+    lockout_time: 600
+    timeout: 60
+    allow_homeless: False # (type:boolean)
+    pam_passwdqc_enable: True # (type:boolean)
+    pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
+    root_ttys:
+        console
+        tty1
+        tty2
+        tty3
+        tty4
+        tty5
+        tty6
+    uid_min: 1000
+    gid_min: 1000
+    sys_uid_min: 100
+    sys_uid_max: 999
+    sys_gid_min: 100
+    sys_gid_max: 999
+    chfn_restrict:
+
+security:
+    users_allow: []
+    suid_sgid_enforce: True # (type:boolean)
+    # user-defined blacklist and whitelist
+    suid_sgid_blacklist: []
+    suid_sgid_whitelist: []
+    # if this is True, remove any suid/sgid bits from files that were not in the whitelist
+    suid_sgid_dry_run_on_unknown: False # (type:boolean)
+    suid_sgid_remove_from_unknown: False # (type:boolean)
+    # remove packages with known issues
+    packages_clean: True # (type:boolean)
+    packages_list:
+        xinetd
+        inetd
+        ypserv
+        telnet-server
+        rsh-server
+        rsync
+    kernel_enable_module_loading: True # (type:boolean)
+    kernel_enable_core_dump: False # (type:boolean)
+    ssh_tmout: 300
+
+sysctl:
+    kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
+    kernel_enable_sysrq: False # (type:boolean)
+    forwarding: False # (type:boolean)
+    ipv6_enable: False # (type:boolean)
+    arp_restricted: True # (type:boolean)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema
new file mode 100644
index 00000000..cc3b9c20
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/os.yaml.schema
@@ -0,0 +1,43 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+general:
+    desktop_enable:
+environment:
+    extra_user_paths:
+    umask:
+    root_path:
+auth:
+    pw_max_age:
+    pw_min_age:
+    retries:
+    lockout_time:
+    timeout:
+    allow_homeless:
+    pam_passwdqc_enable:
+    pam_passwdqc_options:
+    root_ttys:
+    uid_min:
+    gid_min:
+    sys_uid_min:
+    sys_uid_max:
+    sys_gid_min:
+    sys_gid_max:
+    chfn_restrict:
+security:
+    users_allow:
+    suid_sgid_enforce:
+    suid_sgid_blacklist:
+    suid_sgid_whitelist:
+    suid_sgid_dry_run_on_unknown:
+    suid_sgid_remove_from_unknown:
+    packages_clean:
+    packages_list:
+    kernel_enable_module_loading:
+    kernel_enable_core_dump:
+    ssh_tmout:
+sysctl:
+    kernel_secure_sysrq:
+    kernel_enable_sysrq:
+    forwarding:
+    ipv6_enable:
+    arp_restricted:
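The os settings above feed several of the host checks later in this patch (apt, limits, login, sysctl, suid/sgid); reading one of them looks like this hedged sketch:

```python
# e.g. the sysctl check consults this flag when building its audits;
# the '# (type:boolean)' annotations above document the expected type
# for overrides.
from charmhelpers.contrib.hardening.utils import get_settings

ipv6_enabled = get_settings('os')['sysctl']['ipv6_enable']
```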
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml
new file mode 100644
index 00000000..cd529bca
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml
@@ -0,0 +1,49 @@
+# NOTE: this file contains the default configuration for the 'ssh' hardening
+#       code. If you want to override any settings you must add them to a file
+#       called hardening.yaml in the root directory of your charm using the
+#       name 'ssh' as the root key followed by any of the following with new
+#       values.
+
+common:
+    service_name: 'ssh'
+    network_ipv6_enable: False # (type:boolean)
+    ports: [22]
+    remote_hosts: []
+
+client:
+    package: 'openssh-client'
+    cbc_required: False # (type:boolean)
+    weak_hmac: False # (type:boolean)
+    weak_kex: False # (type:boolean)
+    roaming: False
+    password_authentication: 'no'
+
+server:
+    host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
+                     '/etc/ssh/ssh_host_ecdsa_key']
+    cbc_required: False # (type:boolean)
+    weak_hmac: False # (type:boolean)
+    weak_kex: False # (type:boolean)
+    allow_root_with_key: False # (type:boolean)
+    allow_tcp_forwarding: 'no'
+    allow_agent_forwarding: 'no'
+    allow_x11_forwarding: 'no'
+    use_privilege_separation: 'sandbox'
+    listen_to: ['0.0.0.0']
+    use_pam: 'no'
+    package: 'openssh-server'
+    password_authentication: 'no'
+    alive_interval: '600'
+    alive_count: '3'
+    sftp_enable: False # (type:boolean)
+    sftp_group: 'sftponly'
+    sftp_chroot: '/home/%u'
+    deny_users: []
+    allow_users: []
+    deny_groups: []
+    allow_groups: []
+    print_motd: 'no'
+    print_last_log: 'no'
+    use_dns: 'no'
+    max_auth_tries: 2
+    max_sessions: 10
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
new file mode 100644
index 00000000..d05e054b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/defaults/ssh.yaml.schema
@@ -0,0 +1,42 @@
+# NOTE: this schema must contain all valid keys from its associated defaults
+#       file. It is used to validate user-provided overrides.
+common:
+    service_name:
+    network_ipv6_enable:
+    ports:
+    remote_hosts:
+client:
+    package:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    roaming:
+    password_authentication:
+server:
+    host_key_files:
+    cbc_required:
+    weak_hmac:
+    weak_kex:
+    allow_root_with_key:
+    allow_tcp_forwarding:
+    allow_agent_forwarding:
+    allow_x11_forwarding:
+    use_privilege_separation:
+    listen_to:
+    use_pam:
+    package:
+    password_authentication:
+    alive_interval:
+    alive_count:
+    sftp_enable:
+    sftp_group:
+    sftp_chroot:
+    deny_users:
+    allow_users:
+    deny_groups:
+    allow_groups:
+    print_motd:
+    print_last_log:
+    use_dns:
+    max_auth_tries:
+    max_sessions:
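These values are rendered into the ssh_config/sshd_config templates by the ssh checks later in this patch; reading them looks like the following sketch (the exact template mapping, e.g. max_auth_tries becoming a MaxAuthTries directive, is an assumption based on the template names):

```python
# Read the merged ssh server hardening settings.
from charmhelpers.contrib.hardening.utils import get_settings

server = get_settings('ssh')['server']
# e.g. server['max_auth_tries'] == 2 per the defaults above
```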
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/harden.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/harden.py
new file mode 100644
index 00000000..63f21b9c
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/harden.py
@@ -0,0 +1,96 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+
+from collections import OrderedDict
+
+from charmhelpers.core.hookenv import (
+    config,
+    log,
+    DEBUG,
+    WARNING,
+)
+from charmhelpers.contrib.hardening.host.checks import run_os_checks
+from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
+from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
+from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
+
+_DISABLE_HARDENING_FOR_UNIT_TEST = False
+
+
+def harden(overrides=None):
+    """Hardening decorator.
+
+    This is the main entry point for running the hardening stack. In order to
+    run modules of the stack you must add this decorator to charm hook(s) and
+    ensure that your charm config.yaml contains the 'harden' option set to
+    one or more of the supported modules. Setting these will cause the
+    corresponding hardening code to be run when the hook fires.
+
+    This decorator can and should be applied to more than one hook or function
+    such that hardening modules are called multiple times. This is because
+    subsequent calls will perform auditing checks that will report any changes
+    to resources hardened by the first run (and possibly perform compliance
+    actions as a result of any detected infractions).
+
+    :param overrides: Optional list of stack modules used to override those
+                      provided with 'harden' config.
+    :returns: The value returned by the decorated function once executed.
+    """
+    if overrides is None:
+        overrides = []
+
+    def _harden_inner1(f):
+        # As this has to be py2.7 compat, we can't use nonlocal. Use a trick
+        # to capture the dictionary that can then be updated.
+        _logged = {'done': False}
+
+        def _harden_inner2(*args, **kwargs):
+            # Skip hardening entirely when the module-level unit-test flag is
+            # set; in normal operation hardening stays enabled.
+            if _DISABLE_HARDENING_FOR_UNIT_TEST:
+                return f(*args, **kwargs)
+            if not _logged['done']:
+                log("Hardening function '%s'" % (f.__name__), level=DEBUG)
+                _logged['done'] = True
+            RUN_CATALOG = OrderedDict([('os', run_os_checks),
+                                       ('ssh', run_ssh_checks),
+                                       ('mysql', run_mysql_checks),
+                                       ('apache', run_apache_checks)])
+
+            enabled = overrides[:] or (config("harden") or "").split()
+            if enabled:
+                modules_to_run = []
+                # modules will always be performed in the following order
+                for module, func in six.iteritems(RUN_CATALOG):
+                    if module in enabled:
+                        enabled.remove(module)
+                        modules_to_run.append(func)
+
+                if enabled:
+                    log("Unknown hardening modules '%s' - ignoring" %
+                        (', '.join(enabled)), level=WARNING)
+
+                for hardener in modules_to_run:
+                    log("Executing hardening module '%s'" %
+                        (hardener.__name__), level=DEBUG)
+                    hardener()
+            else:
+                log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
+
+            return f(*args, **kwargs)
+        return _harden_inner2
+
+    return _harden_inner1
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/__init__.py
new file mode 100644
index 00000000..58bebd84
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/__init__.py new file mode 100644 index 00000000..0e7e409f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/__init__.py @@ -0,0 +1,48 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.host.checks import ( + apt, + limits, + login, + minimize_access, + pam, + profile, + securetty, + suid_sgid, + sysctl +) + + +def run_os_checks(): + log("Starting OS hardening checks.", level=DEBUG) + checks = apt.get_audits() + checks.extend(limits.get_audits()) + checks.extend(login.get_audits()) + checks.extend(minimize_access.get_audits()) + checks.extend(pam.get_audits()) + checks.extend(profile.get_audits()) + checks.extend(securetty.get_audits()) + checks.extend(suid_sgid.get_audits()) + checks.extend(sysctl.get_audits()) + + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("OS hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/apt.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/apt.py new file mode 100644 index 00000000..7ce41b00 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/apt.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.utils import get_settings +from charmhelpers.contrib.hardening.audits.apt import ( + AptConfig, + RestrictedPackages, +) + + +def get_audits(): + """Get OS hardening apt audits. 
+ + :returns: dictionary of audits + """ + audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated', + 'expected': 'false'}])] + + settings = get_settings('os') + clean_packages = settings['security']['packages_clean'] + if clean_packages: + security_packages = settings['security']['packages_list'] + if security_packages: + audits.append(RestrictedPackages(security_packages)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/limits.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/limits.py new file mode 100644 index 00000000..e94f5ebe --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/limits.py @@ -0,0 +1,53 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import ( + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening security limits audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Ensure that the /etc/security/limits.d directory is only writable + # by the root user, but others can execute and read. + audits.append(DirectoryPermissionAudit('/etc/security/limits.d', + user='root', group='root', + mode=0o755)) + + # If core dumps are not enabled, then don't allow core dumps to be + # created as they may contain sensitive information. + if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf', + SecurityLimitsContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0440)) + return audits + + +class SecurityLimitsContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'disable_core_dump': + not settings['security']['kernel_enable_core_dump']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/login.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/login.py new file mode 100644 index 00000000..fe2bc6ef --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/login.py @@ -0,0 +1,65 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from six import string_types + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening login.defs audits. + + :returns: dictionary of audits + """ + audits = [TemplatedFile('/etc/login.defs', LoginContext(), + template_dir=TEMPLATES_DIR, + user='root', group='root', mode=0o0444)] + return audits + + +class LoginContext(object): + + def __call__(self): + settings = utils.get_settings('os') + + # Octal numbers in yaml end up being turned into decimal, + # so check if the umask is entered as a string (e.g. '027') + # or as an octal umask as we know it (e.g. 002). If its not + # a string assume it to be octal and turn it into an octal + # string. + umask = settings['environment']['umask'] + if not isinstance(umask, string_types): + umask = '%s' % oct(umask) + + ctxt = { + 'additional_user_paths': + settings['environment']['extra_user_paths'], + 'umask': umask, + 'pwd_max_age': settings['auth']['pw_max_age'], + 'pwd_min_age': settings['auth']['pw_min_age'], + 'uid_min': settings['auth']['uid_min'], + 'sys_uid_min': settings['auth']['sys_uid_min'], + 'sys_uid_max': settings['auth']['sys_uid_max'], + 'gid_min': settings['auth']['gid_min'], + 'sys_gid_min': settings['auth']['sys_gid_min'], + 'sys_gid_max': settings['auth']['sys_gid_max'], + 'login_retries': settings['auth']['retries'], + 'login_timeout': settings['auth']['timeout'], + 'chfn_restrict': settings['auth']['chfn_restrict'], + 'allow_login_without_home': settings['auth']['allow_homeless'] + } + + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py new file mode 100644 index 00000000..6e64be00 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/minimize_access.py @@ -0,0 +1,50 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + ReadOnly, +) +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening access audits. + + :returns: dictionary of audits + """ + audits = [] + settings = utils.get_settings('os') + + # Remove write permissions from $PATH folders for all regular users. + # This prevents changing system-wide commands from normal users. + path_folders = {'/usr/local/sbin', + '/usr/local/bin', + '/usr/sbin', + '/usr/bin', + '/bin'} + extra_user_paths = settings['environment']['extra_user_paths'] + path_folders.update(extra_user_paths) + audits.append(ReadOnly(path_folders)) + + # Only allow the root user to have access to the shadow file. 
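+    # (On Debian/Ubuntu the shadow file normally ships as root:shadow 0640;
+    # this audit tightens it to root:root 0600.)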
+ audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600)) + + if 'change_user' not in settings['security']['users_allow']: + # su should only be accessible to user and group root, unless it is + # expressly defined to allow users to change to root via the + # security_users_allow config option. + audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750)) + + return audits diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/pam.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/pam.py new file mode 100644 index 00000000..9b38d5f0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/pam.py @@ -0,0 +1,132 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from subprocess import ( + check_output, + CalledProcessError, +) + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + ERROR, +) +from charmhelpers.fetch import ( + apt_install, + apt_purge, + apt_update, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + DeletedFile, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +def get_audits(): + """Get OS hardening PAM authentication audits. + + :returns: dictionary of audits + """ + audits = [] + + settings = utils.get_settings('os') + + if settings['auth']['pam_passwdqc_enable']: + audits.append(PasswdqcPAM('/etc/passwdqc.conf')) + + if settings['auth']['retries']: + audits.append(Tally2PAM('/usr/share/pam-configs/tally2')) + else: + audits.append(DeletedFile('/usr/share/pam-configs/tally2')) + + return audits + + +class PasswdqcPAMContext(object): + + def __call__(self): + ctxt = {} + settings = utils.get_settings('os') + + ctxt['auth_pam_passwdqc_options'] = \ + settings['auth']['pam_passwdqc_options'] + + return ctxt + + +class PasswdqcPAM(TemplatedFile): + """The PAM Audit verifies the linux PAM settings.""" + def __init__(self, path): + super(PasswdqcPAM, self).__init__(path=path, + template_dir=TEMPLATES_DIR, + context=PasswdqcPAMContext(), + user='root', + group='root', + mode=0o0640) + + def pre_write(self): + # Always remove? 
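+        # The packages below provide PAM modules that conflict with passwdqc
+        # (the shipped pam-config template declares "Conflicts: cracklib"),
+        # so purge them before installing libpam-passwdqc.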
+        for pkg in ['libpam-ccreds', 'libpam-cracklib']:
+            log("Purging package '%s'" % pkg, level=DEBUG)
+            apt_purge(pkg)
+
+        apt_update(fatal=True)
+        for pkg in ['libpam-passwdqc']:
+            log("Installing package '%s'" % pkg, level=DEBUG)
+            apt_install(pkg)
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
+
+
+class Tally2PAMContext(object):
+
+    def __call__(self):
+        ctxt = {}
+        settings = utils.get_settings('os')
+
+        ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
+        ctxt['auth_retries'] = settings['auth']['retries']
+
+        return ctxt
+
+
+class Tally2PAM(TemplatedFile):
+    """The PAM Audit verifies the linux PAM settings."""
+    def __init__(self, path):
+        super(Tally2PAM, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=Tally2PAMContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0640)
+
+    def pre_write(self):
+        # Always remove?
+        apt_purge('libpam-ccreds')
+        apt_update(fatal=True)
+        apt_install('libpam-modules')
+
+    def post_write(self):
+        """Updates the PAM configuration after the file has been written"""
+        try:
+            check_output(['pam-auth-update', '--package'])
+        except CalledProcessError as e:
+            log('Error calling pam-auth-update: %s' % e, level=ERROR)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/profile.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/profile.py
new file mode 100644
index 00000000..2727428d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/profile.py
@@ -0,0 +1,49 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from charmhelpers.contrib.hardening.audits.file import TemplatedFile
+from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
+from charmhelpers.contrib.hardening import utils
+
+
+def get_audits():
+    """Get OS hardening profile audits.
+
+    :returns: list of audits
+    """
+    audits = []
+
+    settings = utils.get_settings('os')
+    # If core dumps are not enabled, then don't allow core dumps to be
+    # created as they may contain sensitive information.
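+    # (The rendered pinerolo_profile.sh only sets a soft limit, ulimit -S -c 0,
+    # so users may still raise it; the hard limit is pinned to zero by the
+    # companion limits.d template 10.hardcore.conf.)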
+ if not settings['security']['kernel_enable_core_dump']: + audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0755, user='root', group='root')) + if settings['security']['ssh_tmout']: + audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh', + ProfileContext(), + template_dir=TEMPLATES_DIR, + mode=0o0644, user='root', group='root')) + return audits + + +class ProfileContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ssh_tmout': + settings['security']['ssh_tmout']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/securetty.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/securetty.py new file mode 100644 index 00000000..34cd0217 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/securetty.py @@ -0,0 +1,37 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.contrib.hardening.audits.file import TemplatedFile +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get OS hardening Secure TTY audits. + + :returns: dictionary of audits + """ + audits = [] + audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(), + template_dir=TEMPLATES_DIR, + mode=0o0400, user='root', group='root')) + return audits + + +class SecureTTYContext(object): + + def __call__(self): + settings = utils.get_settings('os') + ctxt = {'ttys': settings['auth']['root_ttys']} + return ctxt diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py new file mode 100644 index 00000000..bcbe3fde --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/suid_sgid.py @@ -0,0 +1,129 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
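+
+# This module audits binaries carrying the setuid/setgid bits. A rough shell
+# equivalent of the filesystem scan done by find_paths_with_suid_sgid() below
+# (illustrative only):
+#
+#     find / -perm -4000 -o -perm -2000 -type f ! -path '/proc/*' -print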
+
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
+from charmhelpers.contrib.hardening import utils
+
+
+BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
+             '/usr/libexec/openssh/ssh-keysign',
+             '/usr/lib/openssh/ssh-keysign',
+             '/sbin/netreport',
+             '/usr/sbin/usernetctl',
+             '/usr/sbin/userisdnctl',
+             '/usr/sbin/pppd',
+             '/usr/bin/lockfile',
+             '/usr/bin/mail-lock',
+             '/usr/bin/mail-unlock',
+             '/usr/bin/mail-touchlock',
+             '/usr/bin/dotlockfile',
+             '/usr/bin/arping',
+             '/usr/sbin/uuidd',
+             '/usr/bin/mtr',
+             '/usr/lib/evolution/camel-lock-helper-1.2',
+             '/usr/lib/pt_chown',
+             '/usr/lib/eject/dmcrypt-get-device',
+             '/usr/lib/mc/cons.saver']
+
+WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
+             '/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
+             '/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
+             '/usr/bin/passwd', '/usr/bin/ssh-agent',
+             '/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
+             '/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
+             '/bin/ping6', '/usr/bin/traceroute6.iputils',
+             '/sbin/mount.nfs', '/sbin/umount.nfs',
+             '/sbin/mount.nfs4', '/sbin/umount.nfs4',
+             '/usr/bin/crontab',
+             '/usr/bin/wall', '/usr/bin/write',
+             '/usr/bin/screen',
+             '/usr/bin/mlocate',
+             '/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
+             '/bin/fusermount',
+             '/usr/bin/pkexec',
+             '/usr/bin/sudo', '/usr/bin/sudoedit',
+             '/usr/sbin/postdrop', '/usr/sbin/postqueue',
+             '/usr/sbin/suexec',
+             '/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
+             '/usr/kerberos/bin/ksu',
+             '/usr/sbin/ccreds_validate',
+             '/usr/bin/Xorg',
+             '/usr/bin/X',
+             '/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
+             '/usr/lib/vte/gnome-pty-helper',
+             '/usr/lib/libvte9/gnome-pty-helper',
+             '/usr/lib/libvte-2.90-9/gnome-pty-helper']
+
+
+def get_audits():
+    """Get OS hardening suid/sgid audits.
+
+    :returns: list of audits
+    """
+    checks = []
+    settings = utils.get_settings('os')
+    if not settings['security']['suid_sgid_enforce']:
+        log("Skipping suid/sgid hardening", level=INFO)
+        return checks
+
+    # Build the blacklist and whitelist of files for suid/sgid checks.
+    # There are a total of 4 lists:
+    #   1. the system blacklist
+    #   2. the system whitelist
+    #   3. the user blacklist
+    #   4. the user whitelist
+    #
+    # The blacklist is the set of paths which should NOT have the suid/sgid bit
+    # set and the whitelist is the set of paths which MAY have the suid/sgid
+    # bit set. The user whitelist/blacklist effectively override the system
+    # whitelist/blacklist.
+    u_b = settings['security']['suid_sgid_blacklist']
+    u_w = settings['security']['suid_sgid_whitelist']
+
+    blacklist = set(BLACKLIST) - set(u_w + u_b)
+    whitelist = set(WHITELIST) - set(u_b + u_w)
+
+    checks.append(NoSUIDSGIDAudit(blacklist))
+
+    dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
+
+    if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
+        # If the policy is a dry_run (e.g. complain only) or remove unknown
+        # suid/sgid bits then find all of the paths which have the suid/sgid
+        # bit set and then remove the whitelisted paths.
+        root_path = settings['environment']['root_path']
+        unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
+        checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
+
+    return checks
+
+
+def find_paths_with_suid_sgid(root_path):
+    """Finds all paths/files which have an suid/sgid bit enabled.
+ + Starting with the root_path, this will recursively find all paths which + have an suid or sgid bit set. + """ + cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000', + '-type', 'f', '!', '-path', '/proc/*', '-print'] + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, _ = p.communicate() + return set(out.split('\n')) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py new file mode 100644 index 00000000..f1ea5813 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/checks/sysctl.py @@ -0,0 +1,209 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import platform +import re +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + INFO, + WARNING, +) +from charmhelpers.contrib.hardening import utils +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.host import TEMPLATES_DIR + + +SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s +net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s +net.ipv4.conf.all.rp_filter=1 +net.ipv4.conf.default.rp_filter=1 +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_ratelimit=100 +net.ipv4.icmp_ratemask=88089 +net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s +net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s +net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s +net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s +net.ipv4.tcp_rfc1337=1 +net.ipv4.tcp_syncookies=1 +net.ipv4.conf.all.shared_media=1 +net.ipv4.conf.default.shared_media=1 +net.ipv4.conf.all.accept_source_route=0 +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv4.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv4.conf.all.secure_redirects=0 +net.ipv4.conf.default.secure_redirects=0 +net.ipv4.conf.all.send_redirects=0 +net.ipv4.conf.default.send_redirects=0 +net.ipv4.conf.all.log_martians=0 +net.ipv6.conf.default.router_solicitations=0 +net.ipv6.conf.default.accept_ra_rtr_pref=0 +net.ipv6.conf.default.accept_ra_pinfo=0 +net.ipv6.conf.default.accept_ra_defrtr=0 +net.ipv6.conf.default.autoconf=0 +net.ipv6.conf.default.dad_transmits=0 +net.ipv6.conf.default.max_addresses=1 +net.ipv6.conf.all.accept_ra=0 +net.ipv6.conf.default.accept_ra=0 +kernel.modules_disabled=%(kernel_modules_disabled)s +kernel.sysrq=%(kernel_sysrq)s +fs.suid_dumpable=%(fs_suid_dumpable)s +kernel.randomize_va_space=2 +""" + + +def get_audits(): + """Get OS hardening sysctl audits. 
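+
+    The main audit (SysctlConf below) renders
+    /etc/sysctl.d/99-juju-hardening.conf and then applies it from its
+    post_write() hook, roughly equivalent to:
+
+        subprocess.check_call(['sysctl', '-p',
+                               '/etc/sysctl.d/99-juju-hardening.conf'])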
+
+    :returns: list of audits
+    """
+    audits = []
+    settings = utils.get_settings('os')
+
+    # Apply the sysctl settings which are configured to be applied.
+    audits.append(SysctlConf())
+    # Make sure that only root has access to the sysctl.conf file, and
+    # that it is read-only.
+    audits.append(FilePermissionAudit('/etc/sysctl.conf',
+                                      user='root',
+                                      group='root', mode=0o0440))
+    # If module loading is not enabled, then ensure that the modules
+    # file has the appropriate permissions and rebuild the initramfs
+    if not settings['security']['kernel_enable_module_loading']:
+        audits.append(ModulesTemplate())
+
+    return audits
+
+
+class ModulesContext(object):
+
+    def __call__(self):
+        settings = utils.get_settings('os')
+        with open('/proc/cpuinfo', 'r') as fd:
+            cpuinfo = fd.readlines()
+
+        for line in cpuinfo:
+            match = re.search(r"^vendor_id\s+:\s+(.+)", line)
+            if match:
+                vendor = match.group(1)
+
+        if vendor == "GenuineIntel":
+            vendor = "intel"
+        elif vendor == "AuthenticAMD":
+            vendor = "amd"
+
+        ctxt = {'arch': platform.processor(),
+                'cpuVendor': vendor,
+                'desktop_enable': settings['general']['desktop_enable']}
+
+        return ctxt
+
+
+class ModulesTemplate(TemplatedFile):
+
+    def __init__(self):
+        super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
+                                              ModulesContext(),
+                                              template_dir=TEMPLATES_DIR,
+                                              user='root', group='root',
+                                              mode=0o0440)
+
+    def post_write(self):
+        subprocess.check_call(['update-initramfs', '-u'])
+
+
+class SysCtlHardeningContext(object):
+    def __call__(self):
+        settings = utils.get_settings('os')
+        ctxt = {'sysctl': {}}
+
+        log("Applying sysctl settings", level=INFO)
+        extras = {'net_ipv4_ip_forward': 0,
+                  'net_ipv6_conf_all_forwarding': 0,
+                  'net_ipv6_conf_all_disable_ipv6': 1,
+                  'net_ipv4_tcp_timestamps': 0,
+                  'net_ipv4_conf_all_arp_ignore': 0,
+                  'net_ipv4_conf_all_arp_announce': 0,
+                  'kernel_sysrq': 0,
+                  'fs_suid_dumpable': 0,
+                  'kernel_modules_disabled': 1}
+
+        if settings['sysctl']['ipv6_enable']:
+            extras['net_ipv6_conf_all_disable_ipv6'] = 0
+
+        if settings['sysctl']['forwarding']:
+            extras['net_ipv4_ip_forward'] = 1
+            extras['net_ipv6_conf_all_forwarding'] = 1
+
+        if settings['sysctl']['arp_restricted']:
+            extras['net_ipv4_conf_all_arp_ignore'] = 1
+            extras['net_ipv4_conf_all_arp_announce'] = 2
+
+        if settings['security']['kernel_enable_module_loading']:
+            extras['kernel_modules_disabled'] = 0
+
+        if settings['sysctl']['kernel_enable_sysrq']:
+            sysrq_val = settings['sysctl']['kernel_secure_sysrq']
+            extras['kernel_sysrq'] = sysrq_val
+
+        if settings['security']['kernel_enable_core_dump']:
+            extras['fs_suid_dumpable'] = 1
+
+        settings.update(extras)
+        for d in (SYSCTL_DEFAULTS % settings).split():
+            d = d.strip().partition('=')
+            key = d[0].strip()
+            path = os.path.join('/proc/sys', key.replace('.', '/'))
+            if not os.path.exists(path):
+                log("Skipping '%s' since '%s' does not exist" % (key, path),
+                    level=WARNING)
+                continue
+
+            ctxt['sysctl'][key] = d[2] or None
+
+        # Translate for python3
+        return {'sysctl_settings':
+                [(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
+
+
+class SysctlConf(TemplatedFile):
+    """An audit check for sysctl settings."""
+    def __init__(self):
+        self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
+        super(SysctlConf, self).__init__(self.conffile,
+                                         SysCtlHardeningContext(),
+                                         template_dir=TEMPLATES_DIR,
+                                         user='root', group='root',
+                                         mode=0o0440)
+
+    def post_write(self):
+        try:
+            subprocess.check_call(['sysctl', '-p', self.conffile])
+        except subprocess.CalledProcessError as e:
+            # NOTE: on some systems if sysctl cannot apply all settings it
+            # will return non-zero as well.
+            log("sysctl command returned an error (maybe some "
+                "keys could not be set) - %s" % (e),
+                level=WARNING)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
new file mode 100644
index 00000000..0014191f
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/10.hardcore.conf
@@ -0,0 +1,8 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+{% if disable_core_dump -%}
+# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
+* hard core 0
+{% endif %}
\ No newline at end of file
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
new file mode 100644
index 00000000..616cef46
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-hardening.sh
@@ -0,0 +1,5 @@
+TMOUT={{ ssh_tmout }}
+readonly TMOUT
+export TMOUT
+
+readonly HISTFILE
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
new file mode 100644
index 00000000..101f1e1d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/99-juju-hardening.conf
@@ -0,0 +1,7 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+{% for key, value in sysctl_settings -%}
+{{ key }}={{ value }}
+{% endfor -%}
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/login.defs b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/login.defs
new file mode 100644
index 00000000..db137d6d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/login.defs
@@ -0,0 +1,349 @@
+###############################################################################
+# WARNING: This configuration file is maintained by Juju. Local changes may
+# be overwritten.
+###############################################################################
+#
+# /etc/login.defs - Configuration control definitions for the login package.
+#
+# Three items must be defined:  MAIL_DIR, ENV_SUPATH, and ENV_PATH.
+# If unspecified, some arbitrary (and possibly incorrect) value will
+# be assumed.  All other items are optional - if not specified then
+# the described action or option will be inhibited.
+#
+# Comment lines (lines beginning with "#") and blank lines are ignored.
+#
+# Modified for Linux.  --marekm
+
+# REQUIRED for useradd/userdel/usermod
+#   Directory where mailboxes reside, _or_ name of file, relative to the
+#   home directory.  If you _do_ define MAIL_DIR and MAIL_FILE,
+#   MAIL_DIR takes precedence.
+#
+#   Essentially:
+#      - MAIL_DIR defines the location of users' mail spool files
+#        (for mbox use) by appending the username to MAIL_DIR as defined
+#        below.
+#      - MAIL_FILE defines the location of the user's mail spool files as the
+#        fully-qualified filename obtained by prepending the user's home
+#        directory before $MAIL_FILE
+#
+# NOTE: This is no longer used for setting up the user's MAIL environment
+#       variable which is, starting from shadow 4.0.12-1 in Debian, entirely
+#       the job of the pam_mail PAM modules
+#       See default PAM configuration files provided for
+#       login, su, etc.
+#
+# This is a temporary situation: setting these variables will soon
+# move to /etc/default/useradd and the variables will then no
+# longer be supported
+MAIL_DIR        /var/mail
+#MAIL_FILE      .mail
+
+#
+# Enable logging and display of /var/log/faillog login failure info.
+# This option conflicts with the pam_tally PAM module.
+#
+FAILLOG_ENAB            yes
+
+#
+# Enable display of unknown usernames when login failures are recorded.
+#
+# WARNING: Unknown usernames may become world readable.
+# See #290803 and #298773 for details about how this could become a security
+# concern
+LOG_UNKFAIL_ENAB        no
+
+#
+# Enable logging of successful logins
+#
+LOG_OK_LOGINS           yes
+
+#
+# Enable "syslog" logging of su activity - in addition to sulog file logging.
+# SYSLOG_SG_ENAB does the same for newgrp and sg.
+#
+SYSLOG_SU_ENAB          yes
+SYSLOG_SG_ENAB          yes
+
+#
+# If defined, all su activity is logged to this file.
+#
+#SULOG_FILE     /var/log/sulog
+
+#
+# If defined, file which maps tty line to TERM environment parameter.
+# Each line of the file is in a format something like "vt100  tty01".
+#
+#TTYTYPE_FILE   /etc/ttytype
+
+#
+# If defined, login failures will be logged here in a utmp format
+# last, when invoked as lastb, will read /var/log/btmp, so...
+#
+FTMP_FILE       /var/log/btmp
+
+#
+# If defined, the command name to display when running "su -".  For
+# example, if this is defined as "su" then a "ps" will display the
+# command is "-su".  If not defined, then "ps" would display the
+# name of the shell actually being run, e.g. something like "-sh".
+#
+SU_NAME         su
+
+#
+# If defined, file which inhibits all the usual chatter during the login
+# sequence.  If a full pathname, then hushed mode will be enabled if the
+# user's name or shell are found in the file.  If not a full pathname, then
+# hushed mode will be enabled if the file exists in the user's home directory.
+#
+HUSHLOGIN_FILE  .hushlogin
+#HUSHLOGIN_FILE /etc/hushlogins
+
+#
+# *REQUIRED*  The default PATH settings, for superuser and normal users.
+#
+# (they are minimal, add the rest in the shell startup files)
+ENV_SUPATH      PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ENV_PATH        PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
+
+#
+# Terminal permissions
+#
+#       TTYGROUP        Login tty will be assigned this group ownership.
+#       TTYPERM         Login tty will be set to this permission.
+#
+# If you have a "write" program which is "setgid" to a special group
+# which owns the terminals, define TTYGROUP to the group number and
+# TTYPERM to 0620.  Otherwise leave TTYGROUP commented out and assign
+# TTYPERM to either 622 or 600.
+#
+# In Debian /usr/bin/bsd-write or similar programs are setgid tty
+# However, the default and recommended value for TTYPERM is still 0600
+# to not allow anyone to write to anyone else's console or terminal
+
+# Users can still allow other people to write them by issuing
+# the "mesg y" command.
+
+TTYGROUP        tty
+TTYPERM         0600
+
+#
+# Login configuration initializations:
+#
+#       ERASECHAR       Terminal ERASE character ('\010' = backspace).
+#       KILLCHAR        Terminal KILL character ('\025' = CTRL/U).
+#       UMASK           Default "umask" value.
+#
+# The ERASECHAR and KILLCHAR are used only on System V machines.
+#
+# UMASK is the default umask value for pam_umask and is used by
+# useradd and newusers to set the mode of the new home directories.
+# 022 is the "historical" value in Debian for UMASK
+# 027, or even 077, could be considered better for privacy
+# There is no One True Answer here : each sysadmin must make up his/her
+# mind.
+#
+# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
+# for private user groups, i. e. the uid is the same as gid, and username is
+# the same as the primary group name: for these, the user permissions will be
+# used as group permissions, e. g. 022 will become 002.
+#
+# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
+#
+ERASECHAR       0177
+KILLCHAR        025
+UMASK           {{ umask }}
+
+# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
+# If set to yes, userdel will remove the user's group if it contains no more members, and useradd will create by default a group with the name of the user.
+USERGROUPS_ENAB yes
+
+#
+# Password aging controls:
+#
+#       PASS_MAX_DAYS   Maximum number of days a password may be used.
+#       PASS_MIN_DAYS   Minimum number of days allowed between password changes.
+#       PASS_WARN_AGE   Number of days warning given before a password expires.
+#
+PASS_MAX_DAYS   {{ pwd_max_age }}
+PASS_MIN_DAYS   {{ pwd_min_age }}
+PASS_WARN_AGE   7
+
+#
+# Min/max values for automatic uid selection in useradd
+#
+UID_MIN                  {{ uid_min }}
+UID_MAX                 60000
+# System accounts
+SYS_UID_MIN              {{ sys_uid_min }}
+SYS_UID_MAX              {{ sys_uid_max }}
+
+# Min/max values for automatic gid selection in groupadd
+GID_MIN                  {{ gid_min }}
+GID_MAX                 60000
+# System accounts
+SYS_GID_MIN              {{ sys_gid_min }}
+SYS_GID_MAX              {{ sys_gid_max }}
+
+#
+# Max number of login retries if password is bad. This will most likely be
+# overridden by PAM, since the default pam_unix module has its own built-in
+# limit of 3 retries. However, this is a safe fallback in case you are using
+# an authentication module that does not enforce PAM_MAXTRIES.
+#
+LOGIN_RETRIES           {{ login_retries }}
+
+#
+# Max time in seconds for login
+#
+LOGIN_TIMEOUT           {{ login_timeout }}
+
+#
+# Which fields may be changed by regular users using chfn - use
+# any combination of letters "frwh" (full name, room number, work
+# phone, home phone).  If not defined, no changes are allowed.
+# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
+#
+{% if chfn_restrict %}
+CHFN_RESTRICT           {{ chfn_restrict }}
+{% endif %}
+
+#
+# Should login be allowed if we can't cd to the home directory?
+# Default is no.
+#
+DEFAULT_HOME    {% if allow_login_without_home %} yes {% else %} no {% endif %}
+
+#
+# If defined, this command is run when removing a user.
+# It should remove any at/cron/print jobs etc. owned by
+# the user to be removed (passed as the first argument).
+#
+#USERDEL_CMD    /usr/sbin/userdel_local
+
+#
+# Enable setting of the umask group bits to be the same as owner bits
+# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
+# the same as gid, and username is the same as the primary group name.
+#
+# If set to yes, userdel will remove the user's group if it contains no
+# more members, and useradd will create by default a group with the name
+# of the user.
+#
+USERGROUPS_ENAB yes
+
+#
+# Instead of the real user shell, the program specified by this parameter
+# will be launched, although its visible name (argv[0]) will be the shell's.
+# The program may do whatever it wants (logging, additional authentication,
+# banner, ...) before running the actual shell.
+#
+# FAKE_SHELL /bin/fakeshell
+
+#
+# If defined, either full pathname of a file containing device names or
+# a ":" delimited list of device names.  Root logins will be allowed only
+# upon these devices.
+#
+# This variable is used by login and su.
+#
+#CONSOLE        /etc/consoles
+#CONSOLE        console:tty01:tty02:tty03:tty04
+
+#
+# List of groups to add to the user's supplementary group set
+# when logging in on the console (as determined by the CONSOLE
+# setting).  Default is none.
+#
+# Use with caution - it is possible for users to gain permanent
+# access to these groups, even when not logged in on the console.
+# How to do it is left as an exercise for the reader...
+#
+# This variable is used by login and su.
+#
+#CONSOLE_GROUPS         floppy:audio:cdrom
+
+#
+# If set to "yes", new passwords will be encrypted using the MD5-based
+# algorithm compatible with the one used by recent releases of FreeBSD.
+# It supports passwords of unlimited length and longer salt strings.
+# Set to "no" if you need to copy encrypted passwords to other systems
+# which don't understand the new algorithm.  Default is "no".
+#
+# This variable is deprecated. You should use ENCRYPT_METHOD.
+#
+MD5_CRYPT_ENAB  no
+
+#
+# If set to MD5, MD5-based algorithm will be used for encrypting password
+# If set to SHA256, SHA256-based algorithm will be used for encrypting password
+# If set to SHA512, SHA512-based algorithm will be used for encrypting password
+# If set to DES, DES-based algorithm will be used for encrypting password (default)
+# Overrides the MD5_CRYPT_ENAB option
+#
+# Note: It is recommended to use a value consistent with
+# the PAM modules configuration.
+#
+ENCRYPT_METHOD  SHA512
+
+#
+# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
+#
+# Define the number of SHA rounds.
+# With a lot of rounds, it is more difficult to brute-force the password.
+# But note also that more CPU resources will be needed to authenticate
+# users.
+#
+# If not specified, the libc will choose the default number of rounds (5000).
+# The values must be inside the 1000-999999999 range.
+# If only one of the MIN or MAX values is set, then this value will be used.
+# If MIN > MAX, the highest value will be used.
+#
+# SHA_CRYPT_MIN_ROUNDS 5000
+# SHA_CRYPT_MAX_ROUNDS 5000
+
+################# OBSOLETED BY PAM ##############
+#                                               #
+# These options are now handled by PAM. Please  #
+# edit the appropriate file in /etc/pam.d/ to   #
+# enable the equivalents of them.
+# +############### + +#MOTD_FILE +#DIALUPS_CHECK_ENAB +#LASTLOG_ENAB +#MAIL_CHECK_ENAB +#OBSCURE_CHECKS_ENAB +#PORTTIME_CHECKS_ENAB +#SU_WHEEL_ONLY +#CRACKLIB_DICTPATH +#PASS_CHANGE_TRIES +#PASS_ALWAYS_WARN +#ENVIRON_FILE +#NOLOGINS_FILE +#ISSUE_FILE +#PASS_MIN_LEN +#PASS_MAX_LEN +#ULIMIT +#ENV_HZ +#CHFN_AUTH +#CHSH_AUTH +#FAIL_DELAY + +################# OBSOLETED ####################### +# # +# These options are no more handled by shadow. # +# # +# Shadow utilities will display a warning if they # +# still appear. # +# # +################################################### + +# CLOSE_SESSIONS +# LOGIN_STRING +# NO_PASSWORD_CONSOLE +# QMAIL_DIR + + + diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/modules b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/modules new file mode 100644 index 00000000..ef0354ee --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/modules @@ -0,0 +1,117 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# /etc/modules: kernel modules to load at boot time. +# +# This file contains the names of kernel modules that should be loaded +# at boot time, one per line. Lines beginning with "#" are ignored. +# Parameters can be specified after the module name. + +# Arch +# ---- +# +# Modules for certains builds, contains support modules and some CPU-specific optimizations. + +{% if arch == "x86_64" -%} +# Optimize for x86_64 cryptographic features +twofish-x86_64-3way +twofish-x86_64 +aes-x86_64 +salsa20-x86_64 +blowfish-x86_64 +{% endif -%} + +{% if cpuVendor == "intel" -%} +# Intel-specific optimizations +ghash-clmulni-intel +aesni-intel +kvm-intel +{% endif -%} + +{% if cpuVendor == "amd" -%} +# AMD-specific optimizations +kvm-amd +{% endif -%} + +kvm + + +# Crypto +# ------ + +# Some core modules which comprise strong cryptography. +blowfish_common +blowfish_generic +ctr +cts +lrw +lzo +rmd160 +rmd256 +rmd320 +serpent +sha512_generic +twofish_common +twofish_generic +xts +zlib + + +# Drivers +# ------- + +# Basics +lp +rtc +loop + +# Filesystems +ext2 +btrfs + +{% if desktop_enable -%} +# Desktop +psmouse +snd +snd_ac97_codec +snd_intel8x0 +snd_page_alloc +snd_pcm +snd_timer +soundcore +usbhid +{% endif -%} + +# Lib +# --- +xz + + +# Net +# --- + +# All packets needed for netfilter rules (ie iptables, ebtables). +ip_tables +x_tables +iptable_filter +iptable_nat + +# Targets +ipt_LOG +ipt_REJECT + +# Modules +xt_connlimit +xt_tcpudp +xt_recent +xt_limit +xt_conntrack +nf_conntrack +nf_conntrack_ipv4 +nf_defrag_ipv4 +xt_state +nf_nat + +# Addons +xt_pknock \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf new file mode 100644 index 00000000..f98d14e5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/passwdqc.conf @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. 
+############################################################################### +Name: passwdqc password strength enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Password-Type: Primary +Password: + requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh new file mode 100644 index 00000000..fd2de791 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/pinerolo_profile.sh @@ -0,0 +1,8 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Disable core dumps via soft limits for all users. Compliance to this setting +# is voluntary and can be modified by users up to a hard limit. This setting is +# a sane default. +ulimit -S -c 0 > /dev/null 2>&1 diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/securetty b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/securetty new file mode 100644 index 00000000..15b18d4e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/securetty @@ -0,0 +1,11 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# A list of TTYs, from which root can log in +# see `man securetty` for reference +{% if ttys -%} +{% for tty in ttys -%} +{{ tty }} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/tally2 b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/tally2 new file mode 100644 index 00000000..d9620299 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/host/templates/tally2 @@ -0,0 +1,14 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +Name: tally2 lockout after failed attempts enforcement +Default: yes +Priority: 1024 +Conflicts: cracklib +Auth-Type: Primary +Auth-Initial: + required pam_tally2.so deny={{ auth_retries }} onerr=fail unlock_time={{ auth_lockout_time }} +Account-Type: Primary +Account-Initial: + required pam_tally2.so diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/__init__.py new file mode 100644 index 00000000..58bebd84 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py new file mode 100644 index 00000000..1990d851 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.mysql.checks import config + + +def run_mysql_checks(): + log("Starting MySQL hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("MySQL hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/config.py new file mode 100644 index 00000000..a79f33b7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/checks/config.py @@ -0,0 +1,87 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import six +import subprocess + +from charmhelpers.core.hookenv import ( + log, + WARNING, +) +from charmhelpers.contrib.hardening.audits.file import ( + FilePermissionAudit, + DirectoryPermissionAudit, + TemplatedFile, +) +from charmhelpers.contrib.hardening.mysql import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get MySQL hardening config audits. 
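+
+    A sketch of the context consumed by the hardening.cnf template (the
+    key/value pairs shown are hypothetical; the real ones come from the
+    'security' section of the mysql hardening defaults):
+
+        {'mysql_settings': [('local-infile', 0), ('secure-auth', 1)]}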
+ + :returns: dictionary of audits + """ + if subprocess.call(['which', 'mysql'], stdout=subprocess.PIPE) != 0: + log("MySQL does not appear to be installed on this node - " + "skipping mysql hardening", level=WARNING) + return [] + + settings = utils.get_settings('mysql') + hardening_settings = settings['hardening'] + my_cnf = hardening_settings['mysql-conf'] + + audits = [ + FilePermissionAudit(paths=[my_cnf], user='root', + group='root', mode=0o0600), + + TemplatedFile(hardening_settings['hardening-conf'], + MySQLConfContext(), + TEMPLATES_DIR, + mode=0o0750, + user='mysql', + group='root', + service_actions=[{'service': 'mysql', + 'actions': ['restart']}]), + + # MySQL and Percona charms do not allow configuration of the + # data directory, so use the default. + DirectoryPermissionAudit('/var/lib/mysql', + user='mysql', + group='mysql', + recursive=False, + mode=0o755), + + DirectoryPermissionAudit('/etc/mysql', + user='root', + group='root', + recursive=False, + mode=0o700), + ] + + return audits + + +class MySQLConfContext(object): + """Defines the set of key/value pairs to set in a mysql config file. + + This context, when called, will return a dictionary containing the + key/value pairs of setting to specify in the + /etc/mysql/conf.d/hardening.cnf file. + """ + def __call__(self): + settings = utils.get_settings('mysql') + # Translate for python3 + return {'mysql_settings': + [(k, v) for k, v in six.iteritems(settings['security'])]} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf new file mode 100644 index 00000000..8242586c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/mysql/templates/hardening.cnf @@ -0,0 +1,12 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +[mysqld] +{% for setting, value in mysql_settings -%} +{% if value == 'True' -%} +{{ setting }} +{% elif value != 'None' and value != None -%} +{{ setting }} = {{ value }} +{% endif -%} +{% endfor -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/__init__.py new file mode 100644 index 00000000..58bebd84 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
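+
+# checks/config.py imports TEMPLATES_DIR from here to locate the ssh_config
+# and sshd_config templates rendered by its TemplatedFile audits.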
+ +from os import path + +TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates') diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py new file mode 100644 index 00000000..edaf484b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/__init__.py @@ -0,0 +1,29 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.contrib.hardening.ssh.checks import config + + +def run_ssh_checks(): + log("Starting SSH hardening checks.", level=DEBUG) + checks = config.get_audits() + for check in checks: + log("Running '%s' check" % (check.__class__.__name__), level=DEBUG) + check.ensure_compliance() + + log("SSH hardening checks complete.", level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/config.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/config.py new file mode 100644 index 00000000..41bed2d1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/checks/config.py @@ -0,0 +1,435 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_iface_addr, + is_ip, +) +from charmhelpers.core.hookenv import ( + log, + DEBUG, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, +) +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) +from charmhelpers.contrib.hardening.audits.file import ( + TemplatedFile, + FileContentAudit, +) +from charmhelpers.contrib.hardening.ssh import TEMPLATES_DIR +from charmhelpers.contrib.hardening import utils + + +def get_audits(): + """Get SSH hardening config audits. 
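+
+    Illustrative direct use of one of the audits (hypothetical; the normal
+    entry point is run_ssh_checks() in checks/__init__.py):
+
+        SSHConfig().ensure_compliance()   # rewrites /etc/ssh/ssh_config
+        SSHDConfig().ensure_compliance()  # rewrites /etc/ssh/sshd_config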
+
+    :returns: dictionary of audits
+    """
+    audits = [SSHConfig(), SSHDConfig(), SSHConfigFileContentAudit(),
+              SSHDConfigFileContentAudit()]
+    return audits
+
+
+class SSHConfigContext(object):
+
+    type = 'client'
+
+    def get_macs(self, allow_weak_mac):
+        if allow_weak_mac:
+            weak_macs = 'weak'
+        else:
+            weak_macs = 'default'
+
+        default = 'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160'
+        macs = {'default': default,
+                'weak': default + ',hmac-sha1'}
+
+        default = ('hmac-sha2-512-etm@openssh.com,'
+                   'hmac-sha2-256-etm@openssh.com,'
+                   'hmac-ripemd160-etm@openssh.com,umac-128-etm@openssh.com,'
+                   'hmac-sha2-512,hmac-sha2-256,hmac-ripemd160')
+        macs_66 = {'default': default,
+                   'weak': default + ',hmac-sha1'}
+
+        # Use newer MACs on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log("Detected Ubuntu 14.04 or newer, using new macs", level=DEBUG)
+            macs = macs_66
+
+        return macs[weak_macs]
+
+    def get_kexs(self, allow_weak_kex):
+        if allow_weak_kex:
+            weak_kex = 'weak'
+        else:
+            weak_kex = 'default'
+
+        default = 'diffie-hellman-group-exchange-sha256'
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex = {'default': default,
+               'weak': weak}
+
+        default = ('curve25519-sha256@libssh.org,'
+                   'diffie-hellman-group-exchange-sha256')
+        weak = (default + ',diffie-hellman-group14-sha1,'
+                'diffie-hellman-group-exchange-sha1,'
+                'diffie-hellman-group1-sha1')
+        kex_66 = {'default': default,
+                  'weak': weak}
+
+        # Use newer kex on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new key exchange '
+                'algorithms', level=DEBUG)
+            kex = kex_66
+
+        return kex[weak_kex]
+
+    def get_ciphers(self, cbc_required):
+        if cbc_required:
+            weak_ciphers = 'weak'
+        else:
+            weak_ciphers = 'default'
+
+        default = 'aes256-ctr,aes192-ctr,aes128-ctr'
+        # NOTE: the weak suffix needs a leading comma so it does not run
+        # into the last default cipher when the strings are concatenated.
+        cipher = {'default': default,
+                  'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        default = ('chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,'
+                   'aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr')
+        ciphers_66 = {'default': default,
+                      'weak': default + ',aes256-cbc,aes192-cbc,aes128-cbc'}
+
+        # Use newer ciphers on Ubuntu Trusty and above
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            log('Detected Ubuntu 14.04 or newer, using new ciphers',
+                level=DEBUG)
+            cipher = ciphers_66
+
+        return cipher[weak_ciphers]
+
+    def get_listening(self, listen=['0.0.0.0']):
+        """Returns a list of addresses SSH can listen on.
+
+        Turns input into a sensible list of IPs SSH can listen on. Input
+        must be a Python list of interface names, IPs and/or CIDRs.
+
+        :param listen: list of IPs, CIDRs, interface names
+
+        :returns: list of IPs available on the host
+        """
+        if listen == ['0.0.0.0']:
+            return listen
+
+        value = []
+        for network in listen:
+            try:
+                ip = get_address_in_network(network=network, fatal=True)
+            except ValueError:
+                if is_ip(network):
+                    ip = network
+                else:
+                    try:
+                        ip = get_iface_addr(iface=network, fatal=False)[0]
+                    except IndexError:
+                        continue
+            value.append(ip)
+        if value == []:
+            return ['0.0.0.0']
+        return value
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'addr_family': addr_family,
+            'remote_hosts': settings['common']['remote_hosts'],
+            'password_auth_allowed':
+            settings['client']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'ciphers': self.get_ciphers(settings['client']['cbc_required']),
+            'macs': self.get_macs(settings['client']['weak_hmac']),
+            'kexs': self.get_kexs(settings['client']['weak_kex']),
+            'roaming': settings['client']['roaming'],
+        }
+        return ctxt
+
+
+class SSHConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/ssh_config'
+        super(SSHConfig, self).__init__(path=path,
+                                        template_dir=TEMPLATES_DIR,
+                                        context=SSHConfigContext(),
+                                        user='root',
+                                        group='root',
+                                        mode=0o0644)
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['client']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHDConfigContext(SSHConfigContext):
+
+    type = 'server'
+
+    def __call__(self):
+        settings = utils.get_settings('ssh')
+        if settings['common']['network_ipv6_enable']:
+            addr_family = 'any'
+        else:
+            addr_family = 'inet'
+
+        ctxt = {
+            'ssh_ip': self.get_listening(settings['server']['listen_to']),
+            'password_auth_allowed':
+            settings['server']['password_authentication'],
+            'ports': settings['common']['ports'],
+            'addr_family': addr_family,
+            'ciphers': self.get_ciphers(settings['server']['cbc_required']),
+            'macs': self.get_macs(settings['server']['weak_hmac']),
+            'kexs': self.get_kexs(settings['server']['weak_kex']),
+            'host_key_files': settings['server']['host_key_files'],
+            'allow_root_with_key': settings['server']['allow_root_with_key'],
+            'password_authentication':
+            settings['server']['password_authentication'],
+            'use_priv_sep': settings['server']['use_privilege_separation'],
+            'use_pam': settings['server']['use_pam'],
+            'allow_x11_forwarding': settings['server']['allow_x11_forwarding'],
+            'print_motd': settings['server']['print_motd'],
+            'print_last_log': settings['server']['print_last_log'],
+            'client_alive_interval':
+            settings['server']['alive_interval'],
+            'client_alive_count': settings['server']['alive_count'],
+            'allow_tcp_forwarding': settings['server']['allow_tcp_forwarding'],
+            'allow_agent_forwarding':
+            settings['server']['allow_agent_forwarding'],
+            'deny_users': settings['server']['deny_users'],
+            'allow_users': settings['server']['allow_users'],
+            'deny_groups': settings['server']['deny_groups'],
+            'allow_groups': settings['server']['allow_groups'],
+            'use_dns': settings['server']['use_dns'],
+            'sftp_enable': settings['server']['sftp_enable'],
+            'sftp_group': settings['server']['sftp_group'],
+            'sftp_chroot': settings['server']['sftp_chroot'],
+            'max_auth_tries': settings['server']['max_auth_tries'],
+            'max_sessions': settings['server']['max_sessions'],
+        }
+        return ctxt
+
+
+class SSHDConfig(TemplatedFile):
+    def __init__(self):
+        path = '/etc/ssh/sshd_config'
+        super(SSHDConfig, self).__init__(path=path,
+                                         template_dir=TEMPLATES_DIR,
+                                         context=SSHDConfigContext(),
+                                         user='root',
+                                         group='root',
+                                         mode=0o0600,
+                                         service_actions=[{'service': 'ssh',
+                                                           'actions':
+                                                           ['restart']}])
+
+    def pre_write(self):
+        settings = utils.get_settings('ssh')
+        apt_update(fatal=True)
+        apt_install(settings['server']['package'])
+        if not os.path.exists('/etc/ssh'):
+            os.makedirs('/etc/ssh')
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+    def post_write(self):
+        # NOTE: don't recurse
+        utils.ensure_permissions('/etc/ssh', 'root', 'root', 0o0755,
+                                 maxdepth=0)
+
+
+class SSHConfigFileContentAudit(FileContentAudit):
+    def __init__(self):
+        self.path = '/etc/ssh/ssh_config'
+        super(SSHConfigFileContentAudit, self).__init__(self.path, {})
+
+    def is_compliant(self, *args, **kwargs):
+        self.pass_cases = []
+        self.fail_cases = []
+        # NOTE: this audits the client config so the 'client' settings apply
+        settings = utils.get_settings('ssh')
+
+        _release = lsb_release()['DISTRIB_CODENAME'].lower()
+        if CompareHostReleases(_release) >= 'trusty':
+            if not settings['client']['weak_hmac']:
+                self.pass_cases.append(r'^MACs.+,hmac-ripemd160$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?')  # noqa
+
+            if settings['client']['cbc_required']:
+                self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+            else:
+                self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?')
+                self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+')  # noqa
+                self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$')
+                self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?')
+                self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?')
+        else:
+            if not settings['client']['weak_hmac']:
+                self.fail_cases.append(r'^MACs.+,hmac-sha1$')
+            else:
+                self.pass_cases.append(r'^MACs.+,hmac-sha1$')
+
+            if settings['client']['weak_kex']:
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+            else:
+                self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?')  # noqa
+                self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?')  # noqa
+
+            if 
settings['client']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['client']['roaming']: + self.pass_cases.append(r'^UseRoaming yes$') + else: + self.fail_cases.append(r'^UseRoaming yes$') + + return super(SSHConfigFileContentAudit, self).is_compliant(*args, + **kwargs) + + +class SSHDConfigFileContentAudit(FileContentAudit): + def __init__(self): + self.path = '/etc/ssh/sshd_config' + super(SSHDConfigFileContentAudit, self).__init__(self.path, {}) + + def is_compliant(self, *args, **kwargs): + self.pass_cases = [] + self.fail_cases = [] + settings = utils.get_settings('ssh') + + _release = lsb_release()['DISTRIB_CODENAME'].lower() + if CompareHostReleases(_release) >= 'trusty': + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms.+,diffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms.*diffie-hellman-group14-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\schacha20-poly1305@openssh.com,.+') # noqa + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr$') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + if not settings['server']['weak_hmac']: + self.pass_cases.append(r'^MACs.+,hmac-ripemd160$') + else: + self.pass_cases.append(r'^MACs.+,hmac-sha1$') + + if settings['server']['weak_kex']: + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + else: + self.pass_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha256$') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group14-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group-exchange-sha1[,\s]?') # noqa + self.fail_cases.append(r'^KexAlgorithms\sdiffie-hellman-group1-sha1[,\s]?') # noqa + + if settings['server']['cbc_required']: + self.pass_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + 
self.fail_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.fail_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + else: + self.fail_cases.append(r'^Ciphers\s.*-cbc[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes128-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes192-ctr[,\s]?') + self.pass_cases.append(r'^Ciphers\s.*aes256-ctr[,\s]?') + + if settings['server']['sftp_enable']: + self.pass_cases.append(r'^Subsystem\ssftp') + else: + self.fail_cases.append(r'^Subsystem\ssftp') + + return super(SSHDConfigFileContentAudit, self).is_compliant(*args, + **kwargs) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config new file mode 100644 index 00000000..9742d8e2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/ssh_config @@ -0,0 +1,70 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# This is the ssh client system-wide configuration file. See +# ssh_config(5) for more information. This file provides defaults for +# users, and the values can be changed in per-user configuration files +# or on the command line. + +# Configuration data is parsed as follows: +# 1. command line options +# 2. user-specific file +# 3. system-wide file +# Any configuration value is only changed the first time it is set. +# Thus, host-specific definitions should be at the beginning of the +# configuration file, and defaults at the end. + +# Site-wide defaults for some commonly used options. For a comprehensive +# list of available options, their meanings and defaults, please see the +# ssh_config(5) man page. + +# Restrict the following configuration to be limited to this Host. 
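+# (Illustrative note, an assumption rather than upstream text: if
+# remote_hosts is unset the Host stanza below is omitted entirely and the
+# options apply to all hosts; with e.g. remote_hosts=['bastion.example.com']
+# the rendered line is "Host bastion.example.com".)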
+{% if remote_hosts -%} +Host {{ ' '.join(remote_hosts) }} +{% endif %} +ForwardAgent no +ForwardX11 no +ForwardX11Trusted yes +RhostsRSAAuthentication no +RSAAuthentication yes +PasswordAuthentication {{ password_auth_allowed }} +HostbasedAuthentication no +GSSAPIAuthentication no +GSSAPIDelegateCredentials no +GSSAPIKeyExchange no +GSSAPITrustDNS no +BatchMode no +CheckHostIP yes +AddressFamily {{ addr_family }} +ConnectTimeout 0 +StrictHostKeyChecking ask +IdentityFile ~/.ssh/identity +IdentityFile ~/.ssh/id_rsa +IdentityFile ~/.ssh/id_dsa +# The port at the destination should be defined +{% for port in ports -%} +Port {{ port }} +{% endfor %} +Protocol 2 +Cipher 3des +{% if ciphers -%} +Ciphers {{ ciphers }} +{%- endif %} +{% if macs -%} +MACs {{ macs }} +{%- endif %} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{%- endif %} +EscapeChar ~ +Tunnel no +TunnelDevice any:any +PermitLocalCommand no +VisualHostKey no +RekeyLimit 1G 1h +SendEnv LANG LC_* +HashKnownHosts yes +{% if roaming -%} +UseRoaming {{ roaming }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config new file mode 100644 index 00000000..5f87298a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/ssh/templates/sshd_config @@ -0,0 +1,159 @@ +############################################################################### +# WARNING: This configuration file is maintained by Juju. Local changes may +# be overwritten. +############################################################################### +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +{% for port in ports -%} +Port {{ port }} +{% endfor -%} +AddressFamily {{ addr_family }} +# Use these options to restrict which interfaces/protocols sshd will bind to +{% if ssh_ip -%} +{% for ip in ssh_ip -%} +ListenAddress {{ ip }} +{% endfor %} +{%- else -%} +ListenAddress :: +ListenAddress 0.0.0.0 +{% endif -%} +Protocol 2 +{% if ciphers -%} +Ciphers {{ ciphers }} +{% endif -%} +{% if macs -%} +MACs {{ macs }} +{% endif -%} +{% if kexs -%} +KexAlgorithms {{ kexs }} +{% endif -%} +# HostKeys for protocol version 2 +{% for keyfile in host_key_files -%} +HostKey {{ keyfile }} +{% endfor -%} + +# Privilege Separation is turned on for security +{% if use_priv_sep -%} +UsePrivilegeSeparation {{ use_priv_sep }} +{% endif -%} + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel VERBOSE + +# Authentication: +LoginGraceTime 30s +{% if allow_root_with_key -%} +PermitRootLogin without-password +{% else -%} +PermitRootLogin no +{% endif %} +PermitTunnel no +PermitUserEnvironment no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware 
 issues with
+# some PAM modules and threads)
+ChallengeResponseAuthentication no
+
+# Change to no to disable tunnelled clear text passwords
+PasswordAuthentication {{ password_authentication }}
+
+# Kerberos options
+KerberosAuthentication no
+KerberosGetAFSToken no
+KerberosOrLocalPasswd no
+KerberosTicketCleanup yes
+
+# GSSAPI options
+GSSAPIAuthentication no
+GSSAPICleanupCredentials yes
+
+X11Forwarding {{ allow_x11_forwarding }}
+X11DisplayOffset 10
+X11UseLocalhost yes
+GatewayPorts no
+PrintMotd {{ print_motd }}
+PrintLastLog {{ print_last_log }}
+TCPKeepAlive no
+UseLogin no
+
+ClientAliveInterval {{ client_alive_interval }}
+ClientAliveCountMax {{ client_alive_count }}
+AllowTcpForwarding {{ allow_tcp_forwarding }}
+AllowAgentForwarding {{ allow_agent_forwarding }}
+
+MaxStartups 10:30:100
+#Banner /etc/issue.net
+
+# Allow client to pass locale environment variables
+AcceptEnv LANG LC_*
+
+# Set this to 'yes' to enable PAM authentication, account processing,
+# and session processing. If this is enabled, PAM authentication will
+# be allowed through the ChallengeResponseAuthentication and
+# PasswordAuthentication. Depending on your PAM configuration,
+# PAM authentication via ChallengeResponseAuthentication may bypass
+# the setting of "PermitRootLogin without-password".
+# If you just want the PAM account and session checks to run without
+# PAM authentication, then enable this but set PasswordAuthentication
+# and ChallengeResponseAuthentication to 'no'.
+UsePAM {{ use_pam }}
+
+{% if deny_users -%}
+DenyUsers {{ deny_users }}
+{% endif -%}
+{% if allow_users -%}
+AllowUsers {{ allow_users }}
+{% endif -%}
+{% if deny_groups -%}
+DenyGroups {{ deny_groups }}
+{% endif -%}
+{% if allow_groups -%}
+AllowGroups {{ allow_groups }}
+{% endif -%}
+UseDNS {{ use_dns }}
+MaxAuthTries {{ max_auth_tries }}
+MaxSessions {{ max_sessions }}
+
+{% if sftp_enable -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+Subsystem sftp internal-sftp -l VERBOSE
+
+## These lines must appear at the *end* of sshd_config
+Match Group {{ sftp_group }}
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory {{ sftp_chroot }}
+{% else -%}
+# Configuration, in case SFTP is used
+## override default of no subsystems
+## Subsystem sftp /opt/app/openssh5/libexec/sftp-server
+## These lines must appear at the *end* of sshd_config
+Match Group sftponly
+ForceCommand internal-sftp -l VERBOSE
+ChrootDirectory /sftpchroot/home/%u
+{% endif %}
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/templating.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/templating.py
new file mode 100644
index 00000000..5b6765f7
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/templating.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
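+
+# Illustrative usage sketch (an assumption, not part of upstream
+# charmhelpers): render_and_write() below selects the template by the
+# basename of the target path, so e.g.
+#
+#   render_and_write(template_dir='/tmp/templates',
+#                    path='/etc/mysql/conf.d/hardening.cnf',
+#                    context={'mysql_settings': [('symbolic-links', '0')]})
+#
+# renders /tmp/templates/hardening.cnf and writes the result to the target.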
+ +import os +import six + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, +) + +try: + from jinja2 import FileSystemLoader, Environment +except ImportError: + from charmhelpers.fetch import apt_install + from charmhelpers.fetch import apt_update + apt_update(fatal=True) + if six.PY2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment + + +# NOTE: function separated from main rendering code to facilitate easier +# mocking in unit tests. +def write(path, data): + with open(path, 'wb') as out: + out.write(data) + + +def get_template_path(template_dir, path): + """Returns the template file which would be used to render the path. + + The path to the template file is returned. + :param template_dir: the directory the templates are located in + :param path: the file path to be written to. + :returns: path to the template file + """ + return os.path.join(template_dir, os.path.basename(path)) + + +def render_and_write(template_dir, path, context): + """Renders the specified template into the file. + + :param template_dir: the directory to load the template from + :param path: the path to write the templated contents to + :param context: the parameters to pass to the rendering engine + """ + env = Environment(loader=FileSystemLoader(template_dir)) + template_file = os.path.basename(path) + template = env.get_template(template_file) + log('Rendering from template: %s' % template.name, level=DEBUG) + rendered_content = template.render(context) + if not rendered_content: + log("Render returned None - skipping '%s'" % path, + level=WARNING) + return + + write(path, rendered_content.encode('utf-8').strip()) + log('Wrote template %s' % path, level=DEBUG) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/utils.py new file mode 100644 index 00000000..ff7485c2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/hardening/utils.py @@ -0,0 +1,155 @@ +# Copyright 2016 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import grp +import os +import pwd +import six +import yaml + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + INFO, + WARNING, + ERROR, +) + + +# Global settings cache. Since each hook fire entails a fresh module import it +# is safe to hold this in memory and not risk missing config changes (since +# they will result in a new hook fire and thus re-import). +__SETTINGS__ = {} + + +def _get_defaults(modules): + """Load the default config for the provided modules. + + :param modules: stack modules config defaults to lookup. + :returns: modules default config dictionary. 
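+
+    Example (illustrative): _get_defaults('mysql') loads
+    defaults/mysql.yaml from this package and returns a dict of the
+    form {'hardening': {...}, 'security': {...}}.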
+    """
+    default = os.path.join(os.path.dirname(__file__),
+                           'defaults/%s.yaml' % (modules))
+    return yaml.safe_load(open(default))
+
+
+def _get_schema(modules):
+    """Load the config schema for the provided modules.
+
+    NOTE: this schema is intended to have a 1-1 relationship with the keys
+    in the default config and is used as a means to verify valid overrides
+    provided by the user.
+
+    :param modules: stack modules config schema to lookup.
+    :returns: modules default schema dictionary.
+    """
+    schema = os.path.join(os.path.dirname(__file__),
+                          'defaults/%s.yaml.schema' % (modules))
+    return yaml.safe_load(open(schema))
+
+
+def _get_user_provided_overrides(modules):
+    """Load user-provided config overrides.
+
+    :param modules: stack modules to lookup in user overrides yaml file.
+    :returns: overrides dictionary.
+    """
+    overrides = os.path.join(os.environ['JUJU_CHARM_DIR'],
+                             'hardening.yaml')
+    if os.path.exists(overrides):
+        log("Found user-provided config overrides file '%s'" %
+            (overrides), level=DEBUG)
+        settings = yaml.safe_load(open(overrides))
+        if settings and settings.get(modules):
+            log("Applying '%s' overrides" % (modules), level=DEBUG)
+            return settings.get(modules)
+
+        log("No overrides found for '%s'" % (modules), level=DEBUG)
+    else:
+        log("No hardening config overrides file '%s' found in charm "
+            "root dir" % (overrides), level=DEBUG)
+
+    return {}
+
+
+def _apply_overrides(settings, overrides, schema):
+    """Get overrides config overlaid onto modules defaults.
+
+    :param settings: modules default config.
+    :param overrides: user-provided config overrides.
+    :param schema: config schema used to validate the overrides.
+    :returns: dictionary of modules config with user overrides applied.
+    """
+    if overrides:
+        for k, v in six.iteritems(overrides):
+            if k in schema:
+                if schema[k] is None:
+                    settings[k] = v
+                elif type(schema[k]) is dict:
+                    settings[k] = _apply_overrides(settings[k], overrides[k],
+                                                   schema[k])
+                else:
+                    log("Unexpected type found in schema '%s'" %
+                        type(schema[k]), level=ERROR)
+                    raise Exception("Unexpected type found in schema '%s'" %
+                                    type(schema[k]))
+            else:
+                log("Unknown override key '%s' - ignoring" % (k), level=INFO)
+
+    return settings
+
+
+def get_settings(modules):
+    global __SETTINGS__
+    if modules in __SETTINGS__:
+        return __SETTINGS__[modules]
+
+    schema = _get_schema(modules)
+    settings = _get_defaults(modules)
+    overrides = _get_user_provided_overrides(modules)
+    __SETTINGS__[modules] = _apply_overrides(settings, overrides, schema)
+    return __SETTINGS__[modules]
+
+
+def ensure_permissions(path, user, group, permissions, maxdepth=-1):
+    """Ensure permissions for path.
+
+    If path is a file, apply to file and return. If path is a directory,
+    apply recursively (if required) to directory contents and return.
+
+    :param path: path to the file or directory to operate on
+    :param user: user name
+    :param group: group name
+    :param permissions: octal permissions
+    :param maxdepth: maximum recursion depth. A negative maxdepth allows
+                     infinite recursion and maxdepth=0 means no recursion.
+ :returns: None + """ + if not os.path.exists(path): + log("File '%s' does not exist - cannot set permissions" % (path), + level=WARNING) + return + + _user = pwd.getpwnam(user) + os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid) + os.chmod(path, permissions) + + if maxdepth == 0: + log("Max recursion depth reached - skipping further recursion", + level=DEBUG) + return + elif maxdepth > 0: + maxdepth -= 1 + + if os.path.isdir(path): + contents = glob.glob("%s/*" % (path)) + for c in contents: + ensure_permissions(c, user=user, group=group, + permissions=permissions, maxdepth=maxdepth) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/__init__.py new file mode 100644 index 00000000..9b088de8 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/infiniband.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/infiniband.py new file mode 100644 index 00000000..0edb2314 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/mellanox/infiniband.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
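+
+# Illustrative usage sketch (an assumption, not part of upstream
+# charmhelpers): a typical flow on an InfiniBand-capable host would be
+#
+#   install_packages()   # ibutils, infiniband-diags, ibverbs-utils
+#   load_modules()       # modprobe each of REQUIRED_MODULES
+#   if is_enabled():
+#       for dev in devices():
+#           info = device_info(dev)
+#           log("%s firmware: %s" % (dev, info.fw_ver), level=INFO)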
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+import six
+
+from charmhelpers.fetch import (
+    apt_install,
+    apt_update,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+try:
+    from netifaces import interfaces as network_interfaces
+except ImportError:
+    if six.PY2:
+        apt_install('python-netifaces')
+    else:
+        apt_install('python3-netifaces')
+    from netifaces import interfaces as network_interfaces
+
+import os
+import re
+import subprocess
+
+from charmhelpers.core.kernel import modprobe
+
+REQUIRED_MODULES = (
+    "mlx4_ib",
+    "mlx4_en",
+    "mlx4_core",
+    "ib_ipath",
+    "ib_mthca",
+    "ib_srpt",
+    "ib_srp",
+    "ib_ucm",
+    "ib_isert",
+    "ib_iser",
+    "ib_ipoib",
+    "ib_cm",
+    "ib_uverbs",
+    "ib_umad",
+    "ib_sa",
+    "ib_mad",
+    "ib_core",
+    "ib_addr",
+    "rdma_ucm",
+)
+
+REQUIRED_PACKAGES = (
+    "ibutils",
+    "infiniband-diags",
+    "ibverbs-utils",
+)
+
+IPOIB_DRIVERS = (
+    "ib_ipoib",
+)
+
+ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
+
+
+class DeviceInfo(object):
+    pass
+
+
+def install_packages():
+    apt_update()
+    apt_install(REQUIRED_PACKAGES, fatal=True)
+
+
+def load_modules():
+    for module in REQUIRED_MODULES:
+        modprobe(module, persist=True)
+
+
+def is_enabled():
+    """Check if infiniband is loaded on the system"""
+    return os.path.exists(ABI_VERSION_FILE)
+
+
+def stat():
+    """Return full output of ibstat"""
+    return subprocess.check_output(["ibstat"]).decode('UTF-8')
+
+
+def devices():
+    """Returns a list of IB enabled devices"""
+    out = subprocess.check_output(['ibstat', '-l']).decode('UTF-8')
+    return out.splitlines()
+
+
+def device_info(device):
+    """Returns a DeviceInfo object with the current device settings"""
+
+    # NOTE: check_output() returns bytes so decode before applying the
+    # (str) regexes below.
+    status = subprocess.check_output(
+        ['ibstat', device, '-s']).decode('UTF-8').splitlines()
+
+    regexes = {
+        "CA type: (.*)": "device_type",
+        "Number of ports: (.*)": "num_ports",
+        "Firmware version: (.*)": "fw_ver",
+        "Hardware version: (.*)": "hw_ver",
+        "Node GUID: (.*)": "node_guid",
+        "System image GUID: (.*)": "sys_guid",
+    }
+
+    device = DeviceInfo()
+
+    for line in status:
+        for expression, key in regexes.items():
+            matches = re.search(expression, line)
+            if matches:
+                setattr(device, key, matches.group(1))
+
+    return device
+
+
+def ipoib_interfaces():
+    """Return a list of IPOIB capable ethernet interfaces"""
+    interfaces = []
+
+    for interface in network_interfaces():
+        try:
+            driver = re.search('^driver: (.+)$', subprocess.check_output([
+                'ethtool', '-i',
+                interface]).decode('UTF-8'), re.M).group(1)
+
+            if driver in IPOIB_DRIVERS:
+                interfaces.append(interface)
+        except Exception:
+            log("Skipping interface %s" % interface, level=INFO)
+            continue
+
+    return interfaces
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ip.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ip.py new file mode 100644 index 00000000..b13277bb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ip.py @@ -0,0 +1,602 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import glob +import re +import subprocess +import six +import socket + +from functools import partial + +from charmhelpers.fetch import apt_install, apt_update +from charmhelpers.core.hookenv import ( + config, + log, + network_get_primary_address, + unit_get, + WARNING, + NoNetworkBinding, +) + +from charmhelpers.core.host import ( + lsb_release, + CompareHostReleases, +) + +try: + import netifaces +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netifaces', fatal=True) + else: + apt_install('python3-netifaces', fatal=True) + import netifaces + +try: + import netaddr +except ImportError: + apt_update(fatal=True) + if six.PY2: + apt_install('python-netaddr', fatal=True) + else: + apt_install('python3-netaddr', fatal=True) + import netaddr + + +def _validate_cidr(network): + try: + netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + +def no_ip_found_error_out(network): + errmsg = ("No IP address found in network(s): %s" % network) + raise ValueError(errmsg) + + +def _get_ipv6_network_from_address(address): + """Get an netaddr.IPNetwork for the given IPv6 address + :param address: a dict as returned by netifaces.ifaddresses + :returns netaddr.IPNetwork: None if the address is a link local or loopback + address + """ + if address['addr'].startswith('fe80') or address['addr'] == "::1": + return None + + prefix = address['netmask'].split("/") + if len(prefix) > 1: + netmask = prefix[1] + else: + netmask = address['netmask'] + return netaddr.IPNetwork("%s/%s" % (address['addr'], + netmask)) + + +def get_address_in_network(network, fallback=None, fatal=False): + """Get an IPv4 or IPv6 address within the network from the host. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. + :param fallback (str): If no address is found, return fallback. + :param fatal (boolean): If no address is found, fallback is not + set and fatal is True then exit(1). 
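+
+    Example (illustrative, assuming eth0 holds 192.168.1.10/24):
+
+        get_address_in_network('192.168.1.0/24')
+        # -> '192.168.1.10'
+        get_address_in_network('10.0.0.0/8', fallback='127.0.0.1')
+        # -> '127.0.0.1' when no interface holds a 10.0.0.0/8 address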
+ """ + if network is None: + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + else: + return None + + networks = network.split() or [network] + for network in networks: + _validate_cidr(network) + network = netaddr.IPNetwork(network) + for iface in netifaces.interfaces(): + try: + addresses = netifaces.ifaddresses(iface) + except ValueError: + # If an instance was deleted between + # netifaces.interfaces() run and now, its interfaces are gone + continue + if network.version == 4 and netifaces.AF_INET in addresses: + for addr in addresses[netifaces.AF_INET]: + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], + addr['netmask'])) + if cidr in network: + return str(cidr.ip) + + if network.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + cidr = _get_ipv6_network_from_address(addr) + if cidr and cidr in network: + return str(cidr.ip) + + if fallback is not None: + return fallback + + if fatal: + no_ip_found_error_out(network) + + return None + + +def is_ipv6(address): + """Determine whether provided address is IPv6 or not.""" + try: + address = netaddr.IPAddress(address) + except netaddr.AddrFormatError: + # probably a hostname - so not an address at all! + return False + + return address.version == 6 + + +def is_address_in_network(network, address): + """ + Determine whether the provided address is within a network range. + + :param network (str): CIDR presentation format. For example, + '192.168.1.0/24'. + :param address: An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :returns boolean: Flag indicating whether address is in network. + """ + try: + network = netaddr.IPNetwork(network) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Network (%s) is not in CIDR presentation format" % + network) + + try: + address = netaddr.IPAddress(address) + except (netaddr.core.AddrFormatError, ValueError): + raise ValueError("Address (%s) is not in correct presentation format" % + address) + + if address in network: + return True + else: + return False + + +def _get_for_address(address, key): + """Retrieve an attribute of or the physical interface that + the IP address provided could be bound to. + + :param address (str): An individual IPv4 or IPv6 address without a net + mask or subnet prefix. For example, '192.168.1.1'. + :param key: 'iface' for the physical interface name or an attribute + of the configured interface, for example 'netmask'. + :returns str: Requested attribute or None if address is not bindable. 
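+
+    Example (illustrative, assuming eth0 holds 192.168.1.10/24; see the
+    get_iface_for_address/get_netmask_for_address partials defined below):
+
+        _get_for_address('192.168.1.1', key='iface')    # -> 'eth0'
+        _get_for_address('192.168.1.1', key='netmask')  # -> '255.255.255.0'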
+ """ + address = netaddr.IPAddress(address) + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + if address.version == 4 and netifaces.AF_INET in addresses: + addr = addresses[netifaces.AF_INET][0]['addr'] + netmask = addresses[netifaces.AF_INET][0]['netmask'] + network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + else: + return addresses[netifaces.AF_INET][0][key] + + if address.version == 6 and netifaces.AF_INET6 in addresses: + for addr in addresses[netifaces.AF_INET6]: + network = _get_ipv6_network_from_address(addr) + if not network: + continue + + cidr = network.cidr + if address in cidr: + if key == 'iface': + return iface + elif key == 'netmask' and cidr: + return str(cidr).split('/')[1] + else: + return addr[key] + return None + + +get_iface_for_address = partial(_get_for_address, key='iface') + + +get_netmask_for_address = partial(_get_for_address, key='netmask') + + +def resolve_network_cidr(ip_address): + ''' + Resolves the full address cidr of an ip_address based on + configured network interfaces + ''' + netmask = get_netmask_for_address(ip_address) + return str(netaddr.IPNetwork("%s/%s" % (ip_address, netmask)).cidr) + + +def format_ipv6_addr(address): + """If address is IPv6, wrap it in '[]' otherwise return None. + + This is required by most configuration files when specifying IPv6 + addresses. + """ + if is_ipv6(address): + return "[%s]" % address + + return None + + +def is_ipv6_disabled(): + try: + result = subprocess.check_output( + ['sysctl', 'net.ipv6.conf.all.disable_ipv6'], + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError: + return True + + return "net.ipv6.conf.all.disable_ipv6 = 1" in result + + +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, + fatal=True, exc_list=None): + """Return the assigned IP address for a given interface, if any. + + :param iface: network interface on which address(es) are expected to + be found. + :param inet_type: inet address family + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :return: list of ip addresses + """ + # Extract nic if passed /dev/ethX + if '/' in iface: + iface = iface.split('/')[-1] + + if not exc_list: + exc_list = [] + + try: + inet_num = getattr(netifaces, inet_type) + except AttributeError: + raise Exception("Unknown inet type '%s'" % str(inet_type)) + + interfaces = netifaces.interfaces() + if inc_aliases: + ifaces = [] + for _iface in interfaces: + if iface == _iface or _iface.split(':')[0] == iface: + ifaces.append(_iface) + + if fatal and not ifaces: + raise Exception("Invalid interface '%s'" % iface) + + ifaces.sort() + else: + if iface not in interfaces: + if fatal: + raise Exception("Interface '%s' not found " % (iface)) + else: + return [] + + else: + ifaces = [iface] + + addresses = [] + for netiface in ifaces: + net_info = netifaces.ifaddresses(netiface) + if inet_num in net_info: + for entry in net_info[inet_num]: + if 'addr' in entry and entry['addr'] not in exc_list: + addresses.append(entry['addr']) + + if fatal and not addresses: + raise Exception("Interface '%s' doesn't have any %s addresses." 
% + (iface, inet_type)) + + return sorted(addresses) + + +get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') + + +def get_iface_from_addr(addr): + """Work out on which interface the provided address is configured.""" + for iface in netifaces.interfaces(): + addresses = netifaces.ifaddresses(iface) + for inet_type in addresses: + for _addr in addresses[inet_type]: + _addr = _addr['addr'] + # link local + ll_key = re.compile("(.+)%.*") + raw = re.match(ll_key, _addr) + if raw: + _addr = raw.group(1) + + if _addr == addr: + log("Address '%s' is configured on iface '%s'" % + (addr, iface)) + return iface + + msg = "Unable to infer net iface on which '%s' is configured" % (addr) + raise Exception(msg) + + +def sniff_iface(f): + """Ensure decorated function is called with a value for iface. + + If no iface provided, inject net iface inferred from unit private address. + """ + def iface_sniffer(*args, **kwargs): + if not kwargs.get('iface', None): + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) + + return f(*args, **kwargs) + + return iface_sniffer + + +@sniff_iface +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, + dynamic_only=True): + """Get assigned IPv6 address for a given interface. + + Returns list of addresses found. If no address found, returns empty list. + + If iface is None, we infer the current primary interface by doing a reverse + lookup on the unit private-address. + + We currently only support scope global IPv6 addresses i.e. non-temporary + addresses. If no global IPv6 address is found, return the first one found + in the ipv6 address list. + + :param iface: network interface on which ipv6 address(es) are expected to + be found. + :param inc_aliases: include alias interfaces in search + :param fatal: if True, raise exception if address not found + :param exc_list: list of addresses to ignore + :param dynamic_only: only recognise dynamic addresses + :return: list of ipv6 addresses + """ + addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', + inc_aliases=inc_aliases, fatal=fatal, + exc_list=exc_list) + + if addresses: + global_addrs = [] + for addr in addresses: + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") + m = re.match(key_scope_link_local, addr) + if m: + eui_64_mac = m.group(1) + iface = m.group(2) + else: + global_addrs.append(addr) + + if global_addrs: + # Make sure any found global addresses are not temporary + cmd = ['ip', 'addr', 'show', iface] + out = subprocess.check_output(cmd).decode('UTF-8') + if dynamic_only: + key = re.compile("inet6 (.+)/[0-9]+ scope global.* dynamic.*") + else: + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") + + addrs = [] + for line in out.split('\n'): + line = line.strip() + m = re.match(key, line) + if m and 'temporary' not in line: + # Return the first valid address we find + for addr in global_addrs: + if m.group(1) == addr: + if not dynamic_only or \ + m.group(1).endswith(eui_64_mac): + addrs.append(addr) + + if addrs: + return addrs + + if fatal: + raise Exception("Interface '%s' does not have a scope global " + "non-temporary ipv6 address." 
 % iface)
+
+    return []
+
+
+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of bridges on the system."""
+    b_regex = "%s/*/bridge" % vnic_dir
+    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
+
+
+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
+    """Return a list of nics comprising a given bridge on the system."""
+    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
+    return [x.split('/')[-1] for x in glob.glob(brif_regex)]
+
+
+def is_bridge_member(nic):
+    """Check if a given nic is a member of a bridge."""
+    for bridge in get_bridges():
+        if nic in get_bridge_nics(bridge):
+            return True
+
+    return False
+
+
+def is_ip(address):
+    """
+    Returns True if address is a valid IP address.
+    """
+    try:
+        # Test to see if already an IPv4/IPv6 address
+        address = netaddr.IPAddress(address)
+        return True
+    except (netaddr.AddrFormatError, ValueError):
+        return False
+
+
+def ns_query(address):
+    try:
+        import dns.resolver
+    except ImportError:
+        if six.PY2:
+            apt_install('python-dnspython', fatal=True)
+        else:
+            apt_install('python3-dnspython', fatal=True)
+        import dns.resolver
+
+    if isinstance(address, dns.name.Name):
+        rtype = 'PTR'
+    elif isinstance(address, six.string_types):
+        rtype = 'A'
+    else:
+        return None
+
+    try:
+        answers = dns.resolver.query(address, rtype)
+    except dns.resolver.NXDOMAIN:
+        return None
+
+    if answers:
+        return str(answers[0])
+    return None
+
+
+def get_host_ip(hostname, fallback=None):
+    """
+    Resolves the IP for a given hostname, or returns
+    the input if it is already an IP.
+    """
+    if is_ip(hostname):
+        return hostname
+
+    ip_addr = ns_query(hostname)
+    if not ip_addr:
+        try:
+            ip_addr = socket.gethostbyname(hostname)
+        except Exception:
+            log("Failed to resolve hostname '%s'" % (hostname),
+                level=WARNING)
+            return fallback
+    return ip_addr
+
+
+def get_hostname(address, fqdn=True):
+    """
+    Resolves hostname for given IP, or returns the input
+    if it is already a hostname.
+    """
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            if six.PY2:
+                apt_install("python-dnspython", fatal=True)
+            else:
+                apt_install("python3-dnspython", fatal=True)
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+
+        if not result:
+            try:
+                result = socket.gethostbyaddr(address)[0]
+            except Exception:
+                return None
+    else:
+        result = address
+
+    if fqdn:
+        # strip trailing .
+        if result.endswith('.'):
+            return result[:-1]
+        else:
+            return result
+    else:
+        return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note: calls 'nc' via subprocess
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
+
+
+def assert_charm_supports_ipv6():
+    """Check whether we are able to support charms ipv6."""
+    release = lsb_release()['DISTRIB_CODENAME'].lower()
+    if CompareHostReleases(release) < "trusty":
+        raise Exception("IPv6 is not supported in the charms for Ubuntu "
+                        "versions less than Trusty 14.04")
+
+
+def get_relation_ip(interface, cidr_network=None):
+    """Return this unit's IP for the given interface.
+
+    Allow for an arbitrary interface to use with network-get to select an IP.
+    Handle all address selection options including passed cidr network and
+    IPv6.
+
+    Usage: get_relation_ip('amqp', cidr_network='10.0.0.0/8')
+
+    @param interface: string name of the relation.
+    @param cidr_network: string CIDR Network to select an address from.
+    @raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
+    @returns IPv6 or IPv4 address
+    """
+    # Select the interface address first
+    # For possible use as a fallback below with get_address_in_network
+    try:
+        # Get the interface specific IP
+        address = network_get_primary_address(interface)
+    except NotImplementedError:
+        # If network-get is not available
+        address = get_host_ip(unit_get('private-address'))
+    except NoNetworkBinding:
+        log("No network binding for {}".format(interface), WARNING)
+        address = get_host_ip(unit_get('private-address'))
+
+    if config('prefer-ipv6'):
+        # Currently IPv6 has priority, eventually we want IPv6 to just be
+        # another network space.
+        assert_charm_supports_ipv6()
+        return get_ipv6_addr()[0]
+    elif cidr_network:
+        # If a specific CIDR network is passed get the address from that
+        # network.
+        return get_address_in_network(cidr_network, address)
+
+    # Return the interface address
+    return address
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/__init__.py
new file mode 100644
index 00000000..fd001bc2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/__init__.py
@@ -0,0 +1,541 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+''' Helpers for interacting with OpenvSwitch '''
+import hashlib
+import subprocess
+import os
+import six
+
+from charmhelpers.fetch import apt_install
+
+
+from charmhelpers.core.hookenv import (
+    log, WARNING, INFO, DEBUG
+)
+from charmhelpers.core.host import (
+    service
+)
+
+
+BRIDGE_TEMPLATE = """\
+# This veth pair is required when neutron data-port is mapped to an existing linux bridge.
 lp:1635067
+
+auto {linuxbridge_port}
+iface {linuxbridge_port} inet manual
+    pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
+    pre-up ip link set {ovsbridge_port} master {bridge}
+    pre-up ip link set {ovsbridge_port} up
+    up ip link set {linuxbridge_port} up
+    down ip link del {linuxbridge_port}
+"""
+
+MAX_KERNEL_INTERFACE_NAME_LEN = 15
+
+
+def get_bridges():
+    """Return list of the bridges on the default openvswitch
+
+    :returns: List of bridge names
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if ovs-vsctl fails
+    """
+    cmd = ["ovs-vsctl", "list-br"]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_bridges = [l.strip() for l in lines]
+    return [b for b in maybe_bridges if b]
+
+
+def get_bridge_ports(name):
+    """Return a list of the ports on a named bridge
+
+    :param name: the name of the bridge to list
+    :type name: str
+    :returns: List of ports on the named bridge
+    :rtype: List[str]
+    :raises: subprocess.CalledProcessError if the ovs-vsctl command fails. If
+        the named bridge doesn't exist, then the exception will be raised.
+    """
+    cmd = ["ovs-vsctl", "--", "list-ports", name]
+    lines = subprocess.check_output(cmd).decode('utf-8').split("\n")
+    maybe_ports = [l.strip() for l in lines]
+    return [p for p in maybe_ports if p]
+
+
+def get_bridges_and_ports_map():
+    """Return dictionary of bridge to ports for the default openvswitch
+
+    :returns: a mapping of bridge name to a list of ports.
+    :rtype: Dict[str, List[str]]
+    :raises: subprocess.CalledProcessError if any of the underlying ovs-vsctl
+        commands fail.
+    """
+    return {b: get_bridge_ports(b) for b in get_bridges()}
+
+
+def _dict_to_vsctl_set(data, table, entity):
+    """Helper that takes dictionary and provides ``ovs-vsctl set`` commands
+
+    :param data: Additional data to attach to interface
+        The keys in the data dictionary map directly to column names in the
+        OpenvSwitch table specified as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type data: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param table: Name of table to operate on + :type table: str + :param entity: Name of entity to operate on + :type entity: str + :returns: '--' separated ``ovs-vsctl set`` commands + :rtype: Iterator[Tuple[str, str, str, str, str]] + """ + for (k, v) in data.items(): + if isinstance(v, dict): + entries = { + '{}:{}'.format(k, dk): dv for (dk, dv) in v.items()} + else: + entries = {k: v} + for (colk, colv) in entries.items(): + yield ('--', 'set', table, entity, '{}={}'.format(colk, colv)) + + +def add_bridge(name, datapath_type=None, brdata=None, exclusive=False): + """Add the named bridge to openvswitch and set/update bridge data for it + + :param name: Name of bridge to create + :type name: str + :param datapath_type: Add datapath_type to bridge (DEPRECATED, use brdata) + :type datapath_type: Optional[str] + :param brdata: Additional data to attach to bridge + The keys in the brdata dictionary map directly to column names in the + OpenvSwitch bridge table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. 
+ + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type brdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if bridge exists + :type exclusive: bool + :raises: subprocess.CalledProcessError + """ + log('Creating bridge {}'.format(name)) + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-br', name)) + if brdata: + for setcmd in _dict_to_vsctl_set(brdata, 'bridge', name): + cmd.extend(setcmd) + if datapath_type is not None: + log('DEPRECATION WARNING: add_bridge called with datapath_type, ' + 'please use the brdata keyword argument instead.') + cmd += ['--', 'set', 'bridge', name, + 'datapath_type={}'.format(datapath_type)] + subprocess.check_call(cmd) + + +def del_bridge(name): + """Delete the named bridge from openvswitch + + :param name: Name of bridge to remove + :type name: str + :raises: subprocess.CalledProcessError + """ + log('Deleting bridge {}'.format(name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name]) + + +def add_bridge_port(name, port, promisc=False, ifdata=None, exclusive=False, + linkup=True, portdata=None): + """Add port to bridge and optionally set/update interface data for it + + :param name: Name of bridge to attach port to + :type name: str + :param port: Name of port as represented in netdev + :type port: str + :param promisc: Whether to set promiscuous mode on interface + True=on, False=off, None leave untouched + :type promisc: Optional[bool] + :param ifdata: Additional data to attach to interface + The keys in the ifdata dictionary map directly to column names in the + OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in + RFC 7047 [1] + + There are some established conventions for keys in the external-ids + column of various tables, consult the OVS Integration Guide [2] for + more details. + + NOTE(fnordahl): Technically the ``external-ids`` column is called + ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s + behaviour of transforming dashes to underscores for us [3] so we can + have a more pleasant data structure. + + 0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf + 1: https://tools.ietf.org/html/rfc7047 + 2: http://docs.openvswitch.org/en/latest/topics/integration/ + 3: https://github.com/openvswitch/ovs/blob/ + 20dac08fdcce4b7fda1d07add3b346aa9751cfbc/ + lib/db-ctl-base.c#L189-L215 + :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param exclusive: If True, raise exception if port exists + :type exclusive: bool + :param linkup: Bring link up + :type linkup: bool + :param portdata: Additional data to attach to port. Similar to ifdata. 
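+        Example (illustrative):
+            {
+                'external-ids': {
+                    'charm': 'managed',
+                },
+            }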
+ :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-vsctl', '--'] + if not exclusive: + cmd.append('--may-exist') + cmd.extend(('add-port', name, port)) + for ovs_table, data in (('Interface', ifdata), ('Port', portdata)): + if data: + for setcmd in _dict_to_vsctl_set(data, ovs_table, port): + cmd.extend(setcmd) + + log('Adding port {} to bridge {}'.format(port, name)) + subprocess.check_call(cmd) + if linkup: + # This is mostly a workaround for CI environments, in the real world + # the bare metal provider would most likely have configured and brought + # up the link for us. + subprocess.check_call(["ip", "link", "set", port, "up"]) + if promisc: + subprocess.check_call(["ip", "link", "set", port, "promisc", "on"]) + elif promisc is False: + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def del_bridge_port(name, port): + """Delete a port from the named openvswitch bridge + + :param name: Name of bridge to remove port from + :type name: str + :param port: Name of port to remove + :type port: str + :raises: subprocess.CalledProcessError + """ + log('Deleting port {} from bridge {}'.format(port, name)) + subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", + name, port]) + subprocess.check_call(["ip", "link", "set", port, "down"]) + subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) + + +def add_bridge_bond(bridge, port, interfaces, portdata=None, ifdatamap=None, + exclusive=False): + """Add bonded port in bridge from interfaces. + + :param bridge: Name of bridge to add bonded port to + :type bridge: str + :param port: Name of created port + :type port: str + :param interfaces: Underlying interfaces that make up the bonded port + :type interfaces: Iterator[str] + :param portdata: Additional data to attach to the created bond port + See _dict_to_vsctl_set() for detailed description. + Example: + { + 'bond-mode': 'balance-tcp', + 'lacp': 'active', + 'other-config': { + 'lacp-time': 'fast', + }, + } + :type portdata: Optional[Dict[str,Union[str,Dict[str,str]]]] + :param ifdatamap: Map of data to attach to created bond interfaces + See _dict_to_vsctl_set() for detailed description. 
+        Example:
+            {
+                'eth0': {
+                    'type': 'dpdk',
+                    'mtu-request': '9000',
+                    'options': {
+                        'dpdk-devargs': '0000:01:00.0',
+                    },
+                },
+            }
+    :type ifdatamap: Optional[Dict[str,Dict[str,Union[str,Dict[str,str]]]]]
+    :param exclusive: If True, raise exception if port exists
+    :type exclusive: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['ovs-vsctl', '--']
+    if not exclusive:
+        cmd.append('--may-exist')
+    cmd.extend(('add-bond', bridge, port))
+    cmd.extend(interfaces)
+    if portdata:
+        for setcmd in _dict_to_vsctl_set(portdata, 'port', port):
+            cmd.extend(setcmd)
+    if ifdatamap:
+        for ifname, ifdata in ifdatamap.items():
+            for setcmd in _dict_to_vsctl_set(ifdata, 'Interface', ifname):
+                cmd.extend(setcmd)
+    subprocess.check_call(cmd)
+
+
+def add_ovsbridge_linuxbridge(name, bridge, ifdata=None):
+    """Add linux bridge to the named openvswitch bridge
+
+    :param name: Name of the OVS bridge to attach the Linux bridge to
+    :type name: str
+    :param bridge: Name of the Linux bridge to attach
+    :type bridge: str
+    :param ifdata: Additional data to attach to interface
+        The keys in the ifdata dictionary map directly to column names in the
+        OpenvSwitch Interface table as defined in DB-SCHEMA [0] referenced in
+        RFC 7047 [1]
+
+        There are some established conventions for keys in the external-ids
+        column of various tables, consult the OVS Integration Guide [2] for
+        more details.
+
+        NOTE(fnordahl): Technically the ``external-ids`` column is called
+        ``external_ids`` (with an underscore) and we rely on ``ovs-vsctl``'s
+        behaviour of transforming dashes to underscores for us [3] so we can
+        have a more pleasant data structure.
+
+        0: http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf
+        1: https://tools.ietf.org/html/rfc7047
+        2: http://docs.openvswitch.org/en/latest/topics/integration/
+        3: https://github.com/openvswitch/ovs/blob/
+               20dac08fdcce4b7fda1d07add3b346aa9751cfbc/
+               lib/db-ctl-base.c#L189-L215
+    :type ifdata: Optional[Dict[str,Union[str,Dict[str,str]]]]
+    """
+    try:
+        import netifaces
+    except ImportError:
+        if six.PY2:
+            apt_install('python-netifaces', fatal=True)
+        else:
+            apt_install('python3-netifaces', fatal=True)
+        import netifaces
+
+    # NOTE(jamespage):
+    # Older code supported addition of a linuxbridge directly
+    # to an OVS bridge; ensure we don't break uses on upgrade
+    existing_ovs_bridge = port_to_br(bridge)
+    if existing_ovs_bridge is not None:
+        log('Linuxbridge {} is already directly in use'
+            ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
+            level=INFO)
+        return
+
+    # NOTE(jamespage):
+    # preserve existing naming because interfaces may already exist.
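+    # For example (illustrative names only): name='br-ex' and
+    # bridge='lxdbr0' would yield the veth pair 'veth-br-ex'/'veth-lxdbr0',
+    # both within the kernel's 15 character interface name limit.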
+    ovsbridge_port = "veth-" + name
+    linuxbridge_port = "veth-" + bridge
+    if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
+            len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
+        # NOTE(jamespage):
+        # use parts of hashed bridgename (openstack style) when
+        # a bridge name exceeds 15 chars
+        hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
+        base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
+        ovsbridge_port = "cvo{}".format(base)
+        linuxbridge_port = "cvb{}".format(base)
+
+    interfaces = netifaces.interfaces()
+    for interface in interfaces:
+        if interface == ovsbridge_port or interface == linuxbridge_port:
+            log('Interface {} already exists'.format(interface), level=INFO)
+            return
+
+    log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
+        level=INFO)
+
+    check_for_eni_source()
+
+    with open('/etc/network/interfaces.d/{}.cfg'.format(
+            linuxbridge_port), 'w') as config:
+        config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
+                                            ovsbridge_port=ovsbridge_port,
+                                            bridge=bridge))
+
+    subprocess.check_call(["ifup", linuxbridge_port])
+    add_bridge_port(name, linuxbridge_port, ifdata=ifdata)
+
+
+def is_linuxbridge_interface(port):
+    ''' Check if the interface is a Linux bridge
+
+    :param port: Name of an interface to check whether it is a Linux bridge
+    :returns: True if port is a Linux bridge'''
+
+    if os.path.exists('/sys/class/net/' + port + '/bridge'):
+        log('Interface {} is a Linux bridge'.format(port), level=DEBUG)
+        return True
+    else:
+        log('Interface {} is not a Linux bridge'.format(port), level=DEBUG)
+        return False
+
+
+def set_manager(manager):
+    ''' Set the manager for the local openvswitch '''
+    log('Setting manager for local ovs to {}'.format(manager))
+    subprocess.check_call(['ovs-vsctl', 'set-manager',
+                           'ssl:{}'.format(manager)])
+
+
+def set_Open_vSwitch_column_value(column_value):
+    """
+    Calls ovs-vsctl and sets the 'column_value' in the Open_vSwitch table.
+
+    :param column_value:
+        See http://www.openvswitch.org/ovs-vswitchd.conf.db.5.pdf for
+        details of the relevant values.
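+        Example (illustrative):
+            ``other_config:max-idle=10000``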
+    :type column_value: str
+    :raises: subprocess.CalledProcessError if ovsdb-server is not running
+    """
+    log('Setting {} in the Open_vSwitch table'.format(column_value))
+    subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch', '.',
+                           column_value])
+
+
+CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
+
+
+def get_certificate():
+    ''' Read openvswitch certificate from disk '''
+    if os.path.exists(CERT_PATH):
+        log('Reading ovs certificate from {}'.format(CERT_PATH))
+        with open(CERT_PATH, 'r') as cert:
+            full_cert = cert.read()
+            begin_marker = "-----BEGIN CERTIFICATE-----"
+            end_marker = "-----END CERTIFICATE-----"
+            begin_index = full_cert.find(begin_marker)
+            end_index = full_cert.rfind(end_marker)
+            if end_index == -1 or begin_index == -1:
+                raise RuntimeError("Certificate does not contain valid begin"
+                                   " and end markers.")
+            full_cert = full_cert[begin_index:(end_index + len(end_marker))]
+            return full_cert
+    else:
+        log('Certificate not found', level=WARNING)
+        return None
+
+
+def check_for_eni_source():
+    ''' Juju removes the source line when setting up interfaces,
+    replace if missing '''
+
+    with open('/etc/network/interfaces', 'r') as eni:
+        for line in eni:
+            # Lines read from the file retain their trailing newline,
+            # so strip before comparing.
+            if line.strip() == 'source /etc/network/interfaces.d/*':
+                return
+    with open('/etc/network/interfaces', 'a') as eni:
+        eni.write('\nsource /etc/network/interfaces.d/*')
+
+
+def full_restart():
+    ''' Full restart and reload of openvswitch '''
+    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
+        service('start', 'openvswitch-force-reload-kmod')
+    else:
+        service('force-reload-kmod', 'openvswitch-switch')
+
+
+def enable_ipfix(bridge, target,
+                 cache_active_timeout=60,
+                 cache_max_flows=128,
+                 sampling=64):
+    '''Enable IPFIX on bridge to target.
+
+    :param bridge: Bridge to monitor
+    :param target: IPFIX remote endpoint
+    :param cache_active_timeout: The maximum period in seconds for
+                                 which an IPFIX flow record is cached
+                                 and aggregated before being sent
+    :param cache_max_flows: The maximum number of IPFIX flow records
+                            that can be cached at a time
+    :param sampling: The rate at which packets should be sampled and
+                     sent to each target collector
+    '''
+    cmd = [
+        'ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
+        '--id=@i', 'create', 'IPFIX',
+        'targets="{}"'.format(target),
+        'sampling={}'.format(sampling),
+        'cache_active_timeout={}'.format(cache_active_timeout),
+        'cache_max_flows={}'.format(cache_max_flows),
+    ]
+    log('Enabling IPFIX on {}.'.format(bridge))
+    subprocess.check_call(cmd)
+
+
+def disable_ipfix(bridge):
+    '''Disable IPFIX on the target bridge.
+
+    :param bridge: Bridge to modify
+    '''
+    cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
+    subprocess.check_call(cmd)
+
+
+def port_to_br(port):
+    '''Determine the bridge that contains a port
+
+    :param port: Name of port to check for
+    :returns str: OVS bridge containing port or None if not found
+    '''
+    try:
+        return subprocess.check_output(
+            ['ovs-vsctl', 'port-to-br', port]
+        ).decode('UTF-8').strip()
+    except subprocess.CalledProcessError:
+        return None
+
+
+def ovs_appctl(target, args):
+    """Run `ovs-appctl` for target with args and return output.
+
+    :param target: Name of daemon to contact.  Unless target begins with '/',
+                   `ovs-appctl` looks for a pidfile and will build the path
+                   /var/run/openvswitch/target.pid.ctl for you.
+    :type target: str
+    :param args: Command and arguments to pass to `ovs-appctl`
+    :type args: Tuple[str, ...]
+ :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + cmd = ['ovs-appctl', '-t', target] + cmd.extend(args) + return subprocess.check_output(cmd, universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovn.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovn.py new file mode 100644 index 00000000..2075f11a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovn.py @@ -0,0 +1,233 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import subprocess +import uuid + +from . import utils + + +OVN_RUNDIR = '/var/run/ovn' +OVN_SYSCONFDIR = '/etc/ovn' + + +def ovn_appctl(target, args, rundir=None, use_ovs_appctl=False): + """Run ovn/ovs-appctl for target with args and return output. + + :param target: Name of daemon to contact. Unless target begins with '/', + `ovn-appctl` looks for a pidfile and will build the path to + a /var/run/ovn/target.pid.ctl for you. + :type target: str + :param args: Command and arguments to pass to `ovn-appctl` + :type args: Tuple[str, ...] + :param rundir: Override path to sockets + :type rundir: Optional[str] + :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03, + set this to True to use ``ovs-appctl`` instead. + :type use_ovs_appctl: bool + :returns: Output from command + :rtype: str + :raises: subprocess.CalledProcessError + """ + # NOTE(fnordahl): The ovsdb-server processes for the OVN databases use a + # non-standard naming scheme for their daemon control socket and we need + # to pass the full path to the socket. + if target in ('ovnnb_db', 'ovnsb_db',): + target = os.path.join(rundir or OVN_RUNDIR, target + '.ctl') + + if use_ovs_appctl: + tool = 'ovs-appctl' + else: + tool = 'ovn-appctl' + + return utils._run(tool, '-t', target, *args) + + +class OVNClusterStatus(object): + + def __init__(self, name, cluster_id, server_id, address, status, role, + term, leader, vote, election_timer, log, + entries_not_yet_committed, entries_not_yet_applied, + connections, servers): + """Initialize and populate OVNClusterStatus object. + + Use class initializer so we can define types in a compatible manner. 
+ + :param name: Name of schema used for database + :type name: str + :param cluster_id: UUID of cluster + :type cluster_id: uuid.UUID + :param server_id: UUID of server + :type server_id: uuid.UUID + :param address: OVSDB connection method + :type address: str + :param status: Status text + :type status: str + :param role: Role of server + :type role: str + :param term: Election term + :type term: int + :param leader: Short form UUID of leader + :type leader: str + :param vote: Vote + :type vote: str + :param election_timer: Current value of election timer + :type election_timer: int + :param log: Log + :type log: str + :param entries_not_yet_committed: Entries not yet committed + :type entries_not_yet_committed: int + :param entries_not_yet_applied: Entries not yet applied + :type entries_not_yet_applied: int + :param connections: Connections + :type connections: str + :param servers: Servers in the cluster + [('0ea6', 'ssl:192.0.2.42:6643')] + :type servers: List[Tuple[str,str]] + """ + self.name = name + self.cluster_id = cluster_id + self.server_id = server_id + self.address = address + self.status = status + self.role = role + self.term = term + self.leader = leader + self.vote = vote + self.election_timer = election_timer + self.log = log + self.entries_not_yet_committed = entries_not_yet_committed + self.entries_not_yet_applied = entries_not_yet_applied + self.connections = connections + self.servers = servers + + def __eq__(self, other): + return ( + self.name == other.name and + self.cluster_id == other.cluster_id and + self.server_id == other.server_id and + self.address == other.address and + self.status == other.status and + self.role == other.role and + self.term == other.term and + self.leader == other.leader and + self.vote == other.vote and + self.election_timer == other.election_timer and + self.log == other.log and + self.entries_not_yet_committed == other.entries_not_yet_committed and + self.entries_not_yet_applied == other.entries_not_yet_applied and + self.connections == other.connections and + self.servers == other.servers) + + @property + def is_cluster_leader(self): + """Retrieve status information from clustered OVSDB. + + :returns: Whether target is cluster leader + :rtype: bool + """ + return self.leader == 'self' + + +def cluster_status(target, schema=None, use_ovs_appctl=False, rundir=None): + """Retrieve status information from clustered OVSDB. + + :param target: Usually one of 'ovsdb-server', 'ovnnb_db', 'ovnsb_db', can + also be full path to control socket. + :type target: str + :param schema: Database schema name, deduced from target if not provided + :type schema: Optional[str] + :param use_ovs_appctl: The ``ovn-appctl`` command appeared in OVN 20.03, + set this to True to use ``ovs-appctl`` instead. 
+ :type use_ovs_appctl: bool + :param rundir: Override path to sockets + :type rundir: Optional[str] + :returns: cluster status data object + :rtype: OVNClusterStatus + :raises: subprocess.CalledProcessError, KeyError, RuntimeError + """ + schema_map = { + 'ovnnb_db': 'OVN_Northbound', + 'ovnsb_db': 'OVN_Southbound', + } + if schema and schema not in schema_map.keys(): + raise RuntimeError('Unknown schema provided: "{}"'.format(schema)) + + status = {} + k = '' + for line in ovn_appctl(target, + ('cluster/status', schema or schema_map[target]), + rundir=rundir, + use_ovs_appctl=use_ovs_appctl).splitlines(): + if k and line.startswith(' '): + # there is no key which means this is a instance of a multi-line/ + # multi-value item, populate the List which is already stored under + # the key. + if k == 'servers': + status[k].append( + tuple(line.replace(')', '').lstrip().split()[0:4:3])) + else: + status[k].append(line.lstrip()) + elif ':' in line: + # this is a line with a key + k, v = line.split(':', 1) + k = k.lower() + k = k.replace(' ', '_') + if v: + # this is a line with both key and value + if k in ('cluster_id', 'server_id',): + v = v.replace('(', '') + v = v.replace(')', '') + status[k] = tuple(v.split()) + else: + status[k] = v.lstrip() + else: + # this is a line with only key which means a multi-line/ + # multi-value item. Store key as List which will be + # populated on subsequent iterations. + status[k] = [] + return OVNClusterStatus( + status['name'], + uuid.UUID(status['cluster_id'][1]), + uuid.UUID(status['server_id'][1]), + status['address'], + status['status'], + status['role'], + int(status['term']), + status['leader'], + status['vote'], + int(status['election_timer']), + status['log'], + int(status['entries_not_yet_committed']), + int(status['entries_not_yet_applied']), + status['connections'], + status['servers']) + + +def is_northd_active(): + """Query `ovn-northd` for active status. + + Note that the active status information for ovn-northd is available for + OVN 20.03 and onward. + + :returns: True if local `ovn-northd` instance is active, False otherwise + :rtype: bool + """ + try: + for line in ovn_appctl('ovn-northd', ('status',)).splitlines(): + if line.startswith('Status:') and 'active' in line: + return True + except subprocess.CalledProcessError: + pass + return False diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovsdb.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovsdb.py new file mode 100644 index 00000000..5e50bc36 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/ovsdb.py @@ -0,0 +1,206 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import uuid + +from . import utils + + +class SimpleOVSDB(object): + """Simple interface to OVSDB through the use of command line tools. + + OVS and OVN is managed through a set of databases. 
These databases have + similar command line tools to manage them. We make use of the similarity + to provide a generic class that can be used to manage them. + + The OpenvSwitch project does provide a Python API, but on the surface it + appears to be a bit too involved for our simple use case. + + Examples: + sbdb = SimpleOVSDB('ovn-sbctl') + for chs in sbdb.chassis: + print(chs) + + ovsdb = SimpleOVSDB('ovs-vsctl') + for br in ovsdb.bridge: + if br['name'] == 'br-test': + ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed') + """ + + # For validation we keep a complete map of currently known good tool and + # table combinations. This requires maintenance down the line whenever + # upstream adds things that downstream wants, and the cost of maintaining + # that will most likely be lower then the cost of finding the needle in + # the haystack whenever downstream code misspells something. + _tool_table_map = { + 'ovs-vsctl': ( + 'autoattach', + 'bridge', + 'ct_timeout_policy', + 'ct_zone', + 'controller', + 'datapath', + 'flow_sample_collector_set', + 'flow_table', + 'ipfix', + 'interface', + 'manager', + 'mirror', + 'netflow', + 'open_vswitch', + 'port', + 'qos', + 'queue', + 'ssl', + 'sflow', + ), + 'ovn-nbctl': ( + 'acl', + 'address_set', + 'connection', + 'dhcp_options', + 'dns', + 'forwarding_group', + 'gateway_chassis', + 'ha_chassis', + 'ha_chassis_group', + 'load_balancer', + 'load_balancer_health_check', + 'logical_router', + 'logical_router_policy', + 'logical_router_port', + 'logical_router_static_route', + 'logical_switch', + 'logical_switch_port', + 'meter', + 'meter_band', + 'nat', + 'nb_global', + 'port_group', + 'qos', + 'ssl', + ), + 'ovn-sbctl': ( + 'address_set', + 'chassis', + 'connection', + 'controller_event', + 'dhcp_options', + 'dhcpv6_options', + 'dns', + 'datapath_binding', + 'encap', + 'gateway_chassis', + 'ha_chassis', + 'ha_chassis_group', + 'igmp_group', + 'ip_multicast', + 'logical_flow', + 'mac_binding', + 'meter', + 'meter_band', + 'multicast_group', + 'port_binding', + 'port_group', + 'rbac_permission', + 'rbac_role', + 'sb_global', + 'ssl', + 'service_monitor', + ), + } + + def __init__(self, tool): + """SimpleOVSDB constructor. + + :param tool: Which tool with database commands to operate on. + Usually one of `ovs-vsctl`, `ovn-nbctl`, `ovn-sbctl` + :type tool: str + """ + if tool not in self._tool_table_map: + raise RuntimeError( + 'tool must be one of "{}"'.format(self._tool_table_map.keys())) + self._tool = tool + + def __getattr__(self, table): + if table not in self._tool_table_map[self._tool]: + raise AttributeError( + 'table "{}" not known for use with "{}"' + .format(table, self._tool)) + return self.Table(self._tool, table) + + class Table(object): + """Methods to interact with contents of OVSDB tables. + + NOTE: At the time of this writing ``find`` is the only command + line argument to OVSDB manipulating tools that actually supports + JSON output. + """ + + def __init__(self, tool, table): + """SimpleOVSDBTable constructor. + + :param table: Which table to operate on + :type table: str + """ + self._tool = tool + self._table = table + + def _find_tbl(self, condition=None): + """Run and parse output of OVSDB `find` command. + + :param condition: An optional RFC 7047 5.1 match condition + :type condition: Optional[str] + :returns: Dictionary with data + :rtype: Dict[str, any] + """ + # When using json formatted output to OVS commands Internal OVSDB + # notation may occur that require further deserializing. 
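+            # For example (illustrative values), a column may be returned
+            # as ['uuid', '<uuid-string>'] or ['map', [['charm', 'managed']]],
+            # which the callback map below deserializes to uuid.UUID / dict.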
+ # Reference: https://tools.ietf.org/html/rfc7047#section-5.1 + ovs_type_cb_map = { + 'uuid': uuid.UUID, + # FIXME sets also appear to sometimes contain type/value tuples + 'set': list, + 'map': dict, + } + cmd = [self._tool, '-f', 'json', 'find', self._table] + if condition: + cmd.append(condition) + output = utils._run(*cmd) + data = json.loads(output) + for row in data['data']: + values = [] + for col in row: + if isinstance(col, list): + f = ovs_type_cb_map.get(col[0], str) + values.append(f(col[1])) + else: + values.append(col) + yield dict(zip(data['headings'], values)) + + def __iter__(self): + return self._find_tbl() + + def clear(self, rec, col): + utils._run(self._tool, 'clear', self._table, rec, col) + + def find(self, condition): + return self._find_tbl(condition=condition) + + def remove(self, rec, col, value): + utils._run(self._tool, 'remove', self._table, rec, col, value) + + def set(self, rec, col, value): + utils._run(self._tool, 'set', self._table, rec, + '{}={}'.format(col, value)) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/utils.py new file mode 100644 index 00000000..53c9b4dd --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ovs/utils.py @@ -0,0 +1,26 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess + + +def _run(*args): + """Run a process, check result, capture decoded output from STDOUT. + + :param args: Command and arguments to run + :type args: Tuple[str, ...] + :returns: Information about the completed process + :rtype: str + :raises subprocess.CalledProcessError + """ + return subprocess.check_output(args, universal_newlines=True) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ufw.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ufw.py new file mode 100644 index 00000000..b9bf7c9d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/network/ufw.py @@ -0,0 +1,386 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module contains helpers to add and remove ufw rules. 
+ +Examples: + +- open SSH port for subnet 10.0.3.0/24: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp') + +- open service by name as defined in /etc/services: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('ssh', 'open') + +- close service by port number: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('4949', 'close') # munin +""" +import os +import re +import subprocess + +from charmhelpers.core import hookenv +from charmhelpers.core.kernel import modprobe, is_module_loaded + +__author__ = "Felipe Reyes <felipe.reyes@canonical.com>" + + +class UFWError(Exception): + pass + + +class UFWIPv6Error(UFWError): + pass + + +def is_enabled(): + """ + Check if `ufw` is enabled + + :returns: True if ufw is enabled + """ + output = subprocess.check_output(['ufw', 'status'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall(r'^Status: active\n', output, re.M) + + return len(m) >= 1 + + +def is_ipv6_ok(soft_fail=False): + """ + Check if IPv6 support is present and ip6tables functional + + :param soft_fail: If set to True and IPv6 support is broken, then reports + that the host doesn't have IPv6 support, otherwise a + UFWIPv6Error exception is raised. + :returns: True if IPv6 is working, False otherwise + """ + + # do we have IPv6 in the machine? + if os.path.isdir('/proc/sys/net/ipv6'): + # is ip6tables kernel module loaded? + if not is_module_loaded('ip6_tables'): + # ip6tables support isn't complete, let's try to load it + try: + modprobe('ip6_tables') + # great, we can load the module + return True + except subprocess.CalledProcessError as ex: + hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, + level="WARN") + # we are in a world where ip6tables isn't working + if soft_fail: + # so we inform that the machine doesn't have IPv6 + return False + else: + raise UFWIPv6Error("IPv6 firewall support broken") + else: + # the module is present :) + return True + + else: + # the system doesn't have IPv6 + return False + + +def disable_ipv6(): + """ + Disable ufw IPv6 support in /etc/default/ufw + """ + exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + if exit_code == 0: + hookenv.log('IPv6 support in ufw disabled', level='INFO') + else: + hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR") + raise UFWError("Couldn't disable IPv6 support in ufw") + + +def enable(soft_fail=False): + """ + Enable ufw + + :param soft_fail: If set to True silently disables IPv6 support in ufw, + otherwise a UFWIPv6Error exception is raised when IP6 + support is broken. 
+    :returns: True if ufw is successfully enabled
+    """
+    if is_enabled():
+        return True
+
+    if not is_ipv6_ok(soft_fail):
+        disable_ipv6()
+
+    output = subprocess.check_output(['ufw', 'enable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall is active and enabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be enabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw enabled", level='INFO')
+        return True
+
+
+def reload():
+    """
+    Reload ufw
+
+    :returns: True if ufw is successfully reloaded
+    """
+    output = subprocess.check_output(['ufw', 'reload'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall reloaded\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be reloaded", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw reloaded", level='INFO')
+        return True
+
+
+def disable():
+    """
+    Disable ufw
+
+    :returns: True if ufw is successfully disabled
+    """
+    if not is_enabled():
+        return True
+
+    output = subprocess.check_output(['ufw', 'disable'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall(r'^Firewall stopped and disabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be disabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw disabled", level='INFO')
+        return True
+
+
+def default_policy(policy='deny', direction='incoming'):
+    """
+    Changes the default policy for traffic `direction`
+
+    :param policy: allow, deny or reject
+    :param direction: traffic direction, possible values: incoming, outgoing,
+                      routed
+    """
+    if policy not in ['allow', 'deny', 'reject']:
+        raise UFWError(('Unknown policy %s, valid values: '
+                        'allow, deny, reject') % policy)
+
+    if direction not in ['incoming', 'outgoing', 'routed']:
+        raise UFWError(('Unknown direction %s, valid values: '
+                        'incoming, outgoing, routed') % direction)
+
+    output = subprocess.check_output(['ufw', 'default', policy, direction],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+    hookenv.log(output, level='DEBUG')
+
+    m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
+                                                             policy),
+                   output, re.M)
+    if len(m) == 0:
+        hookenv.log("ufw couldn't change the default policy to %s for %s"
+                    % (policy, direction), level='WARN')
+        return False
+    else:
+        hookenv.log("ufw default policy for %s changed to %s"
+                    % (direction, policy), level='INFO')
+        return True
+
+
+def modify_access(src, dst='any', port=None, proto=None, action='allow',
+                  index=None, prepend=False, comment=None):
+    """
+    Add, insert, or delete a ufw rule for an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :type src: Optional[str]
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections to only one of them should be accepted,
+                this field has to be set.
+    :type dst: Optional[str]
+    :param port: destination port
+    :type port: Optional[int]
+    :param proto: protocol (tcp or udp)
+    :type proto: Optional[str]
+    :param action: `allow` or `delete`
+    :type action: str
+    :param index: if not None, the rule is inserted at the given
+                  `index`.
+    :type index: Optional[int]
+    :param prepend: Whether to insert the rule before all other rules matching
+                    the rule's IP type.
+    :type prepend: bool
+    :param comment: Create the rule with a comment
+    :type comment: Optional[str]
+    """
+    if not is_enabled():
+        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
+        return
+
+    if action == 'delete':
+        if index is not None:
+            cmd = ['ufw', '--force', 'delete', str(index)]
+        else:
+            cmd = ['ufw', 'delete', 'allow']
+    elif index is not None:
+        cmd = ['ufw', 'insert', str(index), action]
+    elif prepend:
+        cmd = ['ufw', 'prepend', action]
+    else:
+        cmd = ['ufw', action]
+
+    if src is not None:
+        cmd += ['from', src]
+
+    if dst is not None:
+        cmd += ['to', dst]
+
+    if port is not None:
+        cmd += ['port', str(port)]
+
+    if proto is not None:
+        cmd += ['proto', proto]
+
+    if comment:
+        cmd.extend(['comment', comment])
+
+    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
+    # Capture stderr as well so a failure can actually be logged below.
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE)
+    (stdout, stderr) = p.communicate()
+
+    hookenv.log(stdout, level='INFO')
+
+    if p.returncode != 0:
+        hookenv.log(stderr, level='ERROR')
+        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
+                                                              p.returncode),
+                    level='ERROR')
+
+
+def grant_access(src, dst='any', port=None, proto=None, index=None):
+    """
+    Grant access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections to only one of them should be accepted,
+                this field has to be set.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    :param index: if not None, the rule is inserted at the given
+                  `index`.
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
+                         index=index)
+
+
+def revoke_access(src, dst='any', port=None, proto=None):
+    """
+    Revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destination of the connection; if the machine has multiple
+                IPs and connections to only one of them should be accepted,
+                this field has to be set.
+    :param port: destination port
+    :param proto: protocol (tcp or udp)
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
+
+
+def service(name, action):
+    """
+    Open/close access to a service
+
+    :param name: could be a service name defined in `/etc/services` or a port
+                 number.
+    :param action: `open` or `close`
+    """
+    if action == 'open':
+        subprocess.check_output(['ufw', 'allow', str(name)],
+                                universal_newlines=True)
+    elif action == 'close':
+        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
+                                universal_newlines=True)
+    else:
+        raise UFWError(("'{}' not supported, use 'open' "
+                        "or 'close'").format(action))
+
+
+def status():
+    """Retrieve firewall rules as represented by UFW.
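+
+    Example (illustrative):
+        >>> for num, rule in status():
+        ...     print("{} {} {}".format(num, rule['to'], rule['action']))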
+
+    :returns: Tuples with rule number and data
+              (1, {'to': '', 'action': '', 'from': '', 'ipv6': True,
+                   'comment': ''})
+    :rtype: Iterator[Tuple[int, Dict[str, Union[bool, str]]]]
+    """
+    cp = subprocess.check_output(('ufw', 'status', 'numbered',),
+                                 stderr=subprocess.STDOUT,
+                                 universal_newlines=True)
+    for line in cp.splitlines():
+        if not line.startswith('['):
+            continue
+        ipv6 = '(v6)' in line
+        line = line.replace('(v6)', '')
+        line = line.replace('[', '')
+        line = line.replace(']', '')
+        line = line.replace('Anywhere', 'any')
+        row = line.split()
+        yield (int(row[0]), {
+            'to': row[1],
+            'action': ' '.join(row[2:4]).lower(),
+            'from': row[4],
+            'ipv6': ipv6,
+            'comment': row[6] if len(row) > 6 and row[5] == '#' else '',
+        })
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/alternatives.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/alternatives.py
new file mode 100644
index 00000000..547de09c
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/alternatives.py
@@ -0,0 +1,44 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +''' Helper for managing alternatives for file conflict resolution ''' + +import subprocess +import shutil +import os + + +def install_alternative(name, target, source, priority=50): + ''' Install alternative configuration ''' + if (os.path.exists(target) and not os.path.islink(target)): + # Move existing file/directory away before installing + shutil.move(target, '{}.bak'.format(target)) + cmd = [ + 'update-alternatives', '--force', '--install', + target, name, source, str(priority) + ] + subprocess.check_call(cmd) + + +def remove_alternative(name, source): + """Remove an installed alternative configuration file + + :param name: string name of the alternative to remove + :param source: string full path to alternative to remove + """ + cmd = [ + 'update-alternatives', '--remove', + name, source + ] + subprocess.check_call(cmd) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/deployment.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/deployment.py new file mode 100644 index 00000000..dd3aebe9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/deployment.py @@ -0,0 +1,384 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import os +import re +import sys +import six +from collections import OrderedDict +from charmhelpers.contrib.amulet.deployment import ( + AmuletDeployment +) +from charmhelpers.contrib.openstack.amulet.utils import ( + OPENSTACK_RELEASES_PAIRS +) + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + + +class OpenStackAmuletDeployment(AmuletDeployment): + """OpenStack amulet deployment. + + This class inherits from AmuletDeployment and has additional support + that is specifically for use by OpenStack charms. 
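+
+    Example (illustrative; series and origin depend on the test being run):
+        d = OpenStackAmuletDeployment(series='xenial',
+                                      openstack='cloud:xenial-ocata')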
+    """
+
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
+        """Initialize the deployment environment."""
+        super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment: init')
+        self.openstack = openstack
+        self.source = source
+        self.stable = stable
+
+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
+    def _determine_branch_locations(self, other_services):
+        """Determine the branch locations for the other services.
+
+        Determine if the local branch being tested is derived from its
+        stable or next (dev) branch, and based on this, use the corresponding
+        stable or next branches for the other_services."""
+
+        self.log.info('OpenStackAmuletDeployment: determine branch locations')
+
+        # Charms outside the ~openstack-charmers namespace
+        base_charms = {
+            'mysql': ['trusty'],
+            'mongodb': ['trusty'],
+            'nrpe': ['trusty', 'xenial'],
+        }
+
+        for svc in other_services:
+            # If a location has been explicitly set, use it
+            if svc.get('location'):
+                continue
+            if svc['name'] in base_charms:
+                # NOTE: not all charms have support for all series we
+                #       want/need to test against, so fix to most recent
+                #       that each base charm supports
+                target_series = self.series
+                if self.series not in base_charms[svc['name']]:
+                    target_series = base_charms[svc['name']][-1]
+                svc['location'] = 'cs:{}/{}'.format(target_series,
+                                                    svc['name'])
+            elif self.stable:
+                svc['location'] = 'cs:{}/{}'.format(self.series,
+                                                    svc['name'])
+            else:
+                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+                    self.series,
+                    svc['name']
+                )
+
+        return other_services
+
+    def _add_services(self, this_service, other_services, use_source=None,
+                      no_origin=None):
+        """Add services to the deployment and optionally set
+        openstack-origin/source.
+
+        :param this_service dict: Service dictionary describing the service
+                                  whose amulet tests are being run
+        :param other_services dict: List of service dictionaries describing
+                                    the services needed to support the target
+                                    service
+        :param use_source list: List of services which use the 'source' config
+                                option rather than 'openstack-origin'
+        :param no_origin list: List of services which do not support setting
+                               the Cloud Archive.
+        Service Dict:
+            {
+                'name': str charm-name,
+                'units': int number of units,
+                'constraints': dict of juju constraints,
+                'location': str location of charm,
+            }
+        e.g.:
+        this_service = {
+            'name': 'openvswitch-odl',
+            'constraints': {'mem': '8G'},
+        }
+        other_services = [
+            {
+                'name': 'nova-compute',
+                'units': 2,
+                'constraints': {'mem': '4G'},
+                'location': 'cs:~bob/xenial/nova-compute'
+            },
+            {
+                'name': 'mysql',
+                'constraints': {'mem': '2G'},
+            },
+            {'name': 'neutron-api-odl'}]
+        use_source = ['mysql']
+        no_origin = ['neutron-api-odl']
+        """
+        self.log.info('OpenStackAmuletDeployment: adding services')
+
+        other_services = self._determine_branch_locations(other_services)
+
+        super(OpenStackAmuletDeployment, self)._add_services(this_service,
+                                                             other_services)
+
+        services = other_services
+        services.append(this_service)
+
+        use_source = use_source or []
+        no_origin = no_origin or []
+
+        # Charms which should use the source config option
+        use_source = list(set(
+            use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
+                          'ceph-osd', 'ceph-radosgw', 'ceph-mon',
+                          'ceph-proxy', 'percona-cluster', 'lxd']))
+
+        # Charms which can not use openstack-origin, i.e. many subordinates
+        no_origin = list(set(
+            no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
+                         'nrpe', 'openvswitch-odl', 'neutron-api-odl',
+                         'odl-controller', 'cinder-backup', 'nexentaedge-data',
+                         'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
+                         'cinder-nexentaedge', 'nexentaedge-mgmt',
+                         'ceilometer-agent']))
+
+        if self.openstack:
+            for svc in services:
+                if svc['name'] not in use_source + no_origin:
+                    config = {'openstack-origin': self.openstack}
+                    self.d.configure(svc['name'], config)
+
+        if self.source:
+            for svc in services:
+                if svc['name'] in use_source and svc['name'] not in no_origin:
+                    config = {'source': self.source}
+                    self.d.configure(svc['name'], config)
+
+    def _configure_services(self, configs):
+        """Configure all of the services."""
+        self.log.info('OpenStackAmuletDeployment: configure services')
+        for service, config in six.iteritems(configs):
+            self.d.configure(service, config)
+
+    def _auto_wait_for_status(self, message=None, exclude_services=None,
+                              include_only=None, timeout=None):
+        """Wait for all units to have a specific extended status, except
+        for any defined as excluded.  Unless specified via message, any
+        status containing any case of 'ready' will be considered a match.
+
+        Examples of message usage:
+
+        Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+            message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+        Wait for all units to reach this status (exact match):
+            message = re.compile('^Unit is ready and clustered$')
+
+        Wait for all units to reach any one of these (exact match):
+            message = re.compile('Unit is ready|OK|Ready')
+
+        Wait for at least one unit to reach this status (exact match):
+            message = {'ready'}
+
+        See Amulet's sentry.wait_for_messages() for message usage detail.
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjunction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjunction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None.  Raises if timeout is hit.
+ """ + if not timeout: + timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800)) + self.log.info('Waiting for extended status on units for {}s...' + ''.format(timeout)) + + all_services = self.d.services.keys() + + if exclude_services and include_only: + raise ValueError('exclude_services can not be used ' + 'with include_only') + + if message: + if isinstance(message, re._pattern_type): + match = message.pattern + else: + match = message + + self.log.debug('Custom extended status wait match: ' + '{}'.format(match)) + else: + self.log.debug('Default extended status wait match: contains ' + 'READY (case-insensitive)') + message = re.compile('.*ready.*', re.IGNORECASE) + + if exclude_services: + self.log.debug('Excluding services from extended status match: ' + '{}'.format(exclude_services)) + else: + exclude_services = [] + + if include_only: + services = include_only + else: + services = list(set(all_services) - set(exclude_services)) + + self.log.debug('Waiting up to {}s for extended status on services: ' + '{}'.format(timeout, services)) + service_messages = {service: message for service in services} + + # Check for idleness + self.d.sentry.wait(timeout=timeout) + # Check for error states and bail early + self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout) + # Check for ready messages + self.d.sentry.wait_for_messages(service_messages, timeout=timeout) + + self.log.info('OK') + + def _get_openstack_release(self): + """Get openstack release. + + Return an integer representing the enum value of the openstack + release. + """ + # Must be ordered by OpenStack release (not by Ubuntu release): + for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS): + setattr(self, os_pair, i) + + releases = { + ('trusty', None): self.trusty_icehouse, + ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, + ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, + ('xenial', None): self.xenial_mitaka, + ('xenial', 'cloud:xenial-newton'): self.xenial_newton, + ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata, + ('xenial', 'cloud:xenial-pike'): self.xenial_pike, + ('xenial', 'cloud:xenial-queens'): self.xenial_queens, + ('yakkety', None): self.yakkety_newton, + ('zesty', None): self.zesty_ocata, + ('artful', None): self.artful_pike, + ('bionic', None): self.bionic_queens, + ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, + ('bionic', 'cloud:bionic-stein'): self.bionic_stein, + ('bionic', 'cloud:bionic-train'): self.bionic_train, + ('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri, + ('cosmic', None): self.cosmic_rocky, + ('disco', None): self.disco_stein, + ('eoan', None): self.eoan_train, + ('focal', None): self.focal_ussuri, + } + return releases[(self.series, self.openstack)] + + def _get_openstack_release_string(self): + """Get openstack release string. + + Return a string representing the openstack release. + """ + releases = OrderedDict([ + ('trusty', 'icehouse'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), + ]) + if self.openstack: + os_origin = self.openstack.split(':')[1] + return os_origin.split('%s-' % self.series)[1].split('/')[0] + else: + return releases[self.series] + + def get_percona_service_entry(self, memory_constraint=None): + """Return a amulet service entry for percona cluster. 
+ + :param memory_constraint: Override the default memory constraint + in the service entry. + :type memory_constraint: str + :returns: Amulet service entry. + :rtype: dict + """ + memory_constraint = memory_constraint or '3072M' + svc_entry = { + 'name': 'percona-cluster', + 'constraints': {'mem': memory_constraint}} + if self._get_openstack_release() <= self.trusty_mitaka: + svc_entry['location'] = 'cs:trusty/percona-cluster' + return svc_entry + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() == self.trusty_icehouse: + # Icehouse + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder-ceph', + 'glance' + ] + elif (self.trusty_kilo <= self._get_openstack_release() <= + self.zesty_ocata): + # Kilo through Ocata + pools = [ + 'rbd', + 'cinder-ceph', + 'glance' + ] + else: + # Pike and later + pools = [ + 'cinder-ceph', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..14864198 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,1593 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import amulet +import json +import logging +import os +import re +import six +import time +import urllib +import urlparse + +import cinderclient.v1.client as cinder_client +import cinderclient.v2.client as cinder_clientv2 +import glanceclient.v1 as glance_client +import glanceclient.v2 as glance_clientv2 +import heatclient.v1.client as heat_client +from keystoneclient.v2_0 import client as keystone_client +from keystoneauth1.identity import ( + v3, + v2, +) +from keystoneauth1 import session as keystone_session +from keystoneclient.v3 import client as keystone_client_v3 +from novaclient import exceptions + +import novaclient.client as nova_client +import novaclient +import pika +import swiftclient + +from charmhelpers.core.decorators import retry_on_exception +from charmhelpers.contrib.amulet.utils import ( + AmuletUtils +) +from charmhelpers.core.host import CompareHostReleases + +DEBUG = logging.DEBUG +ERROR = logging.ERROR + +NOVA_CLIENT_VERSION = "2" + +OPENSTACK_RELEASES_PAIRS = [ + 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty', + 'trusty_mitaka', 'xenial_mitaka', + 'xenial_newton', 'yakkety_newton', + 'xenial_ocata', 'zesty_ocata', + 'xenial_pike', 'artful_pike', + 'xenial_queens', 'bionic_queens', + 'bionic_rocky', 'cosmic_rocky', + 'bionic_stein', 'disco_stein', + 'bionic_train', 'eoan_train', + 'bionic_ussuri', 'focal_ussuri', +] + + +class OpenStackAmuletUtils(AmuletUtils): + """OpenStack amulet utilities. + + This class inherits from AmuletUtils and has additional support + that is specifically for use by OpenStack charm tests. + """ + + def __init__(self, log_level=ERROR): + """Initialize the deployment environment.""" + super(OpenStackAmuletUtils, self).__init__(log_level) + + def validate_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected, openstack_release=None): + """Validate endpoint data. Pick the correct validator based on + OpenStack release. Expected data should be in the v2 format: + { + 'id': id, + 'region': region, + 'adminurl': adminurl, + 'internalurl': internalurl, + 'publicurl': publicurl, + 'service_id': service_id} + + """ + validation_function = self.validate_v2_endpoint_data + xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens') + if openstack_release and openstack_release >= xenial_queens: + validation_function = self.validate_v3_endpoint_data + expected = { + 'id': expected['id'], + 'region': expected['region'], + 'region_id': 'RegionOne', + 'url': self.valid_url, + 'interface': self.not_null, + 'service_id': expected['service_id']} + return validation_function(endpoints, admin_port, internal_port, + public_port, expected) + + def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port, + public_port, expected): + """Validate endpoint data. + + Validate actual endpoint data vs expected endpoint data. The ports + are used to find the matching endpoint. 
+        """
+        self.log.debug('Validating endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = False
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
+                found = True
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'adminurl': ep.adminurl,
+                          'internalurl': ep.internalurl,
+                          'publicurl': ep.publicurl,
+                          'service_id': ep.service_id}
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if not found:
+            return 'endpoint not found'
+
+    def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected, expected_num_eps=3):
+        """Validate keystone v3 endpoint data.
+
+        Validate the v3 endpoint data which has changed from v2. The
+        ports are used to find the matching endpoint.
+
+        The new v3 endpoint data looks like:
+
+        [<Endpoint enabled=True,
+                   id=0432655fc2f74d1e9fa17bdaa6f6e60b,
+                   interface=admin,
+                   links={u'self': u'<RESTful URL of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=17f842a0dc084b928e476fafe67e4095,
+                   url=http://10.5.6.5:9312>,
+         <Endpoint enabled=True,
+                   id=6536cb6cb92f4f41bf22b079935c7707,
+                   interface=admin,
+                   links={u'self': u'<RESTful url of this endpoint>'},
+                   region=RegionOne,
+                   region_id=RegionOne,
+                   service_id=72fc8736fb41435e8b3584205bb2cfa3,
+                   url=http://10.5.6.6:35357/v3>,
+         ... ]
+        """
+        self.log.debug('Validating v3 endpoint data...')
+        self.log.debug('actual: {}'.format(repr(endpoints)))
+        found = []
+        for ep in endpoints:
+            self.log.debug('endpoint: {}'.format(repr(ep)))
+            if ((admin_port in ep.url and ep.interface == 'admin') or
+                    (internal_port in ep.url and ep.interface == 'internal') or
+                    (public_port in ep.url and ep.interface == 'public')):
+                found.append(ep.interface)
+                # note we ignore the links member.
+                actual = {'id': ep.id,
+                          'region': ep.region,
+                          'region_id': ep.region_id,
+                          'interface': self.not_null,
+                          'url': ep.url,
+                          'service_id': ep.service_id, }
+                ret = self._validate_dict_data(expected, actual)
+                if ret:
+                    return 'unexpected endpoint data - {}'.format(ret)
+
+        if len(found) != expected_num_eps:
+            return 'Unexpected number of endpoints found'
+
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+        """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+        for the OpenStack version. Expected data should be in the v2 format:
+        {
+            'service_name1': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+            'service_name2': [
+                {
+                    'adminURL': adminURL,
+                    'id': id,
+                    'region': region,
+                    'publicURL': publicURL,
+                    'internalURL': internalURL
+                }],
+        }
+
+        """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
+        self.log.debug('Validating service catalog endpoint data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in six.iteritems(expected):
+            if k in actual:
+                ret = self._validate_dict_data(expected[k][0], actual[k][0])
+                if ret:
+                    return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
+    def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
+        """Validate the keystone v3 catalog endpoint data.
+
+        Validate a list of dictionaries that make up the keystone v3 service
+        catalogue.
+
+        It is in the form of:
+
+
+        {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
+                        u'interface': u'admin',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:35357/v3'},
+                       {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
+                        u'interface': u'public',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'},
+                       {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
+                        u'interface': u'internal',
+                        u'region': u'RegionOne',
+                        u'region_id': u'RegionOne',
+                        u'url': u'http://10.5.5.224:5000/v3'}],
+         u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
+                           u'interface': u'public',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
+                           u'interface': u'internal',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9311'},
+                          {u'id': u'f629388955bc407f8b11d8b7ca168086',
+                           u'interface': u'admin',
+                           u'region': u'RegionOne',
+                           u'region_id': u'RegionOne',
+                           u'url': u'http://10.5.5.223:9312'}]}
+
+        Note that an added complication is that the order of admin, public
+        and internal entries against 'interface' may vary in each region.
+
+        Thus, the function sorts the expected and actual lists using the
+        interface key as a sort key, prior to the comparison.
+        """
+        self.log.debug('Validating v3 service catalog endpoint data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for k, v in six.iteritems(expected):
+            if k in actual:
+                l_expected = sorted(v, key=lambda x: x['interface'])
+                l_actual = sorted(actual[k], key=lambda x: x['interface'])
+                if len(l_actual) != len(l_expected):
+                    return ("endpoint {} has differing number of interfaces "
+                            "- expected({}), actual({})"
+                            .format(k, len(l_expected), len(l_actual)))
+                for i_expected, i_actual in zip(l_expected, l_actual):
+                    self.log.debug("checking interface {}"
+                                   .format(i_expected['interface']))
+                    ret = self._validate_dict_data(i_expected, i_actual)
+                    if ret:
+                        return self.endpoint_error(k, ret)
+            else:
+                return "endpoint {} does not exist".format(k)
+        return ret
+
+    def validate_tenant_data(self, expected, actual):
+        """Validate tenant data.
+
+        Validate a list of actual tenant data vs list of expected tenant
+        data.
+        """
+        self.log.debug('Validating tenant data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'enabled': act.enabled, 'description': act.description,
+                     'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected tenant data - {}".format(ret)
+            if not found:
+                return "tenant {} does not exist".format(e['name'])
+        return ret
+
+    def validate_role_data(self, expected, actual):
+        """Validate role data.
+
+        Validate a list of actual role data vs a list of expected role
+        data.
+        """
+        self.log.debug('Validating role data...')
+        self.log.debug('actual: {}'.format(repr(actual)))
+        for e in expected:
+            found = False
+            for act in actual:
+                a = {'name': act.name, 'id': act.id}
+                if e['name'] == a['name']:
+                    found = True
+                    ret = self._validate_dict_data(e, a)
+                    if ret:
+                        return "unexpected role data - {}".format(ret)
+            if not found:
+                return "role {} does not exist".format(e['name'])
+        return ret
+
+    def validate_user_data(self, expected, actual, api_version=None):
+        """Validate user data.
+
+        Validate a list of actual user data vs a list of expected user
+        data.
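+
+        Example expected entry (illustrative; `u.not_null` is the wildcard
+        validator inherited from AmuletUtils):
+
+            {'name': 'demo', 'enabled': True, 'email': 'demo@demo.com',
+             'tenantId': u.not_null, 'id': u.not_null}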
+ """ + self.log.debug('Validating user data...') + self.log.debug('actual: {}'.format(repr(actual))) + for e in expected: + found = False + for act in actual: + if e['name'] == act.name: + a = {'enabled': act.enabled, 'name': act.name, + 'email': act.email, 'id': act.id} + if api_version == 3: + a['default_project_id'] = getattr(act, + 'default_project_id', + 'none') + else: + a['tenantId'] = act.tenantId + found = True + ret = self._validate_dict_data(e, a) + if ret: + return "unexpected user data - {}".format(ret) + if not found: + return "user {} does not exist".format(e['name']) + return ret + + def validate_flavor_data(self, expected, actual): + """Validate flavor data. + + Validate a list of actual flavors vs a list of expected flavors. + """ + self.log.debug('Validating flavor data...') + self.log.debug('actual: {}'.format(repr(actual))) + act = [a.name for a in actual] + return self._validate_list_data(expected, act) + + def tenant_exists(self, keystone, tenant): + """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) + return tenant in [t.name for t in keystone.tenants.list()] + + @retry_on_exception(num_retries=5, base_delay=1) + def keystone_wait_for_propagation(self, sentry_relation_pairs, + api_version): + """Iterate over list of sentry and relation tuples and verify that + api_version has the expected value. + + :param sentry_relation_pairs: list of sentry, relation name tuples used + for monitoring propagation of relation + data + :param api_version: api_version to expect in relation data + :returns: None if successful. Raise on error. + """ + for (sentry, relation_name) in sentry_relation_pairs: + rel = sentry.relation('identity-service', + relation_name) + self.log.debug('keystone relation data: {}'.format(rel)) + if rel.get('api_version') != str(api_version): + raise Exception("api_version not propagated through relation" + " data yet ('{}' != '{}')." + "".format(rel.get('api_version'), api_version)) + + def keystone_configure_api_version(self, sentry_relation_pairs, deployment, + api_version): + """Configure preferred-api-version of keystone in deployment and + monitor provided list of relation objects for propagation + before returning to caller. + + :param sentry_relation_pairs: list of sentry, relation tuples used for + monitoring propagation of relation data + :param deployment: deployment to configure + :param api_version: value preferred-api-version will be set to + :returns: None if successful. Raise on error. 
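+
+        Example call (illustrative; `self` here is the amulet deployment
+        wrapper that exposes `d.configure`):
+
+            pairs = [(self.cinder_sentry, 'cinder:identity-service')]
+            u.keystone_configure_api_version(pairs, self, 3)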
+        """
+        self.log.debug("Setting keystone preferred-api-version: '{}'"
+                       "".format(api_version))
+
+        config = {'preferred-api-version': api_version}
+        deployment.d.configure('keystone', config)
+        deployment._auto_wait_for_status()
+        self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
+
+    def authenticate_cinder_admin(self, keystone, api_version=2):
+        """Authenticates admin user with cinder."""
+        self.log.debug('Authenticating cinder admin...')
+        _clients = {
+            1: cinder_client.Client,
+            2: cinder_clientv2.Client}
+        return _clients[api_version](session=keystone.session)
+
+    def authenticate_keystone(self, keystone_ip, username, password,
+                              api_version=False, admin_port=False,
+                              user_domain_name=None, domain_name=None,
+                              project_domain_name=None, project_name=None):
+        """Authenticate with Keystone"""
+        self.log.debug('Authenticating with keystone...')
+        if not api_version:
+            api_version = 2
+        sess, auth = self.get_keystone_session(
+            keystone_ip=keystone_ip,
+            username=username,
+            password=password,
+            api_version=api_version,
+            admin_port=admin_port,
+            user_domain_name=user_domain_name,
+            domain_name=domain_name,
+            project_domain_name=project_domain_name,
+            project_name=project_name
+        )
+        if api_version == 2:
+            client = keystone_client.Client(session=sess)
+        else:
+            client = keystone_client_v3.Client(session=sess)
+        # This populates the client.service_catalog
+        client.auth_ref = auth.get_access(sess)
+        return client
+
+    def get_keystone_session(self, keystone_ip, username, password,
+                             api_version=False, admin_port=False,
+                             user_domain_name=None, domain_name=None,
+                             project_domain_name=None, project_name=None):
+        """Return a keystone session object"""
+        ep = self.get_keystone_endpoint(keystone_ip,
+                                        api_version=api_version,
+                                        admin_port=admin_port)
+        if api_version == 2:
+            auth = v2.Password(
+                username=username,
+                password=password,
+                tenant_name=project_name,
+                auth_url=ep
+            )
+            sess = keystone_session.Session(auth=auth)
+        else:
+            auth = v3.Password(
+                user_domain_name=user_domain_name,
+                username=username,
+                password=password,
+                domain_name=domain_name,
+                project_domain_name=project_domain_name,
+                project_name=project_name,
+                auth_url=ep
+            )
+            sess = keystone_session.Session(auth=auth)
+        return (sess, auth)
+
+    def get_keystone_endpoint(self, keystone_ip, api_version=None,
+                              admin_port=False):
+        """Return keystone endpoint"""
+        port = 5000
+        if admin_port:
+            port = 35357
+        base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
+                                        port)
+        if api_version == 2:
+            ep = base_ep + "/v2.0"
+        else:
+            ep = base_ep + "/v3"
+        return ep
+
+    def get_default_keystone_session(self, keystone_sentry,
+                                     openstack_release=None, api_version=2):
+        """Return a keystone session object and client object assuming standard
+           default settings
+
+        Example call in amulet tests:
+            self.keystone_session, self.keystone = u.get_default_keystone_session(
+                self.keystone_sentry,
+                openstack_release=self._get_openstack_release())
+
+        The session can then be used to auth other clients:
+            neutronclient.Client(session=session)
+            aodh_client.Client(session=session)
+            etc.
+        """
+        self.log.debug('Authenticating keystone admin...')
+        # 11 => xenial_queens
+        if api_version == 3 or (openstack_release and openstack_release >= 11):
+            client_class = keystone_client_v3.Client
+            api_version = 3
+        else:
+            client_class = keystone_client.Client
+        keystone_ip = keystone_sentry.info['public-address']
+        session, auth = self.get_keystone_session(
+            keystone_ip,
+            api_version=api_version,
+            username='admin',
+            
password='openstack', + project_name='admin', + user_domain_name='admin_domain', + project_domain_name='admin_domain') + client = client_class(session=session) + # This populates the client.service_catalog + client.auth_ref = auth.get_access(session) + return session, client + + def authenticate_keystone_admin(self, keystone_sentry, user, password, + tenant=None, api_version=None, + keystone_ip=None, user_domain_name=None, + project_domain_name=None, + project_name=None): + """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') + if not keystone_ip: + keystone_ip = keystone_sentry.info['public-address'] + + # To support backward compatibility usage of this function + if not project_name: + project_name = tenant + if api_version == 3 and not user_domain_name: + user_domain_name = 'admin_domain' + if api_version == 3 and not project_domain_name: + project_domain_name = 'admin_domain' + if api_version == 3 and not project_name: + project_name = 'admin' + + return self.authenticate_keystone( + keystone_ip, user, password, + api_version=api_version, + user_domain_name=user_domain_name, + project_domain_name=project_domain_name, + project_name=project_name, + admin_port=True) + + def authenticate_keystone_user(self, keystone, user, password, tenant): + """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + keystone_ip = urlparse.urlparse(ep).hostname + + return self.authenticate_keystone(keystone_ip, user, password, + project_name=tenant) + + def authenticate_glance_admin(self, keystone, force_v1_client=False): + """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') + ep = keystone.service_catalog.url_for(service_type='image', + interface='adminURL') + if not force_v1_client and keystone.session: + return glance_clientv2.Client("2", session=keystone.session) + else: + return glance_client.Client(ep, token=keystone.auth_token) + + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + interface='publicURL') + if keystone.session: + return heat_client.Client(endpoint=ep, session=keystone.session) + else: + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + + def authenticate_nova_user(self, keystone, user, password, tenant): + """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return nova_client.Client(NOVA_CLIENT_VERSION, + session=keystone.session, + auth_url=ep) + elif novaclient.__version__[0] >= "7": + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, password=password, + project_name=tenant, auth_url=ep) + else: + return nova_client.Client(NOVA_CLIENT_VERSION, + username=user, api_key=password, + project_id=tenant, auth_url=ep) + + def authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + interface='publicURL') + if keystone.session: + return 
swiftclient.Connection(session=keystone.session) + else: + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + + def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto", + ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): + """Create the specified flavor.""" + try: + nova.flavors.find(name=name) + except (exceptions.NotFound, exceptions.NoUniqueMatch): + self.log.debug('Creating flavor ({})'.format(name)) + nova.flavors.create(name, ram, vcpus, disk, flavorid, + ephemeral, swap, rxtx_factor, is_public) + + def glance_create_image(self, glance, image_name, image_url, + download_dir='tests', + hypervisor_type=None, + disk_format='qcow2', + architecture='x86_64', + container_format='bare'): + """Download an image and upload it to glance, validate its status + and return an image object pointer. KVM defaults, can override for + LXD. + + :param glance: pointer to authenticated glance api connection + :param image_name: display name for new image + :param image_url: url to retrieve + :param download_dir: directory to store downloaded image file + :param hypervisor_type: glance image hypervisor property + :param disk_format: glance image disk format + :param architecture: glance image architecture property + :param container_format: glance image container format + :returns: glance image pointer + """ + self.log.debug('Creating glance image ({}) from ' + '{}...'.format(image_name, image_url)) + + # Download image + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + abs_file_name = os.path.join(download_dir, image_name) + if not os.path.exists(abs_file_name): + opener.retrieve(image_url, abs_file_name) + + # Create glance image + glance_properties = { + 'architecture': architecture, + } + if hypervisor_type: + glance_properties['hypervisor_type'] = hypervisor_type + # Create glance image + if float(glance.version) < 2.0: + with open(abs_file_name) as f: + image = glance.images.create( + name=image_name, + is_public=True, + disk_format=disk_format, + container_format=container_format, + properties=glance_properties, + data=f) + else: + image = glance.images.create( + name=image_name, + visibility="public", + disk_format=disk_format, + container_format=container_format) + glance.images.upload(image.id, open(abs_file_name, 'rb')) + glance.images.update(image.id, **glance_properties) + + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' 
+ amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + + if float(glance.version) < 2.0: + val_img_pub = glance.images.get(img_id).is_public + else: + val_img_pub = glance.images.get(img_id).visibility == "public" + + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == container_format \ + and val_img_dfmt == disk_format: + self.log.debug(msg_attr) + else: + msg = ('Image validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return image + + def create_cirros_image(self, glance, image_name, hypervisor_type=None): + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :param hypervisor_type: glance image hypervisor property + :returns: glance image pointer + """ + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'glance_create_image instead of ' + 'create_cirros_image.') + + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Get cirros image URL + http_proxy = os.getenv('OS_TEST_HTTP_PROXY') + self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + if http_proxy: + proxies = {'http': http_proxy} + opener = urllib.FancyURLopener(proxies) + else: + opener = urllib.FancyURLopener() + + f = opener.open('http://download.cirros-cloud.net/version/released') + version = f.read().strip() + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', + version, cirros_img) + f.close() + + return self.glance_create_image( + glance, + image_name, + cirros_url, + hypervisor_type=hypervisor_type) + + def delete_image(self, glance, image): + """Delete the specified image.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return self.delete_resource(glance.images, image, msg='glance image') + + def create_instance(self, nova, image_name, instance_name, flavor): + """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) + image = nova.glance.find_image(image_name) + flavor = nova.flavors.find(name=flavor) + instance = nova.servers.create(name=instance_name, image=image, + flavor=flavor) + + count = 1 + status = instance.status + while status != 'ACTIVE' and count < 60: + time.sleep(3) + instance = nova.servers.get(instance.id) + status = instance.status + self.log.debug('instance status: {}'.format(status)) + count += 1 + + if status != 'ACTIVE': + self.log.error('instance creation timed out') + return None + + return instance + + def delete_instance(self, nova, instance): + """Delete the specified instance.""" + + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + 
self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') + + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except Exception: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def _get_cinder_obj_name(self, cinder_object): + """Retrieve name of cinder object. + + :param cinder_object: cinder snapshot or volume object + :returns: str cinder object name + """ + # v1 objects store name in 'display_name' attr but v2+ use 'name' + try: + return cinder_object.display_name + except AttributeError: + return cinder_object.name + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except TypeError: + vol_new = cinder.volumes.create(name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume failed to reach expected state.' 
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new volume
+        self.log.debug('Validating volume attributes...')
+        val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
+        val_vol_boot = cinder.volumes.get(vol_id).bootable
+        val_vol_stat = cinder.volumes.get(vol_id).status
+        val_vol_size = cinder.volumes.get(vol_id).size
+        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+                    '{} size:{}'.format(val_vol_name, vol_id,
+                                        val_vol_stat, val_vol_boot,
+                                        val_vol_size))
+
+        if val_vol_boot == bootable and val_vol_stat == 'available' \
+                and val_vol_name == vol_name and val_vol_size == vol_size:
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return vol_new
+
+    def delete_resource(self, resource, resource_id,
+                        msg="resource", max_wait=120):
+        """Delete one openstack resource, such as one instance, keypair,
+        image, volume, stack, etc., and confirm deletion within max wait time.
+
+        :param resource: pointer to os resource type, ex:glance_client.images
+        :param resource_id: unique name or id for the openstack resource
+        :param msg: text to identify purpose in logging
+        :param max_wait: maximum wait time in seconds
+        :returns: True if successful, otherwise False
+        """
+        self.log.debug('Deleting OpenStack resource '
+                       '{} ({})'.format(resource_id, msg))
+        num_before = len(list(resource.list()))
+        resource.delete(resource_id)
+
+        tries = 0
+        num_after = len(list(resource.list()))
+        while num_after != (num_before - 1) and tries < (max_wait / 4):
+            self.log.debug('{} delete check: '
+                           '{} [{}:{}] {}'.format(msg, tries,
+                                                  num_before,
+                                                  num_after,
+                                                  resource_id))
+            time.sleep(4)
+            num_after = len(list(resource.list()))
+            tries += 1
+
+        self.log.debug('{}: expected, actual count = {}, '
+                       '{}'.format(msg, num_before - 1, num_after))
+
+        if num_after == (num_before - 1):
+            return True
+        else:
+            self.log.error('{} delete timed out'.format(msg))
+            return False
+
+    def resource_reaches_status(self, resource, resource_id,
+                                expected_stat='available',
+                                msg='resource', max_wait=120):
+        """Wait for an openstack resource's status to reach an
+        expected status within a specified time. Useful to confirm that
+        nova instances, cinder vols, snapshots, glance images, heat stacks
+        and other resources eventually reach the expected status.
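+
+        Example call (illustrative; this mirrors the volume wait used in
+        create_cinder_volume above):
+
+            ret = self.resource_reaches_status(cinder.volumes, vol_id,
+                                               expected_stat='available',
+                                               msg='Volume status wait')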
+
+        :param resource: pointer to os resource type, ex: heat_client.stacks
+        :param resource_id: unique id for the openstack resource
+        :param expected_stat: status to expect resource to reach
+        :param msg: text to identify purpose in logging
+        :param max_wait: maximum wait time in seconds
+        :returns: True if successful, False if status is not reached
+        """
+
+        tries = 0
+        resource_stat = resource.get(resource_id).status
+        while resource_stat != expected_stat and tries < (max_wait / 4):
+            self.log.debug('{} status check: '
+                           '{} [{}:{}] {}'.format(msg, tries,
+                                                  resource_stat,
+                                                  expected_stat,
+                                                  resource_id))
+            time.sleep(4)
+            resource_stat = resource.get(resource_id).status
+            tries += 1
+
+        self.log.debug('{}: expected, actual status = {}, '
+                       '{}'.format(msg, expected_stat, resource_stat))
+
+        if resource_stat == expected_stat:
+            return True
+        else:
+            self.log.debug('{} never reached expected status: '
+                           '{}'.format(resource_id, expected_stat))
+            return False
+
+    def get_ceph_osd_id_cmd(self, index):
+        """Produce a shell command that will return a ceph-osd id."""
+        return ("`initctl list | grep 'ceph-osd ' | "
+                "awk 'NR=={} {{ print $2 }}' | "
+                "grep -o '[0-9]*'`".format(index + 1))
+
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool name as keys, pool id as vals."""
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # From Mimic onward, `ceph osd lspools` separates pools with
+        # newlines rather than commas; normalize before parsing.
+        output = output.replace("\n", ",")
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
+    def get_ceph_df(self, sentry_unit):
+        """Return dict of ceph df json output, including ceph pool state.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :returns: Dict of ceph df output
+        """
+        cmd = 'sudo ceph df --format=json'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return json.loads(output)
+
+    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+        """Take a sample of attributes of a ceph pool, returning ceph
+        pool name, object count and disk space used for the specified
+        pool ID number.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param pool_id: Ceph pool ID
+        :returns: List of pool name, object count, kb disk space used
+        """
+        df = self.get_ceph_df(sentry_unit)
+        for pool in df['pools']:
+            if pool['id'] == pool_id:
+                pool_name = pool['name']
+                obj_count = pool['stats']['objects']
+                kb_used = pool['stats']['kb_used']
+
+        self.log.debug('Ceph {} pool (ID {}): {} objects, '
+                       '{} kb used'.format(pool_name, pool_id,
+                                           obj_count, kb_used))
+        return pool_name, obj_count, kb_used
+
+    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+        """Validate ceph pool samples taken over time, such as pool
+        object counts or pool kb used, before adding, after adding, and
+        after deleting items which affect those pool attributes. The
+        2nd element is expected to be greater than the 1st; 3rd is
+        expected to be less than the 2nd.
+
+        :param samples: List containing 3 data samples
+        :param sample_type: String for logging and usage context
+        :returns: None if successful, Failure message otherwise
+        """
+        original, created, deleted = range(3)
+        if samples[created] <= samples[original] or \
+                samples[deleted] >= samples[created]:
+            return ('Ceph {} samples ({}) '
+                    'unexpected.'.format(sample_type, samples))
+        else:
+            self.log.debug('Ceph {} samples (OK): '
+                           '{}'.format(sample_type, samples))
+            return None
+
+    # rabbitmq/amqp specific helpers:
+
+    def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+        """Wait for rmq units' extended status to show cluster readiness,
+        after an optional initial sleep period. Initial sleep is likely
+        necessary to be effective following a config change, as status
+        message may not instantly update to non-ready."""
+
+        if init_sleep:
+            time.sleep(init_sleep)
+
+        message = re.compile('^Unit is ready and clustered$')
+        deployment._auto_wait_for_status(message=message,
+                                         timeout=timeout,
+                                         include_only=['rabbitmq-server'])
+
+    def add_rmq_test_user(self, sentry_units,
+                          username="testuser1", password="changeme"):
+        """Add a test user via the first rmq juju unit, check connection as
+        the new user against all sentry units.
+
+        :param sentry_units: list of sentry unit pointers
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Adding rmq user ({})...'.format(username))
+
+        # Check that user does not already exist
+        cmd_user_list = 'rabbitmqctl list_users'
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+        if username in output:
+            self.log.warning('User ({}) already exists, returning '
+                             'gracefully.'.format(username))
+            return
+
+        perms = '".*" ".*" ".*"'
+        cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+                'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+        # Add user via first unit
+        for cmd in cmds:
+            output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+        # Check connection against the other sentry_units
+        self.log.debug('Checking user connect against units...')
+        for sentry_unit in sentry_units:
+            connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+                                                   username=username,
+                                                   password=password)
+            connection.close()
+
+    def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+        """Delete a rabbitmq user via the first rmq juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param username: amqp user name, default to testuser1
+        :returns: None if successful or no such user.
+        """
+        self.log.debug('Deleting rmq user ({})...'.format(username))
+
+        # Check that the user exists
+        cmd_user_list = 'rabbitmqctl list_users'
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+        if username not in output:
+            self.log.warning('User ({}) does not exist, returning '
+                             'gracefully.'.format(username))
+            return
+
+        # Delete the user
+        cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+    def get_rmq_cluster_status(self, sentry_unit):
+        """Execute rabbitmq cluster status command on a unit and return
+        the full output.
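+
+        Example output fragment that get_rmq_cluster_running_nodes below
+        parses (illustrative):
+
+            {running_nodes,['rabbit@juju-machine-0','rabbit@juju-machine-1']}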
+
+        :param unit: sentry unit
+        :returns: String containing console output of cluster status command
+        """
+        cmd = 'rabbitmqctl cluster_status'
+        output, _ = self.run_cmd_unit(sentry_unit, cmd)
+        self.log.debug('{} cluster_status:\n{}'.format(
+            sentry_unit.info['unit_name'], output))
+        return str(output)
+
+    def get_rmq_cluster_running_nodes(self, sentry_unit):
+        """Parse rabbitmqctl cluster_status output string, return list of
+        running rabbitmq cluster nodes.
+
+        :param unit: sentry unit
+        :returns: List containing node names of running nodes
+        """
+        # NOTE(beisner): rabbitmqctl cluster_status output is not
+        # json-parsable, do string chop foo, then json.loads that.
+        str_stat = self.get_rmq_cluster_status(sentry_unit)
+        if 'running_nodes' in str_stat:
+            pos_start = str_stat.find("{running_nodes,") + 15
+            pos_end = str_stat.find("]},", pos_start) + 1
+            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+            run_nodes = json.loads(str_run_nodes)
+            return run_nodes
+        else:
+            return []
+
+    def validate_rmq_cluster_running_nodes(self, sentry_units):
+        """Check that all rmq unit hostnames are represented in the
+        cluster_status output of all units.
+
+        :param sentry_units: list of sentry unit pointers (all rmq units)
+        :returns: None if successful, otherwise return error message
+        """
+        host_names = self.get_unit_hostnames(sentry_units)
+        errors = []
+
+        # Query every unit for cluster_status running nodes
+        for query_unit in sentry_units:
+            query_unit_name = query_unit.info['unit_name']
+            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+            # Confirm that every unit is represented in the queried unit's
+            # cluster_status running nodes output.
+            for validate_unit in sentry_units:
+                val_host_name = host_names[validate_unit.info['unit_name']]
+                val_node_name = 'rabbit@{}'.format(val_host_name)
+
+                if val_node_name not in running_nodes:
+                    errors.append('Cluster member check failed on {}: {} not '
+                                  'in {}\n'.format(query_unit_name,
+                                                   val_node_name,
+                                                   running_nodes))
+        if errors:
+            return ''.join(errors)
+
+    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+        """Check a single juju rmq unit for ssl and port in the config file."""
+        host = sentry_unit.info['public-address']
+        unit_name = sentry_unit.info['unit_name']
+
+        conf_file = '/etc/rabbitmq/rabbitmq.config'
+        conf_contents = str(self.file_contents_safe(sentry_unit,
+                                                    conf_file, max_wait=16))
+        # Checks
+        conf_ssl = 'ssl' in conf_contents
+        conf_port = str(port) in conf_contents
+
+        # Port explicitly checked in config
+        if port and conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif port and not conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{} but not on port {} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        # Port not checked (useful when checking that ssl is disabled)
+        elif not port and conf_ssl:
+            self.log.debug('SSL is enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif not conf_ssl:
+            self.log.debug('SSL not enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        else:
+            msg = ('Unknown condition when checking SSL status @{}:{} '
+                   '({})'.format(host, port, unit_name))
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+        """Check that ssl is enabled on rmq juju sentry units.
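+
+        Example call (illustrative, using the default amqps port):
+
+            ret = u.validate_rmq_ssl_enabled_units(sentry_units, port=5671)
+            assert ret is None, ret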
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition: ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on the listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition: ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification. Confirm that it is enabled on every
+        unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param port: amqp port, use defaults if None
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: on')
+
+        # Enable RMQ SSL
+        config = {'ssl': 'on'}
+        if port:
+            config['ssl_port'] = port
+
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+        """Turn ssl charm config option off, confirm that it is disabled
+        on every unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful. Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option: off')
+
+        # Disable RMQ SSL
+        config = {'ssl': 'off'}
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+                             port=None, fatal=True,
+                             username="testuser1", password="changeme"):
+        """Establish and return a pika amqp connection to the rabbitmq service
+        running on a rmq juju unit.
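+
+        Example call (illustrative; non-fatal so the caller can retry):
+
+            connection = u.connect_amqp_by_unit(sentry_unit, ssl=True,
+                                                port=5671, fatal=False)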
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + assert connection.is_closing is False + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
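+
+        Example round trip with publish_amqp_message_by_unit (illustrative):
+
+            u.publish_amqp_message_by_unit(sentry_unit, 'test message')
+            body = u.get_amqp_message_by_unit(sentry_unit)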
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_memcache(self, sentry_unit, conf, os_release,
+                          earliest_release=5, section='keystone_authtoken',
+                          check_kvs=None):
+        """Check Memcache is running and is configured to be used
+
+        Example call from Amulet test:
+
+            def test_110_memcache(self):
+                u.validate_memcache(self.neutron_api_sentry,
+                                    '/etc/neutron/neutron.conf',
+                                    self._get_openstack_release())
+
+        :param sentry_unit: sentry unit
+        :param conf: OpenStack config file to check memcache settings
+        :param os_release: Current OpenStack release int code
+        :param earliest_release: Earliest Openstack release to check int code
+        :param section: OpenStack config file section to check
+        :param check_kvs: Dict of settings to check in config file
+        :returns: None
+        """
+        if os_release < earliest_release:
+            self.log.debug('Skipping memcache checks for deployment. {} < '
+                           'mitaka'.format(os_release))
+            return
+        _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
+        self.log.debug('Checking memcached is running')
+        ret = self.validate_services_by_name({sentry_unit: ['memcached']})
+        if ret:
+            amulet.raise_status(amulet.FAIL, msg='Memcache running check '
+                                'failed {}'.format(ret))
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache url is configured in {}'.format(
+            conf))
+        if self.validate_config_data(sentry_unit, conf, section, _kvs):
+            message = "Memcache config error in: {}".format(conf)
+            amulet.raise_status(amulet.FAIL, msg=message)
+        else:
+            self.log.debug('OK')
+        self.log.debug('Checking memcache configuration in '
+                       '/etc/memcached.conf')
+        contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
+                                           fatal=True)
+        ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
+        if CompareHostReleases(ubuntu_release) <= 'trusty':
+            memcache_listen_addr = 'ip6-localhost'
+        else:
+            memcache_listen_addr = '::1'
+        expected = {
+            '-p': '11211',
+            '-l': memcache_listen_addr}
+        found = []
+        for key, value in expected.items():
+            for line in contents.split('\n'):
+                if line.startswith(key):
+                    self.log.debug('Checking {} is set to {}'.format(
+                        key,
+                        value))
+                    assert value == line.split()[-1]
+                    self.log.debug(line.split()[-1])
+                    found.append(key)
+        if sorted(found) == sorted(expected.keys()):
+            self.log.debug('OK')
+        else:
+            message = "Memcache config error in: /etc/memcached.conf"
+            amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/__init__.py
new file mode 100644
index 00000000..7f7e5f79
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/__init__.py
@@ -0,0 +1,212 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OpenStack Security Audit code"""
+
+import collections
+from enum import Enum
+import traceback
+
+from charmhelpers.core.host import cmp_pkgrevno
+import charmhelpers.contrib.openstack.utils as openstack_utils
+import charmhelpers.core.hookenv as hookenv
+
+
+class AuditType(Enum):
+    OpenStackSecurityGuide = 1
+
+
+_audits = {}
+
+Audit = collections.namedtuple('Audit', 'func filters')
+
+
+def audit(*args):
+    """Decorator to register an audit.
+
+    These are used to generate audits that can be run on a
+    deployed system that matches the given configuration.
+
+    :param args: List of functions to filter tests against
+    :type args: List[Callable[Dict]]
+    """
+    def wrapper(f):
+        test_name = f.__name__
+        if _audits.get(test_name):
+            raise RuntimeError(
+                "Test name '{}' used more than once"
+                .format(test_name))
+        non_callables = [fn for fn in args if not callable(fn)]
+        if non_callables:
+            raise RuntimeError(
+                "Configuration includes non-callable filters: {}"
+                .format(non_callables))
+        _audits[test_name] = Audit(func=f, filters=args)
+        return f
+    return wrapper
+
+
+def is_audit_type(*args):
+    """This audit is included in the specified kinds of audits.
+
+    :param *args: List of AuditTypes to include this audit in
+    :type args: List[AuditType]
+    :rtype: Callable[Dict]
+    """
+    def _is_audit_type(audit_options):
+        if audit_options.get('audit_type') in args:
+            return True
+        else:
+            return False
+    return _is_audit_type
+
+
+def since_package(pkg, pkg_version):
+    """This audit should be run after the specified package version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _since_package(audit_options=None):
+        return cmp_pkgrevno(pkg, pkg_version) >= 0
+
+    return _since_package
+
+
+def before_package(pkg, pkg_version):
+    """This audit should be run before the specified package version (excl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param pkg_version: The package version
+    :type pkg_version: str
+    :rtype: Callable[Dict]
+    """
+    def _before_package(audit_options=None):
+        return not since_package(pkg, pkg_version)()
+
+    return _before_package
+
+
+def since_openstack_release(pkg, release):
+    """This audit should run after the specified OpenStack version (incl).
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _since_openstack_release(audit_options=None):
+        _release = openstack_utils.get_os_codename_package(pkg)
+        return openstack_utils.CompareOpenStackReleases(_release) >= release
+
+    return _since_openstack_release
+
+
+def before_openstack_release(pkg, release):
+    """This audit should run before the specified OpenStack version (excl).
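+
+    Example use as an audit filter (illustrative; the option key and the
+    check body are assumptions for the example):
+
+        @audit(before_openstack_release('keystone', 'queens'))
+        def check_something(audit_options):
+            assert audit_options.get('some-option'), "some-option unset"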
+
+    :param pkg: Package name to compare
+    :type pkg: str
+    :param release: The OpenStack release codename
+    :type release: str
+    :rtype: Callable[Dict]
+    """
+    def _before_openstack_release(audit_options=None):
+        return not since_openstack_release(pkg, release)()
+
+    return _before_openstack_release
+
+
+def it_has_config(config_key):
+    """This audit should be run based on specified config keys.
+
+    :param config_key: Config key to look for
+    :type config_key: str
+    :rtype: Callable[Dict]
+    """
+    def _it_has_config(audit_options):
+        return audit_options.get(config_key) is not None
+
+    return _it_has_config
+
+
+def run(audit_options):
+    """Run the configured audits with the specified audit_options.
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+
+    :rtype: Dict[str, str]
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if result_name in audit_options.get('excludes', []):
+            print(
+                "Skipping {} because it is "
+                "excluded in audit config"
+                .format(result_name))
+            continue
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action.
+
+    :param result: The result of running the security-checklist
+        action on a unit
+    :type result: Dict[str, Dict[str, str]]
+    :rtype: int
+    """
+    passed = True
+    for test, result in result.items():
+        if result['success']:
+            hookenv.action_set({test: 'PASS'})
+        else:
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
+            passed = False
+    if not passed:
+        hookenv.action_fail("One or more tests failed")
+    return 0 if passed else 1
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
new file mode 100644
index 00000000..79740ed0
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -0,0 +1,270 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
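+
+# Illustrative sketch of how these audits are typically driven (the option
+# keys follow validate_file_ownership and _config_section below; the
+# concrete values here are assumptions for the example):
+#
+#     from charmhelpers.contrib.openstack.audits import run, AuditType
+#     audit_options = {
+#         'audit_type': AuditType.OpenStackSecurityGuide,
+#         'files': FILE_ASSERTIONS['keystone'],
+#         'config_path': '/etc/keystone',
+#         'config_file': 'keystone.conf',
+#     }
+#     results = run(audit_options)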
+ +import collections +import configparser +import glob +import os.path +import subprocess + +from charmhelpers.contrib.openstack.audits import ( + audit, + AuditType, + # filters + is_audit_type, + it_has_config, +) + +from charmhelpers.core.hookenv import ( + cached, +) + +""" +The Security Guide suggests a specific list of files inside the +config directory for the service having 640 specifically, but +by ensuring the containing directory is 750, only the owner can +write, and only the group can read files within the directory. + +By restricting access to the containing directory, we can more +effectively ensure that there is no accidental leakage if a new +file is added to the service without being added to the security +guide, and to this check. +""" +FILE_ASSERTIONS = { + 'barbican': { + '/etc/barbican': {'group': 'barbican', 'mode': '750'}, + }, + 'ceph-mon': { + '/var/lib/charm/ceph-mon/ceph.conf': + {'owner': 'root', 'group': 'root', 'mode': '644'}, + '/etc/ceph/ceph.client.admin.keyring': + {'owner': 'ceph', 'group': 'ceph'}, + '/etc/ceph/rbdmap': {'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'} + }, + 'ceph-osd': { + '/var/lib/charm/ceph-osd/ceph.conf': + {'owner': 'ceph', 'group': 'ceph', 'mode': '644'}, + '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'}, + '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + '/var/lib/ceph/bootstrap-*/ceph.keyring': + {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}, + '/var/lib/ceph/radosgw': + {'owner': 'ceph', 'group': 'ceph', 'mode': '755'}, + }, + 'cinder': { + '/etc/cinder': {'group': 'cinder', 'mode': '750'}, + }, + 'glance': { + '/etc/glance': {'group': 'glance', 'mode': '750'}, + }, + 'keystone': { + '/etc/keystone': + {'owner': 'keystone', 'group': 'keystone', 'mode': '750'}, + }, + 'manilla': { + '/etc/manila': {'group': 'manilla', 'mode': '750'}, + }, + 'neutron-gateway': { + '/etc/neutron': {'group': 'neutron', 'mode': '750'}, + }, + 'neutron-api': { + '/etc/neutron/': {'group': 'neutron', 'mode': '750'}, + }, + 'nova-cloud-controller': { + '/etc/nova': {'group': 'nova', 'mode': '750'}, + }, + 'nova-compute': { + '/etc/nova/': {'group': 'nova', 'mode': '750'}, + }, + 'openstack-dashboard': { + # From security guide + '/etc/openstack-dashboard/local_settings.py': + {'group': 'horizon', 'mode': '640'}, + }, +} + +Ownership = collections.namedtuple('Ownership', 'owner group mode') + + +@cached +def _stat(file): + """ + Get the Ownership information from a file. + + :param file: The path to a file to stat + :type file: str + :returns: owner, group, and mode of the specified file + :rtype: Ownership + :raises subprocess.CalledProcessError: If the underlying stat fails + """ + out = subprocess.check_output( + ['stat', '-c', '%U %G %a', file]).decode('utf-8') + return Ownership(*out.strip().split(' ')) + + +@cached +def _config_ini(path): + """ + Parse an ini file + + :param path: The path to a file to parse + :type file: str + :returns: Configuration contained in path + :rtype: Dict + """ + # When strict is enabled, duplicate options are not allowed in the + # parsed INI; however, Oslo allows duplicate values. 
This change
+    # causes us to ignore the duplicate values, which is acceptable as
+    # long as we don't validate any multi-value options.
+    conf = configparser.ConfigParser(strict=False)
+    conf.read(path)
+    return dict(conf)
+
+
+def _validate_file_ownership(owner, group, file_name, optional=False):
+    """
+    Validate that a specified file is owned by `owner:group`.
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     i.e. should the test fail when it is missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and missing; there is nothing to validate.
+        return
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name, optional=False):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: The desired file mode
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    :param optional: Is this file optional,
+                     i.e. should the test fail when it is missing
+    :type optional: bool
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
+        # The file is optional and missing; there is nothing to validate.
+        return
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = options.get('mode', config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', False))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_mode(mode, file, optional)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_mode(mode, file_name, optional)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_keystone(audit_options):
+    """Validate that the service uses Keystone for authentication."""
+    section = _config_section(audit_options, 'api') or \
+        _config_section(audit_options, 'DEFAULT')
+    assert section is not None, "Missing section 'api / DEFAULT'"
+    assert section.get('auth_strategy') == "keystone", \
+        "Application is not using Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_keystone(audit_options):
+    """Verify that TLS is used to communicate with Keystone."""
+    section = _config_section(audit_options, 'keystone_authtoken')
+    assert section is not None, "Missing section 'keystone_authtoken'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("auth_uri"), \
+        "TLS is not used for Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_glance(audit_options):
+    """Verify that TLS is used to communicate with Glance."""
+    section = _config_section(audit_options, 'glance')
+    assert section is not None, "Missing section 'glance'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("api_servers"), \
+        "TLS is not used for Glance"
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/cert_utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 00000000..b494af64
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,289 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
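+#
+# A rough usage sketch (relation id/unit and service name are illustrative):
+#
+#     from charmhelpers.core.hookenv import relation_set
+#
+#     relation_set(relation_id=rid,
+#                  relation_settings=get_certificate_request())
+#     # ...later, once the CA has responded on the relation:
+#     if process_certificates('keystone', rid, unit):
+#         # certs/keys were written under /etc/apache2/ssl/keystone
+#         pass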
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+    get_hostname,
+    resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+    local_unit,
+    network_get_primary_address,
+    config,
+    related_units,
+    relation_get,
+    relation_ids,
+    unit_get,
+    NoNetworkBinding,
+    log,
+    WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+    ADMIN,
+    resolve_address,
+    get_vip_in_network,
+    INTERNAL,
+    PUBLIC,
+    ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+    mkdir,
+    write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+    install_ca_cert
+)
+
+
+class CertRequest(object):
+
+    """Create a request for certificates to be generated."""
+
+    def __init__(self, json_encode=True):
+        self.entries = []
+        self.hostname_entry = None
+        self.json_encode = json_encode
+
+    def add_entry(self, net_type, cn, addresses):
+        """Add a request to the batch
+
+        :param net_type: str network space name the request is for
+        :param cn: str Canonical Name for certificate
+        :param addresses: [] List of addresses to be used as SANs
+        """
+        self.entries.append({
+            'cn': cn,
+            'addresses': addresses})
+
+    def add_hostname_cn(self):
+        """Add a request for the hostname of the machine"""
+        ip = unit_get('private-address')
+        addresses = [ip]
+        # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local units
+        # cert has the appropriate vip in the SAN list
+        vip = get_vip_in_network(resolve_network_cidr(ip))
+        if vip:
+            addresses.append(vip)
+        self.hostname_entry = {
+            'cn': get_hostname(ip),
+            'addresses': addresses}
+
+    def add_hostname_cn_ip(self, addresses):
+        """Add addresses to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+        """
+        for addr in addresses:
+            if addr not in self.hostname_entry['addresses']:
+                self.hostname_entry['addresses'].append(addr)
+
+    def get_request(self):
+        """Generate request from the batched up entries."""
+        if self.hostname_entry:
+            self.entries.append(self.hostname_entry)
+        request = {}
+        for entry in self.entries:
+            sans = sorted(list(set(entry['addresses'])))
+            request[entry['cn']] = {'sans': sans}
+        if self.json_encode:
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
+        else:
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req
+
+
+def get_certificate_request(json_encode=True):
+    """Generate a certificate request based on the network configuration."""
+    req = CertRequest(json_encode=json_encode)
+    req.add_hostname_cn()
+    # Add os-hostname entries
+    for net_type in [INTERNAL, ADMIN, PUBLIC]:
+        net_config = config(ADDRESS_MAP[net_type]['override'])
+        try:
+            net_addr = resolve_address(endpoint_type=net_type)
+            ip = network_get_primary_address(
+                ADDRESS_MAP[net_type]['binding'])
+            addresses = [net_addr, ip]
+            vip = get_vip_in_network(resolve_network_cidr(ip))
+            if vip:
+                addresses.append(vip)
+            if net_config:
+                req.add_entry(
+                    net_type,
+                    net_config,
+                    addresses)
+            else:
+                # There is a network address with no corresponding hostname.
+                # Add the ip to the hostname cert to allow for this.
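+                # For example (illustrative), addresses could be
+                # ['10.5.0.20', '10.5.0.21'] here, and both IPs then end up
+                # in the SAN list of the hostname certificate entry.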
+ req.add_hostname_cn_ip(addresses) + except NoNetworkBinding: + log("Skipping request for certificate for ip in {} space, no " + "local address found".format(net_type), WARNING) + return req.get_request() + + +def create_ip_cert_links(ssl_dir, custom_hostname_link=None): + """Create symlinks for SAN records + + :param ssl_dir: str Directory to create symlinks in + :param custom_hostname_link: str Additional link to be created + """ + hostname = get_hostname(unit_get('private-address')) + hostname_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(hostname)) + hostname_key = os.path.join( + ssl_dir, + 'key_{}'.format(hostname)) + # Add links to hostname cert, used if os-hostname vars not set + for net_type in [INTERNAL, ADMIN, PUBLIC]: + try: + addr = resolve_address(endpoint_type=net_type) + cert = os.path.join(ssl_dir, 'cert_{}'.format(addr)) + key = os.path.join(ssl_dir, 'key_{}'.format(addr)) + if os.path.isfile(hostname_cert) and not os.path.isfile(cert): + os.symlink(hostname_cert, cert) + os.symlink(hostname_key, key) + except NoNetworkBinding: + log("Skipping creating cert symlink for ip in {} space, no " + "local address found".format(net_type), WARNING) + if custom_hostname_link: + custom_cert = os.path.join( + ssl_dir, + 'cert_{}'.format(custom_hostname_link)) + custom_key = os.path.join( + ssl_dir, + 'key_{}'.format(custom_hostname_link)) + if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert): + os.symlink(hostname_cert, custom_cert) + os.symlink(hostname_key, custom_key) + + +def install_certs(ssl_dir, certs, chain=None, user='root', group='root'): + """Install the certs passed into the ssl dir and append the chain if + provided. + + :param ssl_dir: str Directory to create symlinks in + :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}} + :param chain: str Chain to be appended to certs + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. Defaults to 'root' + :type group: str + """ + for cn, bundle in certs.items(): + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + cert_data = bundle['cert'] + if chain: + # Append chain file so that clients that trust the root CA will + # trust certs signed by an intermediate in the chain + cert_data = cert_data + os.linesep + chain + write_file( + path=os.path.join(ssl_dir, cert_filename), owner=user, group=group, + content=cert_data, perms=0o640) + write_file( + path=os.path.join(ssl_dir, key_filename), owner=user, group=group, + content=bundle['key'], perms=0o640) + + +def process_certificates(service_name, relation_id, unit, + custom_hostname_link=None, user='root', group='root'): + """Process the certificates supplied down the relation + + :param service_name: str Name of service the certifcates are for. + :param relation_id: str Relation id providing the certs + :param unit: str Unit providing the certs + :param custom_hostname_link: str Name of custom link to create + :param user: (Optional) Owner of certificate files. Defaults to 'root' + :type user: str + :param group: (Optional) Group of certificate files. 
Defaults to 'root' + :type group: str + :returns: True if certificates processed for local unit or False + :rtype: bool + """ + data = relation_get(rid=relation_id, unit=unit) + ssl_dir = os.path.join('/etc/apache2/ssl/', service_name) + mkdir(path=ssl_dir) + name = local_unit().replace('/', '_') + certs = data.get('{}.processed_requests'.format(name)) + chain = data.get('chain') + ca = data.get('ca') + if certs: + certs = json.loads(certs) + install_ca_cert(ca.encode()) + install_certs(ssl_dir, certs, chain, user=user, group=group) + create_ip_cert_links( + ssl_dir, + custom_hostname_link=custom_hostname_link) + return True + return False + + +def get_requests_for_local_unit(relation_name=None): + """Extract any certificates data targeted at this unit down relation_name. + + :param relation_name: str Name of relation to check for data. + :returns: List of bundles of certificates. + :rtype: List of dicts + """ + local_name = local_unit().replace('/', '_') + raw_certs_key = '{}.processed_requests'.format(local_name) + relation_name = relation_name or 'certificates' + bundles = [] + for rid in relation_ids(relation_name): + for unit in related_units(rid): + data = relation_get(rid=rid, unit=unit) + if data.get(raw_certs_key): + bundles.append({ + 'ca': data['ca'], + 'chain': data.get('chain'), + 'certs': json.loads(data[raw_certs_key])}) + return bundles + + +def get_bundle_for_cn(cn, relation_name=None): + """Extract certificates for the given cn. + + :param cn: str Canonical Name on certificate. + :param relation_name: str Relation to check for certificates down. + :returns: Dictionary of certificate data, + :rtype: dict. + """ + entries = get_requests_for_local_unit(relation_name) + cert_bundle = {} + for entry in entries: + for _cn, bundle in entry['certs'].items(): + if _cn == cn: + cert_bundle = { + 'cert': bundle['cert'], + 'key': bundle['key'], + 'chain': entry['chain'], + 'ca': entry['ca']} + break + if cert_bundle: + break + return cert_bundle diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/context.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/context.py new file mode 100644 index 00000000..42abccf7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,3177 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
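+
+# A composition sketch (illustrative; file name and release are assumptions):
+#
+#     from charmhelpers.contrib.openstack import context, templating
+#
+#     configs = templating.OSConfigRenderer(templates_dir='templates/',
+#                                           openstack_release='queens')
+#     configs.register('/etc/keystone/keystone.conf',
+#                      [context.SharedDBContext(),
+#                       context.AMQPContext(),
+#                       context.WorkerConfigContext()])
+#     configs.write_all()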
+ +import collections +import copy +import enum +import glob +import hashlib +import json +import math +import os +import re +import socket +import time + +from base64 import b64decode +from subprocess import check_call, CalledProcessError + +import six + +from charmhelpers.contrib.openstack.audits.openstack_security_guide import ( + _config_ini as config_ini +) + +from charmhelpers.fetch import ( + apt_install, + filter_installed_packages, +) +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + is_relation_made, + local_unit, + log, + relation_get, + relation_ids, + related_units, + relation_set, + unit_get, + unit_private_ip, + charm_name, + DEBUG, + INFO, + ERROR, + status_set, + network_get_primary_address, + WARNING, +) + +from charmhelpers.core.sysctl import create as sysctl_create +from charmhelpers.core.strutils import bool_from_string +from charmhelpers.contrib.openstack.exceptions import OSContextError + +from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, + list_nics, + get_nic_hwaddr, + mkdir, + write_file, + pwgen, + lsb_release, + CompareHostReleases, + is_container, +) +from charmhelpers.contrib.hahelpers.cluster import ( + determine_apache_port, + determine_api_port, + https, + is_clustered, +) +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, + install_ca_cert, +) +from charmhelpers.contrib.openstack.neutron import ( + neutron_plugin_attribute, + parse_data_port_mappings, +) +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + INTERNAL, + ADMIN, + PUBLIC, + ADDRESS_MAP, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + get_ipv4_addr, + get_ipv6_addr, + get_netmask_for_address, + format_ipv6_addr, + is_bridge_member, + is_ipv6_disabled, + get_relation_ip, +) +from charmhelpers.contrib.openstack.utils import ( + config_flags_parser, + get_os_codename_install_source, + enable_memcache, + CompareOpenStackReleases, + os_release, +) +from charmhelpers.core.unitdata import kv + +try: + from sriov_netplan_shim import pci +except ImportError: + # The use of the function and contexts that require the pci module is + # optional. + pass + +try: + import psutil +except ImportError: + if six.PY2: + apt_install('python-psutil', fatal=True) + else: + apt_install('python3-psutil', fatal=True) + import psutil + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' +ADDRESS_TYPES = ['admin', 'internal', 'public'] +HAPROXY_RUN_DIR = '/var/run/haproxy/' +DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2" + + +def ensure_packages(packages): + """Install but do not upgrade required plugin packages.""" + required = filter_installed_packages(packages) + if required: + apt_install(required, fatal=True) + + +def context_complete(ctxt): + _missing = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + _missing.append(k) + + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level=INFO) + return False + + return True + + +class OSContextGenerator(object): + """Base class for all context generators.""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
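+
+        For example, a ctxt of ``{'a': '1', 'b': None}`` leaves
+        ``missing_data == ['b']`` and returns False.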
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), + level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __init__(self, database=None, user=None, relation_prefix=None, + ssl_dir=None, relation_id=None): + """Allows inspecting relation for settings prefixed with + relation_prefix. This is useful for parsing access for multiple + databases returned via the shared-db interface (eg, nova_password, + quantum_password) + """ + self.relation_prefix = relation_prefix + self.database = database + self.user = user + self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] + self.relation_id = relation_id + + def __call__(self): + self.database = self.database or config('database') + self.user = self.user or config('database-user') + if None in [self.database, self.user]: + log("Could not generate shared_db context. Missing required charm " + "config options. (database name and user)", level=ERROR) + raise OSContextError + + ctxt = {} + + # NOTE(jamespage) if mysql charm provides a network upon which + # access to the database should be made, reconfigure relation + # with the service units local address and defer execution + access_network = relation_get('access-network') + if access_network is not None: + if self.relation_prefix is not None: + hostname_key = "{}_hostname".format(self.relation_prefix) + else: + hostname_key = "hostname" + access_hostname = get_address_in_network( + access_network, + unit_get('private-address')) + set_hostname = relation_get(attribute=hostname_key, + unit=local_unit()) + if set_hostname != access_hostname: + relation_set(relation_settings={hostname_key: access_hostname}) + return None # Defer any further hook execution for now.... + + password_setting = 'password' + if self.relation_prefix: + password_setting = self.relation_prefix + '_password' + + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.interfaces[0]) + + rel = (get_os_codename_install_source(config('openstack-origin')) or + 'icehouse') + for rid in rids: + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + host = rdata.get('db_host') + host = format_ipv6_addr(host) or host + ctxt = { + 'database_host': host, + 'database': self.database, + 'database_user': self.user, + 'database_password': rdata.get(password_setting), + 'database_type': 'mysql+pymysql' + } + # Port is being introduced with LP Bug #1876188 + # but it not currently required and may not be set in all + # cases, particularly in classic charms. 
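+                # (Illustrative) with a port present, templates can build a
+                # connection string such as
+                # mysql+pymysql://user:pass@10.0.0.5:3306/nova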
+ port = rdata.get('db_port') + if port: + ctxt['database_port'] = port + if CompareOpenStackReleases(rel) < 'queens': + ctxt['database_type'] = 'mysql' + if self.context_complete(ctxt): + db_ssl(rdata, ctxt, self.ssl_dir) + return ctxt + return {} + + +class PostgresqlDBContext(OSContextGenerator): + interfaces = ['pgsql-db'] + + def __init__(self, database=None): + self.database = database + + def __call__(self): + self.database = self.database or config('database') + if self.database is None: + log('Could not generate postgresql_db context. Missing required ' + 'charm config options. (database name)', level=ERROR) + raise OSContextError + + ctxt = {} + for rid in relation_ids(self.interfaces[0]): + self.related = True + for unit in related_units(rid): + rel_host = relation_get('host', rid=rid, unit=unit) + rel_user = relation_get('user', rid=rid, unit=unit) + rel_passwd = relation_get('password', rid=rid, unit=unit) + ctxt = {'database_host': rel_host, + 'database': self.database, + 'database_user': rel_user, + 'database_password': rel_passwd, + 'database_type': 'postgresql'} + if self.context_complete(ctxt): + return ctxt + + return {} + + +def db_ssl(rdata, ctxt, ssl_dir): + if 'ssl_ca' in rdata and ssl_dir: + ca_path = os.path.join(ssl_dir, 'db-client.ca') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_ca'])) + + ctxt['database_ssl_ca'] = ca_path + elif 'ssl_ca' in rdata: + log("Charm not setup for ssl support but ssl ca found", level=INFO) + return ctxt + + if 'ssl_cert' in rdata: + cert_path = os.path.join( + ssl_dir, 'db-client.cert') + if not os.path.exists(cert_path): + log("Waiting 1m for ssl client cert validity", level=INFO) + time.sleep(60) + + with open(cert_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_cert'])) + + ctxt['database_ssl_cert'] = cert_path + key_path = os.path.join(ssl_dir, 'db-client.key') + with open(key_path, 'wb') as fh: + fh.write(b64decode(rdata['ssl_key'])) + + ctxt['database_ssl_key'] = key_path + + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-service'): + self.service = service + self.service_user = service_user + self.rel_name = rel_name + self.interfaces = [self.rel_name] + + def _setup_pki_cache(self): + if self.service and self.service_user: + # This is required for pki token signing if we don't want /tmp to + # be used. + cachedir = '/var/cache/%s' % (self.service) + if not os.path.isdir(cachedir): + log("Creating service cache dir %s" % (cachedir), level=DEBUG) + mkdir(path=cachedir, owner=self.service_user, + group=self.service_user, perms=0o700) + + return cachedir + return None + + def _get_pkg_name(self, python_name='keystonemiddleware'): + """Get corresponding distro installed package for python + package name. + + :param python_name: nameof the python package + :type: string + """ + pkg_names = map(lambda x: x + python_name, ('python3-', 'python-')) + + for pkg in pkg_names: + if not filter_installed_packages((pkg,)): + return pkg + + return None + + def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel): + """Build Jinja2 context for full rendering of [keystone_authtoken] + section with variable names included. Re-constructed from former + template 'section-keystone-auth-mitaka'. 
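+
+        Rendered, the section looks roughly like (values illustrative)::
+
+            auth_type = password
+            www_authenticate_uri = https://10.5.0.10:5000/v3
+            auth_url = https://10.5.0.10:35357/v3
+            username = nova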
+ + :param ctxt: Jinja2 context returned from self.__call__() + :type: dict + :param keystonemiddleware_os_rel: OpenStack release name of + keystonemiddleware package installed + """ + c = collections.OrderedDict((('auth_type', 'password'),)) + + # 'www_authenticate_uri' replaced 'auth_uri' since Stein, + # see keystonemiddleware upstream sources for more info + if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein': + c.update(( + ('www_authenticate_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + else: + c.update(( + ('auth_uri', "{}://{}:{}/v3".format( + ctxt.get('service_protocol', ''), + ctxt.get('service_host', ''), + ctxt.get('service_port', ''))),)) + + c.update(( + ('auth_url', "{}://{}:{}/v3".format( + ctxt.get('auth_protocol', ''), + ctxt.get('auth_host', ''), + ctxt.get('auth_port', ''))), + ('project_domain_name', ctxt.get('admin_domain_name', '')), + ('user_domain_name', ctxt.get('admin_domain_name', '')), + ('project_name', ctxt.get('admin_tenant_name', '')), + ('username', ctxt.get('admin_user', '')), + ('password', ctxt.get('admin_password', '')), + ('signing_dir', ctxt.get('signing_dir', '')),)) + + return c + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + keystonemiddleware_os_release = None + if self._get_pkg_name(): + keystonemiddleware_os_release = os_release(self._get_pkg_name()) + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + serv_host = rdata.get('service_host') + serv_host = format_ipv6_addr(serv_host) or serv_host + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('service_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({'service_port': rdata.get('service_port'), + 'service_host': serv_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('service_tenant'), + 'admin_user': rdata.get('service_username'), + 'admin_password': rdata.get('service_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version}) + + if float(api_version) > 2: + ctxt.update({ + 'admin_domain_name': rdata.get('service_domain'), + 'service_project_id': rdata.get('service_tenant_id'), + 'service_domain_id': rdata.get('service_domain_id')}) + + # we keep all veriables in ctxt for compatibility and + # add nested dictionary for keystone_authtoken generic + # templating + if keystonemiddleware_os_release: + ctxt['keystone_authtoken'] = \ + self._get_keystone_authtoken_ctxt( + ctxt, keystonemiddleware_os_release) + + if self.context_complete(ctxt): + # NOTE(jamespage) this is required for >= icehouse + # so a missing value just indicates keystone needs + # upgrading + ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') + ctxt['admin_domain_id'] = rdata.get('service_domain_id') + return ctxt + + return {} + + +class IdentityCredentialsContext(IdentityServiceContext): + '''Context for identity-credentials interface type''' + + def __init__(self, + service=None, + service_user=None, + rel_name='identity-credentials'): + super(IdentityCredentialsContext, self).__init__(service, + service_user, + 
rel_name) + + def __call__(self): + log('Generating template context for ' + self.rel_name, level=DEBUG) + ctxt = {} + + cachedir = self._setup_pki_cache() + if cachedir: + ctxt['signing_dir'] = cachedir + + for rid in relation_ids(self.rel_name): + self.related = True + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + credentials_host = rdata.get('credentials_host') + credentials_host = ( + format_ipv6_addr(credentials_host) or credentials_host + ) + auth_host = rdata.get('auth_host') + auth_host = format_ipv6_addr(auth_host) or auth_host + svc_protocol = rdata.get('credentials_protocol') or 'http' + auth_protocol = rdata.get('auth_protocol') or 'http' + api_version = rdata.get('api_version') or '2.0' + ctxt.update({ + 'service_port': rdata.get('credentials_port'), + 'service_host': credentials_host, + 'auth_host': auth_host, + 'auth_port': rdata.get('auth_port'), + 'admin_tenant_name': rdata.get('credentials_project'), + 'admin_tenant_id': rdata.get('credentials_project_id'), + 'admin_user': rdata.get('credentials_username'), + 'admin_password': rdata.get('credentials_password'), + 'service_protocol': svc_protocol, + 'auth_protocol': auth_protocol, + 'api_version': api_version + }) + + if float(api_version) > 2: + ctxt.update({'admin_domain_name': + rdata.get('domain')}) + + if self.context_complete(ctxt): + return ctxt + + return {} + + +class NovaVendorMetadataContext(OSContextGenerator): + """Context used for configuring nova vendor metadata on nova.conf file.""" + + def __init__(self, os_release_pkg, interfaces=None): + """Initialize the NovaVendorMetadataContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + :param interfaces: list of string values to be used as the Context's + relation interfaces. + :type interfaces: List[str] + """ + self.os_release_pkg = os_release_pkg + if interfaces is not None: + self.interfaces = interfaces + + def __call__(self): + cmp_os_release = CompareOpenStackReleases( + os_release(self.os_release_pkg)) + ctxt = {'vendor_data': False} + + vdata_providers = [] + vdata = config('vendor-data') + vdata_url = config('vendor-data-url') + + if vdata: + try: + # validate the JSON. If invalid, we do not set anything here + json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data'] = True + # Mitaka does not support DynamicJSON + # so vendordata_providers is not needed + if cmp_os_release > 'mitaka': + vdata_providers.append('StaticJSON') + + if vdata_url: + if cmp_os_release > 'mitaka': + ctxt['vendor_data_url'] = vdata_url + vdata_providers.append('DynamicJSON') + else: + log('Dynamic vendor data unsupported' + ' for {}.'.format(cmp_os_release), level=ERROR) + if vdata_providers: + ctxt['vendordata_providers'] = ','.join(vdata_providers) + + return ctxt + + +class NovaVendorMetadataJSONContext(OSContextGenerator): + """Context used for writing nova vendor metadata json file.""" + + def __init__(self, os_release_pkg): + """Initialize the NovaVendorMetadataJSONContext object. + + :param os_release_pkg: the package name to extract the OpenStack + release codename from. + :type os_release_pkg: str + """ + self.os_release_pkg = os_release_pkg + + def __call__(self): + ctxt = {'vendor_data_json': '{}'} + + vdata = config('vendor-data') + if vdata: + try: + # validate the JSON. If invalid, we return empty. 
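+                # e.g. (illustrative) vendor-data='{"good": "stuff"}' passes,
+                # while a bare string like not-json raises ValueError.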
+ json.loads(vdata) + except (TypeError, ValueError) as e: + log('Error decoding vendor-data. {}'.format(e), level=ERROR) + else: + ctxt['vendor_data_json'] = vdata + + return ctxt + + +class AMQPContext(OSContextGenerator): + + def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None, + relation_id=None): + self.ssl_dir = ssl_dir + self.rel_name = rel_name + self.relation_prefix = relation_prefix + self.interfaces = [rel_name] + self.relation_id = relation_id + + def __call__(self): + log('Generating template context for amqp', level=DEBUG) + conf = config() + if self.relation_prefix: + user_setting = '%s-rabbit-user' % (self.relation_prefix) + vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) + else: + user_setting = 'rabbit-user' + vhost_setting = 'rabbit-vhost' + + try: + username = conf[user_setting] + vhost = conf[vhost_setting] + except KeyError as e: + log('Could not generate shared_db context. Missing required charm ' + 'config options: %s.' % e, level=ERROR) + raise OSContextError + + ctxt = {} + if self.relation_id: + rids = [self.relation_id] + else: + rids = relation_ids(self.rel_name) + for rid in rids: + ha_vip_only = False + self.related = True + transport_hosts = None + rabbitmq_port = '5672' + for unit in related_units(rid): + if relation_get('clustered', rid=rid, unit=unit): + ctxt['clustered'] = True + vip = relation_get('vip', rid=rid, unit=unit) + vip = format_ipv6_addr(vip) or vip + ctxt['rabbitmq_host'] = vip + transport_hosts = [vip] + else: + host = relation_get('private-address', rid=rid, unit=unit) + host = format_ipv6_addr(host) or host + ctxt['rabbitmq_host'] = host + transport_hosts = [host] + + ctxt.update({ + 'rabbitmq_user': username, + 'rabbitmq_password': relation_get('password', rid=rid, + unit=unit), + 'rabbitmq_virtual_host': vhost, + }) + + ssl_port = relation_get('ssl_port', rid=rid, unit=unit) + if ssl_port: + ctxt['rabbit_ssl_port'] = ssl_port + rabbitmq_port = ssl_port + + ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) + if ssl_ca: + ctxt['rabbit_ssl_ca'] = ssl_ca + + if relation_get('ha_queues', rid=rid, unit=unit) is not None: + ctxt['rabbitmq_ha_queues'] = True + + ha_vip_only = relation_get('ha-vip-only', + rid=rid, unit=unit) is not None + + if self.context_complete(ctxt): + if 'rabbit_ssl_ca' in ctxt: + if not self.ssl_dir: + log("Charm not setup for ssl support but ssl ca " + "found", level=INFO) + break + + ca_path = os.path.join( + self.ssl_dir, 'rabbit-client-ca.pem') + with open(ca_path, 'wb') as fh: + fh.write(b64decode(ctxt['rabbit_ssl_ca'])) + ctxt['rabbit_ssl_ca'] = ca_path + + # Sufficient information found = break out! 
+ break + + # Used for active/active rabbitmq >= grizzly + if (('clustered' not in ctxt or ha_vip_only) and + len(related_units(rid)) > 1): + rabbitmq_hosts = [] + for unit in related_units(rid): + host = relation_get('private-address', rid=rid, unit=unit) + if not relation_get('password', rid=rid, unit=unit): + log( + ("Skipping {} password not sent which indicates " + "unit is not ready.".format(host)), + level=DEBUG) + continue + host = format_ipv6_addr(host) or host + rabbitmq_hosts.append(host) + + rabbitmq_hosts = sorted(rabbitmq_hosts) + ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) + transport_hosts = rabbitmq_hosts + + if transport_hosts: + transport_url_hosts = ','.join([ + "{}:{}@{}:{}".format(ctxt['rabbitmq_user'], + ctxt['rabbitmq_password'], + host_, + rabbitmq_port) + for host_ in transport_hosts]) + ctxt['transport_url'] = "rabbit://{}/{}".format( + transport_url_hosts, vhost) + + oslo_messaging_flags = conf.get('oslo-messaging-flags', None) + if oslo_messaging_flags: + ctxt['oslo_messaging_flags'] = config_flags_parser( + oslo_messaging_flags) + + oslo_messaging_driver = conf.get( + 'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER) + if oslo_messaging_driver: + ctxt['oslo_messaging_driver'] = oslo_messaging_driver + + notification_format = conf.get('notification-format', None) + if notification_format: + ctxt['notification_format'] = notification_format + + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) + if send_notifications_to_logs: + ctxt['send_notifications_to_logs'] = send_notifications_to_logs + + if not self.complete: + return {} + + return ctxt + + +class CephContext(OSContextGenerator): + """Generates context for /etc/ceph/ceph.conf templates.""" + interfaces = ['ceph'] + + def __call__(self): + if not relation_ids('ceph'): + return {} + + log('Generating template context for ceph', level=DEBUG) + mon_hosts = [] + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } + for rid in relation_ids('ceph'): + for unit in related_units(rid): + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('rbd_features'): + default_features = relation_get('rbd-features', rid=rid, unit=unit) + if default_features is not None: + ctxt['rbd_features'] = default_features + + ceph_addrs = relation_get('ceph-public-address', rid=rid, + unit=unit) + if ceph_addrs: + for addr in ceph_addrs.split(' '): + mon_hosts.append(format_ipv6_addr(addr) or addr) + else: + priv_addr = relation_get('private-address', rid=rid, + unit=unit) + mon_hosts.append(format_ipv6_addr(priv_addr) or priv_addr) + + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) + + if not os.path.isdir('/etc/ceph'): + os.mkdir('/etc/ceph') + + if not self.context_complete(ctxt): + return {} + + ensure_packages(['ceph-common']) + return ctxt + + def context_complete(self, ctxt): + """Overridden here to ensure the context is actually complete. + + We set `key` and `auth` to None here, by default, to ensure + that the context will always evaluate to incomplete until the + Ceph relation has actually sent these details; otherwise, + there is a potential race condition between the relation + appearing and the first unit actually setting this data on the + relation. 
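+
+        For example, a first render may see ``{'mon_hosts': '10.0.0.1'}``
+        before 'auth' and 'key' arrive; that must evaluate as incomplete.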
+ + :param ctxt: The current context members + :type ctxt: Dict[str, ANY] + :returns: True if the context is complete + :rtype: bool + """ + if 'auth' not in ctxt or 'key' not in ctxt: + return False + return super(CephContext, self).context_complete(ctxt) + + +class HAProxyContext(OSContextGenerator): + """Provides half a context for the haproxy template, which describes + all peers to be included in the cluster. Each charm needs to include + its own context generator that describes the port mapping. + + :side effect: mkdir is called on HAPROXY_RUN_DIR + """ + interfaces = ['cluster'] + + def __init__(self, singlenode_mode=False, + address_types=ADDRESS_TYPES): + self.address_types = address_types + self.singlenode_mode = singlenode_mode + + def __call__(self): + if not os.path.isdir(HAPROXY_RUN_DIR): + mkdir(path=HAPROXY_RUN_DIR) + if not relation_ids('cluster') and not self.singlenode_mode: + return {} + + l_unit = local_unit().replace('/', '-') + cluster_hosts = collections.OrderedDict() + + # NOTE(jamespage): build out map of configured network endpoints + # and associated backends + for addr_type in self.address_types: + cfg_opt = 'os-{}-network'.format(addr_type) + # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather + # than 'internal' + if addr_type == 'internal': + _addr_map_type = INTERNAL + else: + _addr_map_type = addr_type + # Network spaces aware + laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'], + config(cfg_opt)) + if laddr: + netmask = get_netmask_for_address(laddr) + cluster_hosts[laddr] = { + 'network': "{}/{}".format(laddr, + netmask), + 'backends': collections.OrderedDict([(l_unit, + laddr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set {addr_type}-address with + # get_relation_ip(addr_type) + _laddr = relation_get('{}-address'.format(addr_type), + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[laddr]['backends'][_unit] = _laddr + + # NOTE(jamespage) add backend based on get_relation_ip - this + # will either be the only backend or the fallback if no acls + # match in the frontend + # Network spaces aware + addr = get_relation_ip('cluster') + cluster_hosts[addr] = {} + netmask = get_netmask_for_address(addr) + cluster_hosts[addr] = { + 'network': "{}/{}".format(addr, netmask), + 'backends': collections.OrderedDict([(l_unit, + addr)]) + } + for rid in relation_ids('cluster'): + for unit in sorted(related_units(rid)): + # API Charms will need to set their private-address with + # get_relation_ip('cluster') + _laddr = relation_get('private-address', + rid=rid, unit=unit) + if _laddr: + _unit = unit.replace('/', '-') + cluster_hosts[addr]['backends'][_unit] = _laddr + + ctxt = { + 'frontends': cluster_hosts, + 'default_backend': addr + } + + if config('haproxy-server-timeout'): + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') + + if config('haproxy-client-timeout'): + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') + + if config('haproxy-queue-timeout'): + ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') + + if config('haproxy-connect-timeout'): + ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') + + if config('prefer-ipv6'): + ctxt['local_host'] = 'ip6-localhost' + ctxt['haproxy_host'] = '::' + else: + ctxt['local_host'] = '127.0.0.1' + ctxt['haproxy_host'] = '0.0.0.0' + + ctxt['ipv6_enabled'] = not is_ipv6_disabled() + + ctxt['stat_port'] = '8888' + + db = kv() + 
ctxt['stat_password'] = db.get('stat-password') + if not ctxt['stat_password']: + ctxt['stat_password'] = db.set('stat-password', + pwgen(32)) + db.flush() + + for frontend in cluster_hosts: + if (len(cluster_hosts[frontend]['backends']) > 1 or + self.singlenode_mode): + # Enable haproxy when we have enough peers. + log('Ensuring haproxy enabled in /etc/default/haproxy.', + level=DEBUG) + with open('/etc/default/haproxy', 'w') as out: + out.write('ENABLED=1\n') + + return ctxt + + log('HAProxy context is incomplete, this unit has no peers.', + level=INFO) + return {} + + +class ImageServiceContext(OSContextGenerator): + interfaces = ['image-service'] + + def __call__(self): + """Obtains the glance API server from the image-service relation. + Useful in nova and cinder (currently). + """ + log('Generating template context for image-service.', level=DEBUG) + rids = relation_ids('image-service') + if not rids: + return {} + + for rid in rids: + for unit in related_units(rid): + api_server = relation_get('glance-api-server', + rid=rid, unit=unit) + if api_server: + return {'glance_api_servers': api_server} + + log("ImageService context is incomplete. Missing required relation " + "data.", level=INFO) + return {} + + +class ApacheSSLContext(OSContextGenerator): + """Generates a context for an apache vhost configuration that configures + HTTPS reverse proxying for one or many endpoints. Generated context + looks something like:: + + { + 'namespace': 'cinder', + 'private_address': 'iscsi.mycinderhost.com', + 'endpoints': [(8776, 8766), (8777, 8767)] + } + + The endpoints list consists of a tuples mapping external ports + to internal ports. + """ + interfaces = ['https'] + + # charms should inherit this context and set external ports + # and service namespace accordingly. + external_ports = [] + service_namespace = None + user = group = 'root' + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers'] + check_call(cmd) + + def configure_cert(self, cn=None): + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + mkdir(path=ssl_dir) + cert, key = get_cert(cn) + if cert and key: + if cn: + cert_filename = 'cert_{}'.format(cn) + key_filename = 'key_{}'.format(cn) + else: + cert_filename = 'cert' + key_filename = 'key' + + write_file(path=os.path.join(ssl_dir, cert_filename), + content=b64decode(cert), owner=self.user, + group=self.group, perms=0o640) + write_file(path=os.path.join(ssl_dir, key_filename), + content=b64decode(key), owner=self.user, + group=self.group, perms=0o640) + + def configure_ca(self): + ca_cert = get_ca_cert() + if ca_cert: + install_ca_cert(b64decode(ca_cert)) + + def canonical_names(self): + """Figure out which canonical names clients will access this service. + """ + cns = [] + for r_id in relation_ids('identity-service'): + for unit in related_units(r_id): + rdata = relation_get(rid=r_id, unit=unit) + for k in rdata: + if k.startswith('ssl_key_'): + cns.append(k.lstrip('ssl_key_')) + + return sorted(list(set(cns))) + + def get_network_addresses(self): + """For each network configured, return corresponding address and + hostnamr or vip (if available). + + Returns a list of tuples of the form: + + [(address_in_net_a, hostname_in_net_a), + (address_in_net_b, hostname_in_net_b), + ...] + + or, if no hostnames(s) available: + + [(address_in_net_a, vip_in_net_a), + (address_in_net_b, vip_in_net_b), + ...] + + or, if no vip(s) available: + + [(address_in_net_a, address_in_net_a), + (address_in_net_b, address_in_net_b), + ...] 
+ """ + addresses = [] + for net_type in [INTERNAL, ADMIN, PUBLIC]: + net_config = config(ADDRESS_MAP[net_type]['config']) + # NOTE(jamespage): Fallback must always be private address + # as this is used to bind services on the + # local unit. + fallback = unit_get("private-address") + if net_config: + addr = get_address_in_network(net_config, + fallback) + else: + try: + addr = network_get_primary_address( + ADDRESS_MAP[net_type]['binding'] + ) + except (NotImplementedError, NoNetworkBinding): + addr = fallback + + endpoint = resolve_address(net_type) + addresses.append((addr, endpoint)) + + return sorted(set(addresses)) + + def __call__(self): + if isinstance(self.external_ports, six.string_types): + self.external_ports = [self.external_ports] + + if not self.external_ports or not https(): + return {} + + use_keystone_ca = True + for rid in relation_ids('certificates'): + if related_units(rid): + use_keystone_ca = False + + if use_keystone_ca: + self.configure_ca() + + self.enable_modules() + + ctxt = {'namespace': self.service_namespace, + 'endpoints': [], + 'ext_ports': []} + + if use_keystone_ca: + cns = self.canonical_names() + if cns: + for cn in cns: + self.configure_cert(cn) + else: + # Expect cert/key provided in config (currently assumed that ca + # uses ip for cn) + for net_type in (INTERNAL, ADMIN, PUBLIC): + cn = resolve_address(endpoint_type=net_type) + self.configure_cert(cn) + + addresses = self.get_network_addresses() + for address, endpoint in addresses: + for api_port in self.external_ports: + ext_port = determine_apache_port(api_port, + singlenode_mode=True) + int_port = determine_api_port(api_port, singlenode_mode=True) + portmap = (address, endpoint, int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + ctxt['ext_ports'].append(int(ext_port)) + + ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) + return ctxt + + +class NeutronContext(OSContextGenerator): + interfaces = [] + + @property + def plugin(self): + return None + + @property + def network_manager(self): + return None + + @property + def packages(self): + return neutron_plugin_attribute(self.plugin, 'packages', + self.network_manager) + + @property + def neutron_security_groups(self): + return None + + def _ensure_packages(self): + for pkgs in self.packages: + ensure_packages(pkgs) + + def ovs_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'ovs', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return ovs_ctxt + + def nuage_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nuage_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'vsp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nuage_ctxt + + def nvp_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + nvp_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'nvp', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return nvp_ctxt + + def n1kv_ctxt(self): + driver = 
neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + n1kv_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + n1kv_user_config_flags = config('n1kv-config-flags') + restrict_policy_profiles = config('n1kv-restrict-policy-profiles') + n1kv_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'n1kv', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': n1kv_config, + 'vsm_ip': config('n1kv-vsm-ip'), + 'vsm_username': config('n1kv-vsm-username'), + 'vsm_password': config('n1kv-vsm-password'), + 'restrict_policy_profiles': restrict_policy_profiles} + + if n1kv_user_config_flags: + flags = config_flags_parser(n1kv_user_config_flags) + n1kv_ctxt['user_config_flags'] = flags + + return n1kv_ctxt + + def calico_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + calico_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'Calico', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + + return calico_ctxt + + def neutron_ctxt(self): + if https(): + proto = 'https' + else: + proto = 'http' + + if is_clustered(): + host = config('vip') + else: + host = unit_get('private-address') + + ctxt = {'network_manager': self.network_manager, + 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} + return ctxt + + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + + def midonet_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + midonet_config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + mido_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'midonet', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': midonet_config} + + return mido_ctxt + + def __call__(self): + if self.network_manager not in ['quantum', 'neutron']: + return {} + + if not self.plugin: + return {} + + ctxt = self.neutron_ctxt() + + if self.plugin == 'ovs': + ctxt.update(self.ovs_ctxt()) + elif self.plugin in ['nvp', 'nsx']: + ctxt.update(self.nvp_ctxt()) + elif self.plugin == 'n1kv': + ctxt.update(self.n1kv_ctxt()) + elif self.plugin == 'Calico': + ctxt.update(self.calico_ctxt()) + elif self.plugin == 'vsp': + ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) + elif self.plugin == 'midonet': + ctxt.update(self.midonet_ctxt()) + + alchemy_flags = config('neutron-alchemy-flags') + if alchemy_flags: + flags = config_flags_parser(alchemy_flags) + ctxt['neutron_alchemy_flags'] = flags + + return ctxt + + +class NeutronPortContext(OSContextGenerator): + + def resolve_ports(self, ports): + """Resolve NICs not yet bound to bridge(s) + + If hwaddress provided then returns resolved hwaddress otherwise NIC. 
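+
+        For example (illustrative), ports ['eth0', 'aa:bb:cc:dd:ee:f0']
+        might resolve to ['eth0', 'eth2'] when that MAC belongs to an
+        unconfigured NIC eth2 that is not a bridge member.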
+        """
+        if not ports:
+            return None
+
+        hwaddr_to_nic = {}
+        hwaddr_to_ip = {}
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
+            hwaddr = get_nic_hwaddr(nic)
+            hwaddr_to_nic[hwaddr] = nic
+            addresses = get_ipv4_addr(nic, fatal=False)
+            addresses += get_ipv6_addr(iface=nic, fatal=False)
+            hwaddr_to_ip[hwaddr] = addresses
+
+        resolved = []
+        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
+        for entry in ports:
+            if re.match(mac_regex, entry):
+                # NIC is in known NICs and does NOT have an IP address
+                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
+                    # If the nic is part of a bridge then don't use it
+                    if is_bridge_member(hwaddr_to_nic[entry]):
+                        continue
+
+                    # Entry is a MAC address for a valid interface that doesn't
+                    # have an IP address assigned yet.
+                    resolved.append(hwaddr_to_nic[entry])
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
+                resolved.append(entry)
+
+        # Ensure no duplicates
+        return list(set(resolved))
+
+
+class OSConfigFlagContext(OSContextGenerator):
+    """Provides support for user-defined config flags.
+
+    Users can define a comma-separated list of key=value pairs
+    in the charm configuration and apply them at any point in
+    any file by using a template flag.
+
+    Sometimes users might want config flags inserted within a
+    specific section so this class allows users to specify the
+    template flag name, allowing for multiple template flags
+    (sections) within the same context.
+
+    NOTE: the value of config-flags may be a comma-separated list of
+          key=value pairs and some OpenStack config files support
+          comma-separated lists as values.
+    """
+
+    def __init__(self, charm_flag='config-flags',
+                 template_flag='user_config_flags'):
+        """
+        :param charm_flag: config flags in charm configuration.
+        :param template_flag: insert point for user-defined flags in template
+                              file.
+        """
+        super(OSConfigFlagContext, self).__init__()
+        self._charm_flag = charm_flag
+        self._template_flag = template_flag
+
+    def __call__(self):
+        config_flags = config(self._charm_flag)
+        if not config_flags:
+            return {}
+
+        return {self._template_flag:
+                config_flags_parser(config_flags)}
+
+
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
+class SubordinateConfigContext(OSContextGenerator):
+
+    """
+    Responsible for inspecting relations to subordinates that
+    may be exporting required config via a json blob.
+
+    The subordinate interface allows subordinates to export their
+    configuration requirements to the principal for multiple config
+    files and multiple services.
+    For example, a subordinate that has interfaces
+    to both glance and nova may export the following yaml blob as json::
+
+        glance:
+            /etc/glance/glance-api.conf:
+                sections:
+                    DEFAULT:
+                        - [key1, value1]
+            /etc/glance/glance-registry.conf:
+                MYSECTION:
+                    - [key2, value2]
+        nova:
+            /etc/nova/nova.conf:
+                sections:
+                    DEFAULT:
+                        - [key3, value3]
+
+
+    It is then up to the principal charms to subscribe this context to
+    the service+config file it is interested in. Configuration data will
+    be available in the template context, in glance's case, as::
+
+        ctxt = {
+            ... other context ...
+            'subordinate_configuration': {
+                'DEFAULT': {
+                    'key1': 'value1',
+                },
+                'MYSECTION': {
+                    'key2': 'value2',
+                },
+            }
+        }
+    """
+
+    def __init__(self, service, config_file, interface):
+        """
+        :param service: Service name key to query in any subordinate
+                        data found.
+        :param config_file: Service's config file to query sections for.
+        :param interface: Subordinate interface to inspect.
+        """
+        self.config_file = config_file
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]
+
+    def __call__(self):
+        ctxt = {'sections': {}}
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
+            for unit in related_units(rid):
+                sub_config = relation_get('subordinate_configuration',
+                                          rid=rid, unit=unit)
+                if sub_config and sub_config != '':
+                    try:
+                        sub_config = json.loads(sub_config)
+                    except Exception:
+                        log('Could not parse JSON from '
+                            'subordinate_configuration setting from %s'
+                            % rid, level=ERROR)
+                        continue
+
+                    for service in self.services:
+                        if service not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s service'
+                                % (rid, service), level=INFO)
+                            continue
+
+                        sub_config = sub_config[service]
+                        if self.config_file not in sub_config:
+                            log('Found subordinate_configuration on %s but it '
+                                'contained nothing for %s'
+                                % (rid, self.config_file), level=INFO)
+                            continue
+
+                        sub_config = sub_config[self.config_file]
+                        for k, v in six.iteritems(sub_config):
+                            if k == 'sections':
+                                for section, config_list in six.iteritems(v):
+                                    log("adding section '%s'" % (section),
+                                        level=DEBUG)
+                                    if ctxt[k].get(section):
+                                        ctxt[k][section].extend(config_list)
+                                    else:
+                                        ctxt[k][section] = config_list
+                            else:
+                                ctxt[k] = v
+        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
+        return ctxt
+
+
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
+class SyslogContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {'use_syslog': config('use-syslog')}
+        return ctxt
+
+
+class BindHostContext(OSContextGenerator):
+
+    def __call__(self):
+        if config('prefer-ipv6'):
+            return {'bind_host': '::'}
+        else:
+            return {'bind_host': '0.0.0.0'}
+
+
+MAX_DEFAULT_WORKERS = 4
+DEFAULT_MULTIPLIER = 2
+
+
+def _calculate_workers():
+    '''
+    Determine the number of worker processes based on the CPU
+    count of the unit containing the application.
+
+    Workers will be limited to MAX_DEFAULT_WORKERS in
+    container environments where no worker-multiplier configuration
+    option has been set.
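+
+    Worked example (illustrative): on a 4-core unit with worker-multiplier
+    unset, the default multiplier of 2 yields int(4 * 2) = 8 workers,
+    capped at 4 when the unit runs in a container.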
+
+    @returns int: number of worker processes to use
+    '''
+    multiplier = config('worker-multiplier') or DEFAULT_MULTIPLIER
+    count = int(_num_cpus() * multiplier)
+    if multiplier > 0 and count == 0:
+        count = 1
+
+    if config('worker-multiplier') is None and is_container():
+        # NOTE(jamespage): Limit unconfigured worker-multiplier
+        #                  to MAX_DEFAULT_WORKERS to avoid insane
+        #                  worker configuration in LXD containers
+        #                  on large servers
+        # Reference: https://pad.lv/1665270
+        count = min(count, MAX_DEFAULT_WORKERS)
+
+    return count
+
+
+def _num_cpus():
+    '''
+    Compatibility wrapper for calculating the number of CPUs
+    a unit has.
+
+    @returns: int: number of CPU cores detected
+    '''
+    try:
+        return psutil.cpu_count()
+    except AttributeError:
+        return psutil.NUM_CPUS
+
+
+class WorkerConfigContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {"workers": _calculate_workers()}
+        return ctxt
+
+
+class WSGIWorkerConfigContext(WorkerConfigContext):
+
+    def __init__(self, name=None, script=None, admin_script=None,
+                 public_script=None, user=None, group=None,
+                 process_weight=1.00,
+                 admin_process_weight=0.25, public_process_weight=0.75):
+        self.service_name = name
+        self.user = user or name
+        self.group = group or name
+        self.script = script
+        self.admin_script = admin_script
+        self.public_script = public_script
+        self.process_weight = process_weight
+        self.admin_process_weight = admin_process_weight
+        self.public_process_weight = public_process_weight
+
+    def __call__(self):
+        total_processes = _calculate_workers()
+        ctxt = {
+            "service_name": self.service_name,
+            "user": self.user,
+            "group": self.group,
+            "script": self.script,
+            "admin_script": self.admin_script,
+            "public_script": self.public_script,
+            "processes": int(math.ceil(self.process_weight * total_processes)),
+            "admin_processes": int(math.ceil(self.admin_process_weight *
+                                             total_processes)),
+            "public_processes": int(math.ceil(self.public_process_weight *
+                                              total_processes)),
+            "threads": 1,
+        }
+        return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+                    ctxt['zmq_redis_address'] = relation_get(
+                        'zmq_redis_address', unit, rid)
+
+        return ctxt
+
+
+class NotificationDriverContext(OSContextGenerator):
+
+    def __init__(self, zmq_relation='zeromq-configuration',
+                 amqp_relation='amqp'):
+        """
+        :param zmq_relation: Name of ZeroMQ relation to check
+        """
+        self.zmq_relation = zmq_relation
+        self.amqp_relation = amqp_relation
+
+    def __call__(self):
+        ctxt = {'notifications': 'False'}
+        if is_relation_made(self.amqp_relation):
+            ctxt['notifications'] = "True"
+
+        return ctxt
+
+
+class SysctlContext(OSContextGenerator):
+    """This context checks if the 'sysctl' option exists in configuration
+    and, if so, creates a file with the loaded contents."""
+    def __call__(self):
+        sysctl_dict = config('sysctl')
+        if sysctl_dict:
+            sysctl_create(sysctl_dict,
+                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
+        return {'sysctl': sysctl_dict}
+
+
+class NeutronAPIContext(OSContextGenerator):
+    '''
+    Inspects the current neutron-plugin-api relation for neutron settings and
+    returns defaults if it is not present.
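+
+    For example (illustrative), with no relation present the context still
+    carries the defaults listed below, e.g. l2_population=False,
+    overlay_network_type='gre' and global_physnet_mtu=1500.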
+ ''' + interfaces = ['neutron-plugin-api'] + + def __call__(self): + self.neutron_defaults = { + 'l2_population': { + 'rel_key': 'l2-population', + 'default': False, + }, + 'overlay_network_type': { + 'rel_key': 'overlay-network-type', + 'default': 'gre', + }, + 'neutron_security_groups': { + 'rel_key': 'neutron-security-groups', + 'default': False, + }, + 'network_device_mtu': { + 'rel_key': 'network-device-mtu', + 'default': None, + }, + 'enable_dvr': { + 'rel_key': 'enable-dvr', + 'default': False, + }, + 'enable_l3ha': { + 'rel_key': 'enable-l3ha', + 'default': False, + }, + 'dns_domain': { + 'rel_key': 'dns-domain', + 'default': None, + }, + 'polling_interval': { + 'rel_key': 'polling-interval', + 'default': 2, + }, + 'rpc_response_timeout': { + 'rel_key': 'rpc-response-timeout', + 'default': 60, + }, + 'report_interval': { + 'rel_key': 'report-interval', + 'default': 30, + }, + 'enable_qos': { + 'rel_key': 'enable-qos', + 'default': False, + }, + 'enable_nsg_logging': { + 'rel_key': 'enable-nsg-logging', + 'default': False, + }, + 'enable_nfg_logging': { + 'rel_key': 'enable-nfg-logging', + 'default': False, + }, + 'enable_port_forwarding': { + 'rel_key': 'enable-port-forwarding', + 'default': False, + }, + 'global_physnet_mtu': { + 'rel_key': 'global-physnet-mtu', + 'default': 1500, + }, + 'physical_network_mtus': { + 'rel_key': 'physical-network-mtus', + 'default': None, + }, + } + ctxt = self.get_neutron_options({}) + for rid in relation_ids('neutron-plugin-api'): + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + # The l2-population key is used by the context as a way of + # checking if the api service on the other end is sending data + # in a recent format. + if 'l2-population' in rdata: + ctxt.update(self.get_neutron_options(rdata)) + + extension_drivers = [] + + if ctxt['enable_qos']: + extension_drivers.append('qos') + + if ctxt['enable_nsg_logging']: + extension_drivers.append('log') + + ctxt['extension_drivers'] = ','.join(extension_drivers) + + l3_extension_plugins = [] + + if ctxt['enable_port_forwarding']: + l3_extension_plugins.append('port_forwarding') + + ctxt['l3_extension_plugins'] = l3_extension_plugins + + return ctxt + + def get_neutron_options(self, rdata): + settings = {} + for nkey in self.neutron_defaults.keys(): + defv = self.neutron_defaults[nkey]['default'] + rkey = self.neutron_defaults[nkey]['rel_key'] + if rkey in rdata.keys(): + if type(defv) is bool: + settings[nkey] = bool_from_string(rdata[rkey]) + else: + settings[nkey] = rdata[rkey] + else: + settings[nkey] = defv + return settings + + +class ExternalPortContext(NeutronPortContext): + + def __call__(self): + ctxt = {} + ports = config('ext-port') + if ports: + ports = [p.strip() for p in ports.split()] + ports = self.resolve_ports(ports) + if ports: + ctxt = {"ext_port": ports[0]} + napi_settings = NeutronAPIContext()() + mtu = napi_settings.get('network_device_mtu') + if mtu: + ctxt['ext_port_mtu'] = mtu + + return ctxt + + +class DataPortContext(NeutronPortContext): + + def __call__(self): + ports = config('data-port') + if ports: + # Map of {bridge:port/mac} + portmap = parse_data_port_mappings(ports) + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. + resolved = self.resolve_ports(ports) + # Rebuild port index using resolved and filtered ports. 
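+            # (Illustrative: for data-port 'br-ex:52:54:00:aa:bb:cc' the MAC
+            # entry resolves to a NIC name, so normalized maps that MAC back
+            # to the NIC; for 'br-ex:eth1' the name simply maps to itself.)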
+            normalized = {get_nic_hwaddr(port): port for port in resolved
+                          if port not in ports}
+            normalized.update({port: port for port in resolved
+                               if port in ports})
+            if resolved:
+                return {normalized[port]: bridge for port, bridge in
+                        six.iteritems(portmap) if port in normalized.keys()}
+
+        return None
+
+
+class PhyNICMTUContext(DataPortContext):
+
+    def __call__(self):
+        ctxt = {}
+        mappings = super(PhyNICMTUContext, self).__call__()
+        if mappings and mappings.keys():
+            ports = sorted(mappings.keys())
+            napi_settings = NeutronAPIContext()()
+            mtu = napi_settings.get('network_device_mtu')
+            all_ports = set()
+            # If any of the ports is a vlan device, its underlying device
+            # must have the mtu applied first.
+            for port in ports:
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
+                    lport = os.path.basename(lport)
+                    all_ports.add(lport.split('_')[1])
+
+            all_ports = list(all_ports)
+            all_ports.extend(ports)
+            if mtu:
+                ctxt["devs"] = '\\n'.join(all_ports)
+                ctxt['mtu'] = mtu
+
+        return ctxt
+
+
+class NetworkServiceContext(OSContextGenerator):
+
+    def __init__(self, rel_name='quantum-network-service'):
+        self.rel_name = rel_name
+        self.interfaces = [rel_name]
+
+    def __call__(self):
+        for rid in relation_ids(self.rel_name):
+            for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
+                ctxt = {
+                    'keystone_host': rdata.get('keystone_host'),
+                    'service_port': rdata.get('service_port'),
+                    'auth_port': rdata.get('auth_port'),
+                    'service_tenant': rdata.get('service_tenant'),
+                    'service_username': rdata.get('service_username'),
+                    'service_password': rdata.get('service_password'),
+                    'quantum_host': rdata.get('quantum_host'),
+                    'quantum_port': rdata.get('quantum_port'),
+                    'quantum_url': rdata.get('quantum_url'),
+                    'region': rdata.get('region'),
+                    'service_protocol':
+                    rdata.get('service_protocol') or 'http',
+                    'auth_protocol':
+                    rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                    rdata.get('api_version') or '2.0',
+                }
+                if self.context_complete(ctxt):
+                    return ctxt
+        return {}
+
+
+class InternalEndpointContext(OSContextGenerator):
+    """Internal endpoint context.
+
+    This context provides the endpoint type used for communication between
+    services, e.g. between Nova and Cinder internally. OpenStack uses public
+    endpoints by default, so this allows admins to optionally use internal
+    endpoints.
+    """
+    def __call__(self):
+        return {'use_internal_endpoints': config('use-internal-endpoints')}
+
+
+class VolumeAPIContext(InternalEndpointContext):
+    """Volume API context.
+
+    This context provides information regarding the volume endpoint to use
+    when communicating between services. It determines which version of the
+    API is appropriate for use.
+
+    The value is exposed in the context dictionary returned from calling the
+    VolumeAPIContext object. Information provided by this context is as
+    follows:
+
+        volume_api_version: the volume api version to use, currently
+            'v2' or 'v3'
+        volume_catalog_info: the information to use for a cinder client
+            configuration that consumes API endpoints from the keystone
+            catalog. This is defined as the type:name:endpoint_type string.
+    """
+    # FIXME(wolsen) This implementation is based on the provider being able
+    # to specify the package version to check but does not guarantee that the
+    # volume service api version selected is available. In practice, it is
+    # quite likely the volume service *is* providing the v3 volume service.
+    # This should be resolved when the service-discovery spec is implemented.
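+    #
+    # Illustrative usage (package name assumed for this sketch):
+    #   ctxt = VolumeAPIContext('cinder-common')()
+    #   ctxt['volume_api_version']   # -> '3' on pike or later, else '2'
+    #   ctxt['volume_catalog_info']  # -> e.g. 'volumev3:cinderv3:publicURL'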
+    def __init__(self, pkg):
+        """
+        Creates a new VolumeAPIContext for use in determining which version
+        of the Volume API should be used for communication. A package codename
+        should be supplied for determining the currently installed OpenStack
+        version.
+
+        :param pkg: the package codename to use in order to determine the
+            component version (e.g. nova-common). See
+            charmhelpers.contrib.openstack.utils.PACKAGE_CODENAMES for more.
+        """
+        super(VolumeAPIContext, self).__init__()
+        self._ctxt = None
+        if not pkg:
+            raise ValueError('package name must be provided in order to '
+                             'determine current OpenStack version.')
+        self.pkg = pkg
+
+    @property
+    def ctxt(self):
+        if self._ctxt is not None:
+            return self._ctxt
+        self._ctxt = self._determine_ctxt()
+        return self._ctxt
+
+    def _determine_ctxt(self):
+        """Determines the Volume API endpoint information.
+
+        Determines the appropriate version of the API that should be used
+        as well as the catalog_info string that would be supplied. Returns
+        a dict containing the volume_api_version and the volume_catalog_info.
+        """
+        rel = os_release(self.pkg)
+        version = '2'
+        if CompareOpenStackReleases(rel) >= 'pike':
+            version = '3'
+
+        service_type = 'volumev{version}'.format(version=version)
+        service_name = 'cinderv{version}'.format(version=version)
+        endpoint_type = 'publicURL'
+        if config('use-internal-endpoints'):
+            endpoint_type = 'internalURL'
+        catalog_info = '{type}:{name}:{endpoint}'.format(
+            type=service_type, name=service_name, endpoint=endpoint_type)
+
+        return {
+            'volume_api_version': version,
+            'volume_catalog_info': catalog_info,
+        }
+
+    def __call__(self):
+        return self.ctxt
+
+
+class AppArmorContext(OSContextGenerator):
+    """Base class for apparmor contexts."""
+
+    def __init__(self, profile_name=None):
+        self._ctxt = None
+        self.aa_profile = profile_name
+        self.aa_utils_packages = ['apparmor-utils']
+
+    @property
+    def ctxt(self):
+        if self._ctxt is not None:
+            return self._ctxt
+        self._ctxt = self._determine_ctxt()
+        return self._ctxt
+
+    def _determine_ctxt(self):
+        """
+        Validate that the aa-profile-mode setting is disable, enforce, or
+        complain.
+
+        :return ctxt: Dictionary of the apparmor profile or None
+        """
+        if config('aa-profile-mode') in ['disable', 'enforce', 'complain']:
+            ctxt = {'aa_profile_mode': config('aa-profile-mode'),
+                    'ubuntu_release': lsb_release()['DISTRIB_RELEASE']}
+            if self.aa_profile:
+                ctxt['aa_profile'] = self.aa_profile
+        else:
+            ctxt = None
+        return ctxt
+
+    def __call__(self):
+        return self.ctxt
+
+    def install_aa_utils(self):
+        """
+        Install packages required for apparmor configuration.
+        """
+        log("Installing apparmor utils.")
+        ensure_packages(self.aa_utils_packages)
+
+    def manually_disable_aa_profile(self):
+        """
+        Manually disable an apparmor profile.
+
+        If aa-profile-mode is set to disabled (default) this is required as
+        the template has been written but apparmor is not yet aware of the
+        profile, so ``aa-disable <aa-profile>`` fails. Without this the
+        profile would kick into enforce mode on the next service restart.
+
+        """
+        profile_path = '/etc/apparmor.d'
+        disable_path = '/etc/apparmor.d/disable'
+        if not os.path.lexists(os.path.join(disable_path, self.aa_profile)):
+            os.symlink(os.path.join(profile_path, self.aa_profile),
+                       os.path.join(disable_path, self.aa_profile))
+
+    def setup_aa_profile(self):
+        """
+        Set up an apparmor profile.
+        The ctxt dictionary will contain the apparmor profile mode and
+        the apparmor profile name.
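+        (Illustrative: with aa_profile_mode 'complain' and aa_profile
+        'usr.sbin.mysqld', the resulting command is
+        ``aa-complain usr.sbin.mysqld``.)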
+ Makes calls out to aa-disable, aa-complain, or aa-enforce to setup + the apparmor profile. + """ + self() + if not self.ctxt: + log("Not enabling apparmor Profile") + return + self.install_aa_utils() + cmd = ['aa-{}'.format(self.ctxt['aa_profile_mode'])] + cmd.append(self.ctxt['aa_profile']) + log("Setting up the apparmor profile for {} in {} mode." + "".format(self.ctxt['aa_profile'], self.ctxt['aa_profile_mode'])) + try: + check_call(cmd) + except CalledProcessError as e: + # If aa-profile-mode is set to disabled (default) manual + # disabling is required as the template has been written but + # apparmor is yet unaware of the profile and aa-disable aa-profile + # fails. If aa-disable learns to read profile files first this can + # be removed. + if self.ctxt['aa_profile_mode'] == 'disable': + log("Manually disabling the apparmor profile for {}." + "".format(self.ctxt['aa_profile'])) + self.manually_disable_aa_profile() + return + status_set('blocked', "Apparmor profile {} failed to be set to {}." + "".format(self.ctxt['aa_profile'], + self.ctxt['aa_profile_mode'])) + raise e + + +class MemcacheContext(OSContextGenerator): + """Memcache context + + This context provides options for configuring a local memcache client and + server for both IPv4 and IPv6 + """ + + def __init__(self, package=None): + """ + @param package: Package to examine to extrapolate OpenStack release. + Used when charms have no openstack-origin config + option (ie subordinates) + """ + self.package = package + + def __call__(self): + ctxt = {} + ctxt['use_memcache'] = enable_memcache(package=self.package) + if ctxt['use_memcache']: + # Trusty version of memcached does not support ::1 as a listen + # address so use host file entry instead + release = lsb_release()['DISTRIB_CODENAME'].lower() + if is_ipv6_disabled(): + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '127.0.0.1' + else: + ctxt['memcache_server'] = 'localhost' + ctxt['memcache_server_formatted'] = '127.0.0.1' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = '{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + else: + if CompareHostReleases(release) > 'trusty': + ctxt['memcache_server'] = '::1' + else: + ctxt['memcache_server'] = 'ip6-localhost' + ctxt['memcache_server_formatted'] = '[::1]' + ctxt['memcache_port'] = '11211' + ctxt['memcache_url'] = 'inet6:{}:{}'.format( + ctxt['memcache_server_formatted'], + ctxt['memcache_port']) + return ctxt + + +class EnsureDirContext(OSContextGenerator): + ''' + Serves as a generic context to create a directory as a side-effect. + + Useful for software that supports drop-in files (.d) in conjunction + with config option-based templates. Examples include: + * OpenStack oslo.policy drop-in files; + * systemd drop-in config files; + * other software that supports overriding defaults with .d files + + Another use-case is when a subordinate generates a configuration for + primary to render in a separate directory. + + Some software requires a user to create a target directory to be + scanned for drop-in files with a specific format. This is why this + context is needed to do that before rendering a template. + ''' + + def __init__(self, dirname, **kwargs): + '''Used merely to ensure that a given directory exists.''' + self.dirname = dirname + self.kwargs = kwargs + + def __call__(self): + mkdir(self.dirname, **self.kwargs) + return {} + + +class VersionsContext(OSContextGenerator): + """Context to return the openstack and operating system versions. 
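+
+    For example (illustrative values), a queens payload on a bionic unit
+    yields {'openstack_release': 'queens',
+    'operating_system_release': 'bionic'}.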
+ + """ + def __init__(self, pkg='python-keystone'): + """Initialise context. + + :param pkg: Package to extrapolate openstack version from. + :type pkg: str + """ + self.pkg = pkg + + def __call__(self): + ostack = os_release(self.pkg) + osystem = lsb_release()['DISTRIB_CODENAME'].lower() + return { + 'openstack_release': ostack, + 'operating_system_release': osystem} + + +class LogrotateContext(OSContextGenerator): + """Common context generator for logrotate.""" + + def __init__(self, location, interval, count): + """ + :param location: Absolute path for the logrotate config file + :type location: str + :param interval: The interval for the rotations. Valid values are + 'daily', 'weekly', 'monthly', 'yearly' + :type interval: str + :param count: The logrotate count option configures the 'count' times + the log files are being rotated before being + :type count: int + """ + self.location = location + self.interval = interval + self.count = 'rotate {}'.format(count) + + def __call__(self): + ctxt = { + 'logrotate_logs_location': self.location, + 'logrotate_interval': self.interval, + 'logrotate_count': self.count, + } + return ctxt + + +class HostInfoContext(OSContextGenerator): + """Context to provide host information.""" + + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' in addr[3]: + fqdn = addr[3] + break + return fqdn + + def __call__(self): + name = socket.gethostname() + ctxt = { + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) + } + return ctxt + + +def validate_ovs_use_veth(*args, **kwargs): + """Validate OVS use veth setting for dhcp agents + + The ovs_use_veth setting is considered immutable as it will break existing + deployments. Historically, we set ovs_use_veth=True in dhcp_agent.ini. It + turns out this is no longer necessary. Ideally, all new deployments would + have this set to False. + + This function validates that the config value does not conflict with + previously deployed settings in dhcp_agent.ini. 
+
+    See LP Bug#1831935 for details.
+
+    :returns: Status state and message
+    :rtype: Union[(None, None), (string, string)]
+    """
+    existing_ovs_use_veth = (
+        DHCPAgentContext.get_existing_ovs_use_veth())
+    config_ovs_use_veth = DHCPAgentContext.parse_ovs_use_veth()
+
+    # Check settings are set and not None
+    if existing_ovs_use_veth is not None and config_ovs_use_veth is not None:
+        # Check for mismatch between existing config ini and juju config
+        if existing_ovs_use_veth != config_ovs_use_veth:
+            # Stop the line to avoid breakage
+            msg = (
+                "The existing setting for dhcp_agent.ini ovs_use_veth, {}, "
+                "does not match the juju config setting, {}. This may lead to "
+                "VMs being unable to receive a DHCP IP. Either change the "
+                "juju config setting or dhcp agents may need to be recreated."
+                .format(existing_ovs_use_veth, config_ovs_use_veth))
+            log(msg, ERROR)
+            return (
+                "blocked",
+                "Mismatched existing and configured ovs-use-veth. See log.")
+
+    # Everything is OK
+    return None, None
+
+
+class DHCPAgentContext(OSContextGenerator):
+
+    def __call__(self):
+        """Return the DHCPAgentContext.
+
+        Return all DHCP Agent INI related configuration for the ovs unit
+        the charm is attached to (as a subordinate), including the
+        'dns_domain' from the neutron-plugin-api relation (if one is set).
+
+        :returns: Dictionary context
+        :rtype: Dict
+        """
+
+        ctxt = {}
+        dnsmasq_flags = config('dnsmasq-flags')
+        if dnsmasq_flags:
+            ctxt['dnsmasq_flags'] = config_flags_parser(dnsmasq_flags)
+        ctxt['dns_servers'] = config('dns-servers')
+
+        neutron_api_settings = NeutronAPIContext()()
+
+        ctxt['debug'] = config('debug')
+        ctxt['instance_mtu'] = config('instance-mtu')
+        ctxt['ovs_use_veth'] = self.get_ovs_use_veth()
+
+        ctxt['enable_metadata_network'] = config('enable-metadata-network')
+        ctxt['enable_isolated_metadata'] = config('enable-isolated-metadata')
+
+        if neutron_api_settings.get('dns_domain'):
+            ctxt['dns_domain'] = neutron_api_settings.get('dns_domain')
+
+        # Override user supplied config for these plugins as these settings
+        # are mandatory
+        if config('plugin') in ['nvp', 'nsx', 'n1kv']:
+            ctxt['enable_metadata_network'] = True
+            ctxt['enable_isolated_metadata'] = True
+
+        return ctxt
+
+    @staticmethod
+    def get_existing_ovs_use_veth():
+        """Return existing ovs_use_veth setting from dhcp_agent.ini.
+
+        :returns: Boolean value of existing ovs_use_veth setting or None
+        :rtype: Optional[Bool]
+        """
+        DHCP_AGENT_INI = "/etc/neutron/dhcp_agent.ini"
+        existing_ovs_use_veth = None
+        # If there is a dhcp_agent.ini file read the current setting
+        if os.path.isfile(DHCP_AGENT_INI):
+            # config_ini does the right thing and returns None if the setting
+            # is commented.
+            existing_ovs_use_veth = (
+                config_ini(DHCP_AGENT_INI)["DEFAULT"].get("ovs_use_veth"))
+        # Convert to Bool if necessary
+        if isinstance(existing_ovs_use_veth, six.string_types):
+            return bool_from_string(existing_ovs_use_veth)
+        return existing_ovs_use_veth
+
+    @staticmethod
+    def parse_ovs_use_veth():
+        """Parse the ovs-use-veth config setting.
+
+        Parse the string config setting for ovs-use-veth and return a boolean
+        or None.
+
+        bool_from_string will raise a ValueError if the string is not falsy
+        or truthy.
+
+        :raises: ValueError for invalid input
+        :returns: Boolean value of ovs-use-veth or None
+        :rtype: Optional[Bool]
+        """
+        _config = config("ovs-use-veth")
+        # An unset parameter returns None. Just in case, we also check for
+        # an empty string: "". Ironically (the problem we are trying to
+        # avoid), the string "False" is truthy while "" is falsy.
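+        # Illustrative mapping: 'yes'/'t'/'on' -> True, 'no'/'f'/'off' ->
+        # False, while None or '' falls through to None below (caller then
+        # uses the existing ini value or the default).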
+        if _config is None or not _config:
+            # Return None
+            return
+        # bool_from_string handles many variations of true and false strings
+        # as well as upper and lowercases including:
+        # ['y', 'yes', 'true', 't', 'on', 'n', 'no', 'false', 'f', 'off']
+        return bool_from_string(_config)
+
+    def get_ovs_use_veth(self):
+        """Return correct ovs_use_veth setting for use in dhcp_agent.ini.
+
+        Get the right value from config or the existing dhcp_agent.ini file.
+        Existing has precedence. Attempt to default to "False" without
+        disrupting existing deployments. Handle existing deployments and
+        upgrades safely. See LP Bug#1831935
+
+        :returns: Value to use for ovs_use_veth setting
+        :rtype: Bool
+        """
+        _existing = self.get_existing_ovs_use_veth()
+        if _existing is not None:
+            return _existing
+
+        _config = self.parse_ovs_use_veth()
+        if _config is None:
+            # New better default
+            return False
+        else:
+            return _config
+
+
+EntityMac = collections.namedtuple('EntityMac', ['entity', 'mac'])
+
+
+def resolve_pci_from_mapping_config(config_key):
+    """Resolve local PCI devices from MAC addresses in mapping config.
+
+    Note that this function keeps a record of mac->PCI address lookups
+    in the local unit db as the devices will disappear from the system
+    once bound.
+
+    :param config_key: Configuration option key to parse data from
+    :type config_key: str
+    :returns: PCI device address to Tuple(entity, mac) map
+    :rtype: collections.OrderedDict[str,Tuple[str,str]]
+    """
+    devices = pci.PCINetDevices()
+    resolved_devices = collections.OrderedDict()
+    db = kv()
+    # Note that ``parse_data_port_mappings`` returns Dict regardless of input
+    for mac, entity in parse_data_port_mappings(config(config_key)).items():
+        pcidev = devices.get_device_from_mac(mac)
+        if pcidev:
+            # NOTE: store the mac->pci allocation, as post-binding
+            # it disappears from PCIDevices.
+            db.set(mac, pcidev.pci_address)
+            db.flush()
+
+        pci_address = db.get(mac)
+        if pci_address:
+            resolved_devices[pci_address] = EntityMac(entity, mac)
+
+    return resolved_devices
+
+
+class DPDKDeviceContext(OSContextGenerator):
+
+    def __init__(self, driver_key=None, bridges_key=None, bonds_key=None):
+        """Initialize DPDKDeviceContext.
+
+        :param driver_key: Key to use when retrieving driver config.
+        :type driver_key: str
+        :param bridges_key: Key to use when retrieving bridge config.
+        :type bridges_key: str
+        :param bonds_key: Key to use when retrieving bonds config.
+        :type bonds_key: str
+        """
+        self.driver_key = driver_key or 'dpdk-driver'
+        self.bridges_key = bridges_key or 'data-port'
+        self.bonds_key = bonds_key or 'dpdk-bond-mappings'
+
+    def __call__(self):
+        """Populate context.
+
+        :returns: context
+        :rtype: Dict[str,Union[str,collections.OrderedDict[str,str]]]
+        """
+        driver = config(self.driver_key)
+        if driver is None:
+            return {}
+        # Resolve PCI devices for both directly used devices (_bridges)
+        # and devices for use in dpdk bonds (_bonds)
+        pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
+        pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
+        return {'devices': pci_devices,
+                'driver': driver}
+
+
+class OVSDPDKDeviceContext(OSContextGenerator):
+
+    def __init__(self, bridges_key=None, bonds_key=None):
+        """Initialize OVSDPDKDeviceContext.
+
+        :param bridges_key: Key to use when retrieving bridge config.
+        :type bridges_key: str
+        :param bonds_key: Key to use when retrieving bonds config.
+        :type bonds_key: str
+        """
+        self.bridges_key = bridges_key or 'data-port'
+        self.bonds_key = bonds_key or 'dpdk-bond-mappings'
+
+    @staticmethod
+    def _parse_cpu_list(cpulist):
+        """Parses a linux cpulist for a numa node
+
+        :returns: list of cores
+        :rtype: List[int]
+        """
+        cores = []
+        ranges = cpulist.split(',')
+        for cpu_range in ranges:
+            if "-" in cpu_range:
+                cpu_min_max = cpu_range.split('-')
+                cores += range(int(cpu_min_max[0]),
+                               int(cpu_min_max[1]) + 1)
+            else:
+                cores.append(int(cpu_range))
+        return cores
+
+    def _numa_node_cores(self):
+        """Get map of numa node -> cpu core
+
+        :returns: map of numa node -> cpu core
+        :rtype: Dict[str,List[int]]
+        """
+        nodes = {}
+        node_regex = '/sys/devices/system/node/node*'
+        for node in glob.glob(node_regex):
+            index = node.lstrip('/sys/devices/system/node/node')
+            with open(os.path.join(node, 'cpulist')) as cpulist:
+                nodes[index] = self._parse_cpu_list(cpulist.read().strip())
+        return nodes
+
+    def cpu_mask(self):
+        """Get hex formatted CPU mask
+
+        The mask is based on using the first config:dpdk-socket-cores
+        cores of each NUMA node in the unit.
+        :returns: hex formatted CPU mask
+        :rtype: str
+        """
+        num_cores = config('dpdk-socket-cores')
+        mask = 0
+        for cores in self._numa_node_cores().values():
+            for core in cores[:num_cores]:
+                mask = mask | 1 << core
+        return format(mask, '#04x')
+
+    def socket_memory(self):
+        """Formatted list of socket memory configuration per NUMA node
+
+        :returns: socket memory configuration per NUMA node
+        :rtype: str
+        """
+        sm_size = config('dpdk-socket-memory')
+        node_regex = '/sys/devices/system/node/node*'
+        mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
+        if mem_list:
+            return ','.join(mem_list)
+        else:
+            return str(sm_size)
+
+    def devices(self):
+        """List of PCI devices for use by DPDK
+
+        :returns: List of PCI devices for use by DPDK
+        :rtype: collections.OrderedDict[str,str]
+        """
+        pci_devices = resolve_pci_from_mapping_config(self.bridges_key)
+        pci_devices.update(resolve_pci_from_mapping_config(self.bonds_key))
+        return pci_devices
+
+    def _formatted_whitelist(self, flag):
+        """Flag formatted list of devices to whitelist
+
+        :param flag: flag format to use
+        :type flag: str
+        :rtype: str
+        """
+        whitelist = []
+        for device in self.devices():
+            whitelist.append(flag.format(device=device))
+        return ' '.join(whitelist)
+
+    def device_whitelist(self):
+        """Formatted list of devices to whitelist for dpdk
+
+        using the old style '-w' flag
+
+        :returns: devices to whitelist prefixed by '-w '
+        :rtype: str
+        """
+        return self._formatted_whitelist('-w {device}')
+
+    def pci_whitelist(self):
+        """Formatted list of devices to whitelist for dpdk
+
+        using the new style '--pci-whitelist' flag
+
+        :returns: devices to whitelist prefixed by '--pci-whitelist '
+        :rtype: str
+        """
+        return self._formatted_whitelist('--pci-whitelist {device}')
+
+    def __call__(self):
+        """Populate context.
+
+        :returns: context
+        :rtype: Dict[str,Union[bool,str]]
+        """
+        ctxt = {}
+        whitelist = self.device_whitelist()
+        if whitelist:
+            ctxt['dpdk_enabled'] = config('enable-dpdk')
+            ctxt['device_whitelist'] = self.device_whitelist()
+            ctxt['socket_memory'] = self.socket_memory()
+            ctxt['cpu_mask'] = self.cpu_mask()
+        return ctxt
+
+
+class BridgePortInterfaceMap(object):
+    """Build a map of bridge ports and interfaces from charm configuration.
+
+    NOTE: the handling of this detail in the charm is pre-deprecated.
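+
+    (Illustrative config value for the default 'data-port' key:
+    'br-ex:eth1 br-data:52:54:00:ab:cd:ef', mapping each bridge to an
+    interface name or to the MAC of an interface; names invented.)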
+ + The long term goal is for network connectivity detail to be modelled in + the server provisioning layer (such as MAAS) which in turn will provide + a Netplan YAML description that will be used to drive Open vSwitch. + + Until we get to that reality the charm will need to configure this + detail based on application level configuration options. + + There is a established way of mapping interfaces to ports and bridges + in the ``neutron-openvswitch`` and ``neutron-gateway`` charms and we + will carry that forward. + + The relationship between bridge, port and interface(s). + +--------+ + | bridge | + +--------+ + | + +----------------+ + | port aka. bond | + +----------------+ + | | + +-+ +-+ + |i| |i| + |n| |n| + |t| |t| + |0| |N| + +-+ +-+ + """ + class interface_type(enum.Enum): + """Supported interface types. + + Supported interface types can be found in the ``iface_types`` column + in the ``Open_vSwitch`` table on a running system. + """ + dpdk = 'dpdk' + internal = 'internal' + system = 'system' + + def __str__(self): + """Return string representation of value. + + :returns: string representation of value. + :rtype: str + """ + return self.value + + def __init__(self, bridges_key=None, bonds_key=None, enable_dpdk_key=None, + global_mtu=None): + """Initialize map. + + :param bridges_key: Name of bridge:interface/port map config key + (default: 'data-port') + :type bridges_key: Optional[str] + :param bonds_key: Name of port-name:interface map config key + (default: 'dpdk-bond-mappings') + :type bonds_key: Optional[str] + :param enable_dpdk_key: Name of DPDK toggle config key + (default: 'enable-dpdk') + :type enable_dpdk_key: Optional[str] + :param global_mtu: Set a MTU on all interfaces at map initialization. + + The default is to have Open vSwitch get this from the underlying + interface as set up by bare metal provisioning. + + Note that you can augment the MTU on an individual interface basis + like this: + + ifdatamap = bpi.get_ifdatamap(bridge, port) + ifdatamap = { + port: { + **ifdata, + **{'mtu-request': my_individual_mtu_map[port]}, + } + for port, ifdata in ifdatamap.items() + } + :type global_mtu: Optional[int] + """ + bridges_key = bridges_key or 'data-port' + bonds_key = bonds_key or 'dpdk-bond-mappings' + enable_dpdk_key = enable_dpdk_key or 'enable-dpdk' + self._map = collections.defaultdict( + lambda: collections.defaultdict(dict)) + self._ifname_mac_map = collections.defaultdict(list) + self._mac_ifname_map = {} + self._mac_pci_address_map = {} + + # First we iterate over the list of physical interfaces visible to the + # system and update interface name to mac and mac to interface name map + for ifname in list_nics(): + if not is_phy_iface(ifname): + continue + mac = get_nic_hwaddr(ifname) + self._ifname_mac_map[ifname] = [mac] + self._mac_ifname_map[mac] = ifname + + # check if interface is part of a linux bond + _bond_name = get_bond_master(ifname) + if _bond_name and _bond_name != ifname: + log('Add linux bond "{}" to map for physical interface "{}" ' + 'with mac "{}".'.format(_bond_name, ifname, mac), + level=DEBUG) + # for bonds we want to be able to get a list of the mac + # addresses for the physical interfaces the bond is made up of. 
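+                # (Illustrative: a bond0 built from eth0 and eth1 ends up
+                # as _ifname_mac_map['bond0'] == [mac_eth0, mac_eth1].)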
+ if self._ifname_mac_map.get(_bond_name): + self._ifname_mac_map[_bond_name].append(mac) + else: + self._ifname_mac_map[_bond_name] = [mac] + + # In light of the pre-deprecation notice in the docstring of this + # class we will expose the ability to configure OVS bonds as a + # DPDK-only feature, but generally use the data structures internally. + if config(enable_dpdk_key): + # resolve PCI address of interfaces listed in the bridges and bonds + # charm configuration options. Note that for already bound + # interfaces the helper will retrieve MAC address from the unit + # KV store as the information is no longer available in sysfs. + _pci_bridge_mac = resolve_pci_from_mapping_config( + bridges_key) + _pci_bond_mac = resolve_pci_from_mapping_config( + bonds_key) + + for pci_address, bridge_mac in _pci_bridge_mac.items(): + if bridge_mac.mac in self._mac_ifname_map: + # if we already have the interface name in our map it is + # visible to the system and therefore not bound to DPDK + continue + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[ifname] = [bridge_mac.mac] + self._mac_ifname_map[bridge_mac.mac] = ifname + self._mac_pci_address_map[bridge_mac.mac] = pci_address + + for pci_address, bond_mac in _pci_bond_mac.items(): + # for bonds we want to be able to get a list of macs from + # the bond name and also get at the interface name made up + # of the hash of the PCI address + ifname = 'dpdk-{}'.format( + hashlib.sha1( + pci_address.encode('UTF-8')).hexdigest()[:7]) + self._ifname_mac_map[bond_mac.entity].append(bond_mac.mac) + self._mac_ifname_map[bond_mac.mac] = ifname + self._mac_pci_address_map[bond_mac.mac] = pci_address + + config_bridges = config(bridges_key) or '' + for bridge, ifname_or_mac in ( + pair.split(':', 1) + for pair in config_bridges.split()): + if ':' in ifname_or_mac: + try: + ifname = self.ifname_from_mac(ifname_or_mac) + except KeyError: + # The interface is destined for a different unit in the + # deployment. + continue + macs = [ifname_or_mac] + else: + ifname = ifname_or_mac + macs = self.macs_from_ifname(ifname_or_mac) + + portname = ifname + for mac in macs: + try: + pci_address = self.pci_address_from_mac(mac) + iftype = self.interface_type.dpdk + ifname = self.ifname_from_mac(mac) + except KeyError: + pci_address = None + iftype = self.interface_type.system + + self.add_interface( + bridge, portname, ifname, iftype, pci_address, global_mtu) + + if not macs: + # We have not mapped the interface and it is probably some sort + # of virtual interface. Our user have put it in the config with + # a purpose so let's carry out their wish. LP: #1884743 + log('Add unmapped interface from config: name "{}" bridge "{}"' + .format(ifname, bridge), + level=DEBUG) + self.add_interface( + bridge, ifname, ifname, self.interface_type.system, None, + global_mtu) + + def __getitem__(self, key): + """Provide a Dict-like interface, get value of item. + + :param key: Key to look up value from. + :type key: any + :returns: Value + :rtype: any + """ + return self._map.__getitem__(key) + + def __iter__(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.__iter__() + + def __len__(self): + """Provide a Dict-like interface, measure the length of internal map. + + :returns: Length + :rtype: int + """ + return len(self._map) + + def items(self): + """Provide a Dict-like interface, iterate over items. 
+ + :returns: Key Value pairs + :rtype: Iterator[any, any] + """ + return self._map.items() + + def keys(self): + """Provide a Dict-like interface, iterate over keys. + + :returns: Iterator + :rtype: Iterator[any] + """ + return self._map.keys() + + def ifname_from_mac(self, mac): + """ + :returns: Name of interface + :rtype: str + :raises: KeyError + """ + return (get_bond_master(self._mac_ifname_map[mac]) or + self._mac_ifname_map[mac]) + + def macs_from_ifname(self, ifname): + """ + :returns: List of hardware address (MAC) of interface + :rtype: List[str] + :raises: KeyError + """ + return self._ifname_mac_map[ifname] + + def pci_address_from_mac(self, mac): + """ + :param mac: Hardware address (MAC) of interface + :type mac: str + :returns: PCI address of device associated with mac + :rtype: str + :raises: KeyError + """ + return self._mac_pci_address_map[mac] + + def add_interface(self, bridge, port, ifname, iftype, + pci_address, mtu_request): + """Add an interface to the map. + + :param bridge: Name of bridge on which the bond will be added + :type bridge: str + :param port: Name of port which will represent the bond on bridge + :type port: str + :param ifname: Name of interface that will make up the bonded port + :type ifname: str + :param iftype: Type of interface + :type iftype: BridgeBondMap.interface_type + :param pci_address: PCI address of interface + :type pci_address: Optional[str] + :param mtu_request: MTU to request for interface + :type mtu_request: Optional[int] + """ + self._map[bridge][port][ifname] = { + 'type': str(iftype), + } + if pci_address: + self._map[bridge][port][ifname].update({ + 'pci-address': pci_address, + }) + if mtu_request is not None: + self._map[bridge][port][ifname].update({ + 'mtu-request': str(mtu_request) + }) + + def get_ifdatamap(self, bridge, port): + """Get structure suitable for charmhelpers.contrib.network.ovs helpers. + + :param bridge: Name of bridge on which the port will be added + :type bridge: str + :param port: Name of port which will represent one or more interfaces + :type port: str + """ + for _bridge, _ports in self.items(): + for _port, _interfaces in _ports.items(): + if _bridge == bridge and _port == port: + ifdatamap = {} + for name, data in _interfaces.items(): + ifdatamap.update({ + name: { + 'type': data['type'], + }, + }) + if data.get('mtu-request') is not None: + ifdatamap[name].update({ + 'mtu_request': data['mtu-request'], + }) + if data.get('pci-address'): + ifdatamap[name].update({ + 'options': { + 'dpdk-devargs': data['pci-address'], + }, + }) + return ifdatamap + + +class BondConfig(object): + """Container and helpers for bond configuration options. + + Data is put into a dictionary and a convenient config get interface is + provided. + """ + + DEFAULT_LACP_CONFIG = { + 'mode': 'balance-tcp', + 'lacp': 'active', + 'lacp-time': 'fast' + } + ALL_BONDS = 'ALL_BONDS' + + BOND_MODES = ['active-backup', 'balance-slb', 'balance-tcp'] + BOND_LACP = ['active', 'passive', 'off'] + BOND_LACP_TIME = ['fast', 'slow'] + + def __init__(self, config_key=None): + """Parse specified configuration option. 
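+
+        Illustrative value for the default ``dpdk-bond-config`` key:
+        ``:balance-slb:off:slow dpdk-bond0:active-backup:active:fast``,
+        i.e. a fallback entry for all bonds plus an override for one
+        named bond (names invented for this example).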
+
+        :param config_key: Configuration key to retrieve data from
+                           (default: ``dpdk-bond-config``)
+        :type config_key: Optional[str]
+        """
+        self.config_key = config_key or 'dpdk-bond-config'
+
+        self.lacp_config = {
+            self.ALL_BONDS: copy.deepcopy(self.DEFAULT_LACP_CONFIG)
+        }
+
+        lacp_config = config(self.config_key)
+        if lacp_config:
+            lacp_config_map = lacp_config.split()
+            for entry in lacp_config_map:
+                bond, entry = entry.partition(':')[0:3:2]
+                if not bond:
+                    bond = self.ALL_BONDS
+
+                mode, entry = entry.partition(':')[0:3:2]
+                if not mode:
+                    mode = self.DEFAULT_LACP_CONFIG['mode']
+                assert mode in self.BOND_MODES, \
+                    "Bond mode {} is invalid".format(mode)
+
+                lacp, entry = entry.partition(':')[0:3:2]
+                if not lacp:
+                    lacp = self.DEFAULT_LACP_CONFIG['lacp']
+                assert lacp in self.BOND_LACP, \
+                    "Bond lacp {} is invalid".format(lacp)
+
+                lacp_time, entry = entry.partition(':')[0:3:2]
+                if not lacp_time:
+                    lacp_time = self.DEFAULT_LACP_CONFIG['lacp-time']
+                assert lacp_time in self.BOND_LACP_TIME, \
+                    "Bond lacp-time {} is invalid".format(lacp_time)
+
+                self.lacp_config[bond] = {
+                    'mode': mode,
+                    'lacp': lacp,
+                    'lacp-time': lacp_time
+                }
+
+    def get_bond_config(self, bond):
+        """Get the LACP configuration for a bond
+
+        :param bond: the bond name
+        :return: a dictionary with the configuration of the bond
+        :rtype: Dict[str,Dict[str,str]]
+        """
+        return self.lacp_config.get(bond, self.lacp_config[self.ALL_BONDS])
+
+    def get_ovs_portdata(self, bond):
+        """Get structure suitable for charmhelpers.contrib.network.ovs helpers.
+
+        :param bond: the bond name
+        :return: a dictionary with the configuration of the bond
+        :rtype: Dict[str,Union[str,Dict[str,str]]]
+        """
+        bond_config = self.get_bond_config(bond)
+        return {
+            'bond_mode': bond_config['mode'],
+            'lacp': bond_config['lacp'],
+            'other_config': {
+                'lacp-time': bond_config['lacp-time'],
+            },
+        }
+
+
+class SRIOVContext(OSContextGenerator):
+    """Provide context for configuring SR-IOV devices."""
+
+    class sriov_config_mode(enum.Enum):
+        """Mode in which SR-IOV is configured.
+
+        The configuration option identified by the ``numvfs_key`` parameter
+        is overloaded and defines in which mode the charm should interpret
+        the other SR-IOV-related configuration options.
+        """
+        auto = 'auto'
+        blanket = 'blanket'
+        explicit = 'explicit'
+
+    def _determine_numvfs(self, device, sriov_numvfs):
+        """Determine number of Virtual Functions (VFs) configured for device.
+
+        :param device: Object describing a PCI Network interface card (NIC).
+        :type device: sriov_netplan_shim.pci.PCINetDevice
+        :param sriov_numvfs: Number of VFs requested for blanket configuration.
+        :type sriov_numvfs: int
+        :returns: Number of VFs to configure for device
+        :rtype: Optional[int]
+        """
+
+        def _get_capped_numvfs(requested):
+            """Get a number of VFs that does not exceed individual card limits.
+
+            Depending on the make and model of NIC, the number of VFs
+            supported varies. Requesting more VFs than a card supports would
+            be a fatal error, so cap the requested number at the total number
+            of VFs each individual card supports.
+
+            :param requested: Number of VFs requested
+            :type requested: int
+            :returns: Number of VFs allowed
+            :rtype: int
+            """
+            actual = min(int(requested), int(device.sriov_totalvfs))
+            if actual < int(requested):
+                log('Requested VFs ({}) too high for device {}. Falling back '
+                    'to value supported by device: {}'
+                    .format(requested, device.interface_name,
+                            device.sriov_totalvfs),
+                    level=WARNING)
+            return actual
+
+        if self._sriov_config_mode == self.sriov_config_mode.auto:
+            # auto-mode
+            #
+            # If device mapping configuration is present, return information
+            # on cards with mapping.
+            #
+            # If no device mapping configuration is present, return
+            # information for all cards.
+            #
+            # The maximum number of VFs supported by card will be used.
+            if (self._sriov_mapped_devices and
+                    device.interface_name not in self._sriov_mapped_devices):
+                log('SR-IOV configured in auto mode: No device mapping for {}'
+                    .format(device.interface_name),
+                    level=DEBUG)
+                return
+            return _get_capped_numvfs(device.sriov_totalvfs)
+        elif self._sriov_config_mode == self.sriov_config_mode.blanket:
+            # blanket-mode
+            #
+            # User has specified a number of VFs that should apply to all
+            # cards with support for VFs.
+            return _get_capped_numvfs(sriov_numvfs)
+        elif self._sriov_config_mode == self.sriov_config_mode.explicit:
+            # explicit-mode
+            #
+            # User has given a list of interface names and associated number
+            # of VFs
+            if device.interface_name not in self._sriov_config_devices:
+                log('SR-IOV configured in explicit mode: No device:numvfs '
+                    'pair for device {}, skipping.'
+                    .format(device.interface_name),
+                    level=DEBUG)
+                return
+            return _get_capped_numvfs(
+                self._sriov_config_devices[device.interface_name])
+        else:
+            raise RuntimeError('This should not be reached')
+
+    def __init__(self, numvfs_key=None, device_mappings_key=None):
+        """Initialize map from PCI devices and configuration options.
+
+        :param numvfs_key: Config key for numvfs (default: 'sriov-numvfs')
+        :type numvfs_key: Optional[str]
+        :param device_mappings_key: Config key for device mappings
+                                    (default: 'sriov-device-mappings')
+        :type device_mappings_key: Optional[str]
+        :raises: RuntimeError
+        """
+        numvfs_key = numvfs_key or 'sriov-numvfs'
+        device_mappings_key = device_mappings_key or 'sriov-device-mappings'
+
+        devices = pci.PCINetDevices()
+        charm_config = config()
+        sriov_numvfs = charm_config.get(numvfs_key) or ''
+        sriov_device_mappings = charm_config.get(device_mappings_key) or ''
+
+        # create list of devices from sriov_device_mappings config option
+        self._sriov_mapped_devices = [
+            pair.split(':', 1)[1]
+            for pair in sriov_device_mappings.split()
+        ]
+
+        # create map of device:numvfs from sriov_numvfs config option
+        self._sriov_config_devices = {
+            ifname: numvfs for ifname, numvfs in (
+                pair.split(':', 1) for pair in sriov_numvfs.split()
+                if ':' in sriov_numvfs)
+        }
+
+        # determine configuration mode from contents of sriov_numvfs
+        if sriov_numvfs == 'auto':
+            self._sriov_config_mode = self.sriov_config_mode.auto
+        elif sriov_numvfs.isdigit():
+            self._sriov_config_mode = self.sriov_config_mode.blanket
+        elif ':' in sriov_numvfs:
+            self._sriov_config_mode = self.sriov_config_mode.explicit
+        else:
+            raise RuntimeError('Unable to determine mode of SR-IOV '
+                               'configuration.')
+
+        self._map = {
+            device.interface_name: self._determine_numvfs(device, sriov_numvfs)
+            for device in devices.pci_devices
+            if device.sriov and
+            self._determine_numvfs(device, sriov_numvfs) is not None
+        }
+
+    def __call__(self):
+        """Provide SR-IOV context.
+
+        :returns: Map interface name: min(configured, max) virtual functions.
+            Example:
+                {
+                    'eth0': 16,
+                    'eth1': 32,
+                    'eth2': 64,
+                }
+        :rtype: Dict[str,int]
+        """
+        return self._map
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/exceptions.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/exceptions.py
new file mode 100644
index 00000000..f85ae4f4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/exceptions.py
@@ -0,0 +1,21 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class OSContextError(Exception):
+    """Raised when an error occurs during context generation.
+
+    This exception is principally used in contrib.openstack.context
+    """
+    pass
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/__init__.py
new file mode 100644
index 00000000..9df5f746
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh
new file mode 100755
index 00000000..1df55db4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+CRITICAL=0
+NOTACTIVE=''
+LOGFILE=/var/log/nagios/check_haproxy.log
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
+
+typeset -i N_INSTANCES=0
+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
+do
+    N_INSTANCES=N_INSTANCES+1
+    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
+    if [ $? != 0 ]; then
+        date >> $LOGFILE
+        echo $output >> $LOGFILE
+        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
+        CRITICAL=1
+        NOTACTIVE="${NOTACTIVE} $appserver"
+    fi
+done
+
+if [ $CRITICAL = 1 ]; then
+    echo "CRITICAL:${NOTACTIVE}"
+    exit 2
+fi
+
+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
+exit 0
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
new file mode 100755
index 00000000..91ce0246
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+#--------------------------------------------
+# This file is managed by Juju
+#--------------------------------------------
+#
+# Copyright 2009,2012 Canonical Ltd.
+# Author: Tom Haddon
+
+# These should be config options at some stage
+CURRQthrsh=0
+MAXQthrsh=100
+
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR==1{print $3}')
+
+HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
+
+for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
+do
+    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
+    MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
+
+    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
+        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
+        exit 2
+    fi
+done
+
+echo "OK: All haproxy queue depths looking good"
+exit 0
+
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/__init__.py
new file mode 100644
index 00000000..9b088de8
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/utils.py
new file mode 100644
index 00000000..a5cbdf53
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ha/utils.py
@@ -0,0 +1,348 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2016 Canonical Ltd. +# +# Authors: +# Openstack Charmers < +# + +""" +Helpers for high availability. +""" + +import hashlib +import json + +import re + +from charmhelpers.core.hookenv import ( + expected_related_units, + log, + relation_set, + charm_name, + config, + status_set, + DEBUG, +) + +from charmhelpers.core.host import ( + lsb_release +) + +from charmhelpers.contrib.openstack.ip import ( + resolve_address, + is_ipv6, +) + +from charmhelpers.contrib.network.ip import ( + get_iface_for_address, + get_netmask_for_address, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + get_hacluster_config +) + +JSON_ENCODE_OPTIONS = dict( + sort_keys=True, + allow_nan=False, + indent=None, + separators=(',', ':'), +) + +VIP_GROUP_NAME = 'grp_{service}_vips' +DNSHA_GROUP_NAME = 'grp_{service}_hostnames' + + +class DNSHAException(Exception): + """Raised when an error occurs setting up DNS HA + """ + + pass + + +def update_dns_ha_resource_params(resources, resource_params, + relation_id=None, + crm_ocf='ocf:maas:dns'): + """ Configure DNS-HA resources based on provided configuration and + update resource dictionaries for the HA relation. + + @param resources: Pointer to dictionary of resources. + Usually instantiated in ha_joined(). + @param resource_params: Pointer to dictionary of resource parameters. + Usually instantiated in ha_joined() + @param relation_id: Relation ID of the ha relation + @param crm_ocf: Corosync Open Cluster Framework resource agent to use for + DNS HA + """ + _relation_data = {'resources': {}, 'resource_params': {}} + update_hacluster_dns_ha(charm_name(), + _relation_data, + crm_ocf) + resources.update(_relation_data['resources']) + resource_params.update(_relation_data['resource_params']) + relation_set(relation_id=relation_id, groups=_relation_data['groups']) + + +def assert_charm_supports_dns_ha(): + """Validate prerequisites for DNS HA + The MAAS client is only available on Xenial or greater + + :raises DNSHAException: if release is < 16.04 + """ + if lsb_release().get('DISTRIB_RELEASE') < '16.04': + msg = ('DNS HA is only supported on 16.04 and greater ' + 'versions of Ubuntu.') + status_set('blocked', msg) + raise DNSHAException(msg) + return True + + +def expect_ha(): + """ Determine if the unit expects to be in HA + + Check juju goal-state if ha relation is expected, check for VIP or dns-ha + settings which indicate the unit should expect to be related to hacluster. + + @returns boolean + """ + ha_related_units = [] + try: + ha_related_units = list(expected_related_units(reltype='ha')) + except (NotImplementedError, KeyError): + pass + return len(ha_related_units) > 0 or config('vip') or config('dns-ha') + + +def generate_ha_relation_data(service, + extra_settings=None, + haproxy_enabled=True): + """ Generate relation data for ha relation + + Based on configuration options and unit interfaces, generate a json + encoded dict of relation data items for the hacluster relation, + providing configuration for DNS HA or VIP's + haproxy clone sets. 
+
+    Example of supplying additional settings::
+
+        COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
+        AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
+        AGENT_CA_PARAMS = 'op monitor interval="5s"'
+
+        ha_console_settings = {
+            'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
+            'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
+            'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
+            'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
+        generate_ha_relation_data('nova', extra_settings=ha_console_settings)
+
+
+    @param service: Name of the service being configured
+    @param extra_settings: Dict of additional resource data
+    @returns dict: json encoded data for use with relation_set
+    """
+    _relation_data = {'resources': {}, 'resource_params': {}}
+
+    if haproxy_enabled:
+        _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"'
+        _haproxy_res = 'res_{}_haproxy'.format(service)
+        _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
+        _relation_data['resource_params'] = {
+            _haproxy_res: '{} op monitor interval="5s"'.format(_meta)
+        }
+        _relation_data['init_services'] = {_haproxy_res: 'haproxy'}
+        _relation_data['clones'] = {
+            'cl_{}_haproxy'.format(service): _haproxy_res
+        }
+
+    if extra_settings:
+        for k, v in extra_settings.items():
+            if _relation_data.get(k):
+                _relation_data[k].update(v)
+            else:
+                _relation_data[k] = v
+
+    if config('dns-ha'):
+        update_hacluster_dns_ha(service, _relation_data)
+    else:
+        update_hacluster_vip(service, _relation_data)
+
+    return {
+        'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+        for k, v in _relation_data.items() if v
+    }
+
+
+def update_hacluster_dns_ha(service, relation_data,
+                            crm_ocf='ocf:maas:dns'):
+    """ Configure DNS-HA resources based on provided configuration
+
+    @param service: Name of the service being configured
+    @param relation_data: Pointer to dictionary of relation data.
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
+    # Validate the charm environment for DNS HA
+    assert_charm_supports_dns_ha()
+
+    settings = ['os-admin-hostname', 'os-internal-hostname',
+                'os-public-hostname', 'os-access-hostname']
+
+    # Check which DNS settings are set and update dictionaries
+    hostname_group = []
+    for setting in settings:
+        hostname = config(setting)
+        if hostname is None:
+            log('DNS HA: Hostname setting {} is None. Ignoring.'
+                ''.format(setting),
+                DEBUG)
+            continue
+        m = re.search('os-(.+?)-hostname', setting)
+        if m:
+            endpoint_type = m.group(1)
+            # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
+            if endpoint_type == 'internal':
+                endpoint_type = 'int'
+        else:
+            msg = ('Unexpected DNS hostname setting: {}. '
+                   'Cannot determine endpoint_type name'
+                   ''.format(setting))
+            status_set('blocked', msg)
+            raise DNSHAException(msg)
+
+        hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
+        if hostname_key in hostname_group:
+            log('DNS HA: Resource {}: {} already exists in '
+                'hostname group - skipping'.format(hostname_key, hostname),
+                DEBUG)
+            continue
+
+        hostname_group.append(hostname_key)
+        relation_data['resources'][hostname_key] = crm_ocf
+        relation_data['resource_params'][hostname_key] = (
+            'params fqdn="{}" ip_address="{}"'
+            .format(hostname, resolve_address(endpoint_type=endpoint_type,
+                                              override=False)))
+
+    if len(hostname_group) >= 1:
+        log('DNS HA: Hostname group is set with {} as members. 
' + 'Informing the ha relation'.format(' '.join(hostname_group)), + DEBUG) + relation_data['groups'] = { + DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group) + } + else: + msg = 'DNS HA: Hostname group has no members.' + status_set('blocked', msg) + raise DNSHAException(msg) + + +def get_vip_settings(vip): + """Calculate which nic is on the correct network for the given vip. + + If nic or netmask discovery fail then fallback to using charm supplied + config. If fallback is used this is indicated via the fallback variable. + + @param vip: VIP to lookup nic and cidr for. + @returns (str, str, bool): eg (iface, netmask, fallback) + """ + iface = get_iface_for_address(vip) + netmask = get_netmask_for_address(vip) + fallback = False + if iface is None: + iface = config('vip_iface') + fallback = True + if netmask is None: + netmask = config('vip_cidr') + fallback = True + return iface, netmask, fallback + + +def update_hacluster_vip(service, relation_data): + """ Configure VIP resources based on provided configuration + + @param service: Name of the service being configured + @param relation_data: Pointer to dictionary of relation data. + """ + cluster_config = get_hacluster_config() + vip_group = [] + vips_to_delete = [] + for vip in cluster_config['vip'].split(): + if is_ipv6(vip): + res_vip = 'ocf:heartbeat:IPv6addr' + vip_params = 'ipv6addr' + else: + res_vip = 'ocf:heartbeat:IPaddr2' + vip_params = 'ip' + + iface, netmask, fallback = get_vip_settings(vip) + + vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"' + if iface is not None: + # NOTE(jamespage): Delete old VIP resources + # Old style naming encoding iface in name + # does not work well in environments where + # interface/subnet wiring is not consistent + vip_key = 'res_{}_{}_vip'.format(service, iface) + if vip_key in vips_to_delete: + vip_key = '{}_{}'.format(vip_key, vip_params) + vips_to_delete.append(vip_key) + + vip_key = 'res_{}_{}_vip'.format( + service, + hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7]) + + relation_data['resources'][vip_key] = res_vip + # NOTE(jamespage): + # Use option provided vip params if these where used + # instead of auto-detected values + if fallback: + relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" cidr_netmask="{netmask}" ' + 'nic="{iface}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + iface=iface, + netmask=netmask, + vip_monitoring=vip_monitoring)) + else: + # NOTE(jamespage): + # let heartbeat figure out which interface and + # netmask to configure, which works nicely + # when network interface naming is not + # consistent across units. 
+ relation_data['resource_params'][vip_key] = ( + 'params {ip}="{vip}" {vip_monitoring}'.format( + ip=vip_params, + vip=vip, + vip_monitoring=vip_monitoring)) + + vip_group.append(vip_key) + + if vips_to_delete: + try: + relation_data['delete_resources'].extend(vips_to_delete) + except KeyError: + relation_data['delete_resources'] = vips_to_delete + + if len(vip_group) >= 1: + key = VIP_GROUP_NAME.format(service=service) + try: + relation_data['groups'][key] = ' '.join(vip_group) + except KeyError: + relation_data['groups'] = { + key: ' '.join(vip_group) + } diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ip.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..723aebc1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,197 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from charmhelpers.core.hookenv import ( + NoNetworkBinding, + config, + unit_get, + service_name, + network_get_primary_address, +) +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, + get_ipv6_addr, + resolve_network_cidr, +) +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' +ACCESS = 'access' + +ADDRESS_MAP = { + PUBLIC: { + 'binding': 'public', + 'config': 'os-public-network', + 'fallback': 'public-address', + 'override': 'os-public-hostname', + }, + INTERNAL: { + 'binding': 'internal', + 'config': 'os-internal-network', + 'fallback': 'private-address', + 'override': 'os-internal-hostname', + }, + ADMIN: { + 'binding': 'admin', + 'config': 'os-admin-network', + 'fallback': 'private-address', + 'override': 'os-admin-hostname', + }, + ACCESS: { + 'binding': 'access', + 'config': 'access-network', + 'fallback': 'private-address', + 'override': 'os-access-hostname', + }, +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + """Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :param endpoint_type: str endpoint type to resolve. + :param returns: str base URL for services on the current service unit. + """ + scheme = _get_scheme(configs) + + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + + return '%s://%s' % (scheme, address) + + +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. 
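+
+    Illustrative example (editorial sketch, not upstream documentation): a
+    falsy configs object yields the default scheme::
+
+        >>> _get_scheme(None)
+        'http'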
+ """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. + :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + +def resolve_address(endpoint_type=PUBLIC, override=True): + """Return unit address depending on net config. + + If unit is clustered with vip(s) and has net splits defined, return vip on + correct network. If clustered with no nets defined, return primary vip. + + If not clustered, return unit address ensuring address is on configured net + split if one is configured, or a Juju 2.0 extra-binding has been used. + + :param endpoint_type: Network endpoing type + :param override: Accept hostname overrides or not + """ + resolved_address = None + if override: + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + + vips = config('vip') + if vips: + vips = vips.split() + + net_type = ADDRESS_MAP[endpoint_type]['config'] + net_addr = config(net_type) + net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] + binding = ADDRESS_MAP[endpoint_type]['binding'] + clustered = is_clustered() + + if clustered and vips: + if net_addr: + for vip in vips: + if is_address_in_network(net_addr, vip): + resolved_address = vip + break + else: + # NOTE: endeavour to check vips against network space + # bindings + try: + bound_cidr = resolve_network_cidr( + network_get_primary_address(binding) + ) + for vip in vips: + if is_address_in_network(bound_cidr, vip): + resolved_address = vip + break + except (NotImplementedError, NoNetworkBinding): + # If no net-splits configured and no support for extra + # bindings/network spaces so we expect a single vip + resolved_address = vips[0] + else: + if config('prefer-ipv6'): + fallback_addr = get_ipv6_addr(exc_list=vips)[0] + else: + fallback_addr = unit_get(net_fallback) + + if net_addr: + resolved_address = get_address_in_network(net_addr, fallback_addr) + else: + # NOTE: only try to use extra bindings if legacy network + # configuration is not in use + try: + resolved_address = network_get_primary_address(binding) + except (NotImplementedError, NoNetworkBinding): + resolved_address = fallback_addr + + if resolved_address is None: + raise ValueError("Unable to resolve a suitable IP address based on " + "charm state and configuration. 
(net_type=%s, "
+                         "clustered=%s)" % (net_type, clustered))
+
+    return resolved_address
+
+
+def get_vip_in_network(network):
+    matching_vip = None
+    vips = config('vip')
+    if vips:
+        for vip in vips.split():
+            if is_address_in_network(network, vip):
+                matching_vip = vip
+    return matching_vip
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/keystone.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/keystone.py
new file mode 100644
index 00000000..d7e02ccd
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/keystone.py
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+#
+# Copyright 2017 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+from charmhelpers.fetch import apt_install
+from charmhelpers.contrib.openstack.context import IdentityServiceContext
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+)
+
+
+def get_api_suffix(api_version):
+    """Return the formatted api suffix for the given version
+    @param api_version: version of the keystone endpoint
+    @returns the api suffix formatted according to the given api
+             version
+    """
+    return 'v2.0' if api_version in (2, "2", "2.0") else 'v3'
+
+
+def format_endpoint(schema, addr, port, api_version):
+    """Return a formatted keystone endpoint
+    @param schema: http or https
+    @param addr: ipv4/ipv6 host of the keystone service
+    @param port: port of the keystone service
+    @param api_version: 2 or 3
+    @returns a fully formatted keystone endpoint
+    """
+    return '{}://{}:{}/{}/'.format(schema, addr, port,
+                                   get_api_suffix(api_version))
+
+
+def get_keystone_manager(endpoint, api_version, **kwargs):
+    """Return a keystonemanager for the correct API version
+
+    @param endpoint: the keystone endpoint to point client at
+    @param api_version: version of the keystone api the client should use
+    @param kwargs: token or username/tenant/password information
+    @returns keystonemanager class used for interrogating keystone
+    """
+    if api_version == 2:
+        return KeystoneManager2(endpoint, **kwargs)
+    if api_version == 3:
+        return KeystoneManager3(endpoint, **kwargs)
+    raise ValueError('No manager found for api version {}'.format(api_version))
+
+
+def get_keystone_manager_from_identity_service_context():
+    """Return a keystonemanager generated from an instance
+    of charmhelpers.contrib.openstack.context.IdentityServiceContext
+    @returns keystonemanager instance
+    """
+    context = IdentityServiceContext()()
+    if not context:
+        msg = "Identity service context cannot be generated"
+        log(msg, level=ERROR)
+        raise ValueError(msg)
+
+    endpoint = format_endpoint(context['service_protocol'],
+                               context['service_host'],
+                               context['service_port'],
+                               context['api_version'])
+
+    if context['api_version'] in (2, "2.0"):
+        api_version = 2
+    else:
+        api_version = 3
+
+    return get_keystone_manager(endpoint, api_version,
+                                username=context['admin_user'],
+                                password=context['admin_password'],
+                                tenant_name=context['admin_tenant_name'])
+
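+
+# NOTE: illustrative usage sketch (editorial addition, not upstream code);
+# the endpoint address, port and credentials below are assumptions chosen
+# purely for the example.
+#
+#     endpoint = format_endpoint('https', '10.0.0.10', 5000, 3)
+#     # endpoint == 'https://10.0.0.10:5000/v3/'
+#     manager = get_keystone_manager(endpoint, 3, username='admin',
+#                                    password='secret', tenant_name='admin')
+#     manager.service_exists(service_name='keystone',
+#                            service_type='identity')
+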
+class KeystoneManager(object): + + def resolve_service_id(self, service_name=None, service_type=None): + """Find the service_id of a given service""" + services = [s._info for s in self.api.services.list()] + + service_name = service_name.lower() + for s in services: + name = s['name'].lower() + if service_type and service_name: + if (service_name == name and service_type == s['type']): + return s['id'] + elif service_name and service_name == name: + return s['id'] + elif service_type and service_type == s['type']: + return s['id'] + return None + + def service_exists(self, service_name=None, service_type=None): + """Determine if the given service exists on the service list""" + return self.resolve_service_id(service_name, service_type) is not None + + +class KeystoneManager2(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v2_0 import client + from keystoneclient.auth.identity import v2 + from keystoneclient import session + + self.api_version = 2 + + token = kwargs.get("token", None) + if token: + api = client.Client(endpoint=endpoint, token=token) + else: + auth = v2.Password(username=kwargs.get("username"), + password=kwargs.get("password"), + tenant_name=kwargs.get("tenant_name"), + auth_url=endpoint) + sess = session.Session(auth=auth) + api = client.Client(session=sess) + + self.api = api + + +class KeystoneManager3(KeystoneManager): + + def __init__(self, endpoint, **kwargs): + try: + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + except ImportError: + if six.PY2: + apt_install(["python-keystoneclient"], fatal=True) + else: + apt_install(["python3-keystoneclient"], fatal=True) + + from keystoneclient.v3 import client + from keystoneclient.auth import token_endpoint + from keystoneclient import session + from keystoneclient.auth.identity import v3 + + self.api_version = 3 + + token = kwargs.get("token", None) + if token: + auth = token_endpoint.Token(endpoint=endpoint, + token=token) + sess = session.Session(auth=auth) + else: + auth = v3.Password(auth_url=endpoint, + user_id=kwargs.get("username"), + password=kwargs.get("password"), + project_id=kwargs.get("tenant_name")) + sess = session.Session(auth=auth) + + self.api = client.Client(session=sess) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/neutron.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/neutron.py new file mode 100644 index 00000000..fb5607f3 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/neutron.py @@ -0,0 +1,359 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Various utilies for dealing with Neutron and the renaming from Quantum. + +import six +from subprocess import check_output + +from charmhelpers.core.hookenv import ( + config, + log, + ERROR, +) + +from charmhelpers.contrib.openstack.utils import ( + os_release, + CompareOpenStackReleases, +) + + +def headers_package(): + """Ensures correct linux-headers for running kernel are installed, + for building DKMS package""" + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + return 'linux-headers-%s' % kver + + +QUANTUM_CONF_DIR = '/etc/quantum' + + +def kernel_version(): + """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ + kver = check_output(['uname', '-r']).decode('UTF-8').strip() + kver = kver.split('.') + return (int(kver[0]), int(kver[1])) + + +def determine_dkms_package(): + """ Determine which DKMS package should be used based on kernel version """ + # NOTE: 3.13 kernels have support for GRE and VXLAN native + if kernel_version() >= (3, 13): + return [] + else: + return [headers_package(), 'openvswitch-datapath-dkms'] + + +# legacy + + +def quantum_plugins(): + return { + 'ovs': { + 'config': '/etc/quantum/plugins/openvswitch/' + 'ovs_quantum_plugin.ini', + 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' + 'OVSQuantumPluginV2', + 'contexts': [], + 'services': ['quantum-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['quantum-plugin-openvswitch-agent']], + 'server_packages': ['quantum-server', + 'quantum-plugin-openvswitch'], + 'server_services': ['quantum-server'] + }, + 'nvp': { + 'config': '/etc/quantum/plugins/nicira/nvp.ini', + 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' + 'QuantumPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['quantum-server', + 'quantum-plugin-nicira'], + 'server_services': ['quantum-server'] + } + } + + +NEUTRON_CONF_DIR = '/etc/neutron' + + +def neutron_plugins(): + release = os_release('nova-common') + plugins = { + 'ovs': { + 'config': '/etc/neutron/plugins/openvswitch/' + 'ovs_neutron_plugin.ini', + 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' + 'OVSNeutronPluginV2', + 'contexts': [], + 'services': ['neutron-plugin-openvswitch-agent'], + 'packages': [determine_dkms_package(), + ['neutron-plugin-openvswitch-agent']], + 'server_packages': ['neutron-server', + 'neutron-plugin-openvswitch'], + 'server_services': ['neutron-server'] + }, + 'nvp': { + 'config': '/etc/neutron/plugins/nicira/nvp.ini', + 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' 
+ 'NeutronPlugin.NvpPluginV2', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-nicira'], + 'server_services': ['neutron-server'] + }, + 'nsx': { + 'config': '/etc/neutron/plugins/vmware/nsx.ini', + 'driver': 'vmware', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', + 'neutron-plugin-vmware'], + 'server_services': ['neutron-server'] + }, + 'n1kv': { + 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', + 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package(), + ['neutron-plugin-cisco']], + 'server_packages': ['neutron-server', + 'neutron-plugin-cisco'], + 'server_services': ['neutron-server'] + }, + 'Calico': { + 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', + 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', + 'contexts': [], + 'services': ['calico-felix', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd'], + 'packages': [determine_dkms_package(), + ['calico-compute', + 'bird', + 'neutron-dhcp-agent', + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] + }, + 'vsp': { + 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', + 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', + 'contexts': [], + 'services': [], + 'packages': [], + 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], + 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': ('neutron.plugins.plumgrid.plumgrid_plugin' + '.plumgrid_plugin.NeutronPluginPLUMgridV2'), + 'contexts': [], + 'services': [], + 'packages': ['plumgrid-lxc', + 'iovisor-dkms'], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] + }, + 'midonet': { + 'config': '/etc/neutron/plugins/midonet/midonet.ini', + 'driver': 'midonet.neutron.plugin.MidonetPluginV2', + 'contexts': [], + 'services': [], + 'packages': [determine_dkms_package()], + 'server_packages': ['neutron-server', + 'python-neutron-plugin-midonet'], + 'server_services': ['neutron-server'] + } + } + if CompareOpenStackReleases(release) >= 'icehouse': + # NOTE: patch in ml2 plugin for icehouse onwards + plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['ovs']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + # NOTE: patch in vmware renames nvp->nsx for icehouse onwards + plugins['nvp'] = plugins['nsx'] + if CompareOpenStackReleases(release) >= 'kilo': + plugins['midonet']['driver'] = ( + 'neutron.plugins.midonet.plugin.MidonetPluginV2') + if CompareOpenStackReleases(release) >= 'liberty': + plugins['midonet']['driver'] = ( + 'midonet.neutron.plugin_v1.MidonetPluginV2') + plugins['midonet']['server_packages'].remove( + 'python-neutron-plugin-midonet') + plugins['midonet']['server_packages'].append( + 'python-networking-midonet') + plugins['plumgrid']['driver'] = ( + 'networking_plumgrid.neutron.plugins' + '.plugin.NeutronPluginPLUMgridV2') + plugins['plumgrid']['server_packages'].remove( + 'neutron-plugin-plumgrid') + if CompareOpenStackReleases(release) >= 'mitaka': + plugins['nsx']['server_packages'].remove('neutron-plugin-vmware') + plugins['nsx']['server_packages'].append('python-vmware-nsx') + plugins['nsx']['config'] = 
'/etc/neutron/nsx.ini' + plugins['vsp']['driver'] = ( + 'nuage_neutron.plugins.nuage.plugin.NuagePlugin') + if CompareOpenStackReleases(release) >= 'newton': + plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' + plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' + plugins['vsp']['server_packages'] = ['neutron-server', + 'neutron-plugin-ml2'] + return plugins + + +def neutron_plugin_attribute(plugin, attr, net_manager=None): + manager = net_manager or network_manager() + if manager == 'quantum': + plugins = quantum_plugins() + elif manager == 'neutron': + plugins = neutron_plugins() + else: + log("Network manager '%s' does not support plugins." % (manager), + level=ERROR) + raise Exception + + try: + _plugin = plugins[plugin] + except KeyError: + log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) + raise Exception + + try: + return _plugin[attr] + except KeyError: + return None + + +def network_manager(): + ''' + Deals with the renaming of Quantum to Neutron in H and any situations + that require compatability (eg, deploying H with network-manager=quantum, + upgrading from G). + ''' + release = os_release('nova-common') + manager = config('network-manager').lower() + + if manager not in ['quantum', 'neutron']: + return manager + + if release in ['essex']: + # E does not support neutron + log('Neutron networking not supported in Essex.', level=ERROR) + raise Exception + elif release in ['folsom', 'grizzly']: + # neutron is named quantum in F and G + return 'quantum' + else: + # ensure accurate naming for all releases post-H + return 'neutron' + + +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ + parsed = {} + if mappings: + mappings = mappings.split() + for m in mappings: + p = m.partition(':') + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue + else: + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() + + return parsed + + +def parse_bridge_mappings(mappings): + """Parse bridge mappings. + + Mappings must be a space-delimited list of provider:bridge mappings. + + Returns dict of the form {provider:bridge}. + """ + return parse_mappings(mappings) + + +def parse_data_port_mappings(mappings, default_bridge='br-data'): + """Parse data port mappings. + + Mappings must be a space-delimited list of bridge:port. + + Returns dict of the form {port:bridge} where ports may be mac addresses or + interface names. + """ + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for <port> since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) + if not _mappings or list(_mappings.values()) == ['']: + if not mappings: + return {} + + # For backwards-compatibility we need to support port-only provided in + # config. + _mappings = {mappings.split()[0]: default_bridge} + + ports = _mappings.keys() + if len(set(ports)) != len(ports): + raise Exception("It is not allowed to have the same port configured " + "on more than one bridge") + + return _mappings + + +def parse_vlan_range_mappings(mappings): + """Parse vlan range mappings. + + Mappings must be a space-delimited list of provider:start:end mappings. + + The start:end range is optional and may be omitted. 
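+
+    For example (illustrative only), 'physnet1:100:200 physnet2' would be
+    parsed to {'physnet1': ('100', '200'), 'physnet2': ('',)}.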
+
+    Returns dict of the form {provider: (start, end)}.
+    """
+    _mappings = parse_mappings(mappings)
+    if not _mappings:
+        return {}
+
+    mappings = {}
+    for p, r in six.iteritems(_mappings):
+        mappings[p] = tuple(r.split(':'))
+
+    return mappings
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/policyd.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/policyd.py
new file mode 100644
index 00000000..f2bb21e9
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/policyd.py
@@ -0,0 +1,801 @@
+# Copyright 2019 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import contextlib
+import os
+import six
+import shutil
+import yaml
+import zipfile
+
+import charmhelpers
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as ch_host
+
+# Features provided by this module:
+
+"""
+Policy.d helper functions
+=========================
+
+The functions in this module are designed, as a set, to provide an easy-to-use
+set of hooks for classic charms to add in /etc/<service-name>/policy.d/
+directory override YAML files.
+
+(For charms.openstack charms, a mixin class is provided for this
+functionality).
+
+In order to "hook" this functionality into a (classic) charm, two functions are
+provided:
+
+    maybe_do_policyd_overrides(openstack_release,
+                               service,
+                               blacklist_paths=None,
+                               blacklist_keys=None,
+                               template_function=None,
+                               restart_handler=None)
+
+    maybe_do_policyd_overrides_on_config_changed(openstack_release,
+                                                 service,
+                                                 blacklist_paths=None,
+                                                 blacklist_keys=None,
+                                                 template_function=None,
+                                                 restart_handler=None)
+
+(See the docstrings for details on the parameters)
+
+The functions should be called from the install and upgrade hooks in the charm.
+The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
+called on the config-changed hook, in that it does an additional check to
+ensure that an already overridden policy.d in an upgrade or install hook isn't
+repeated.
+
+In order to *enable* this functionality, the charm's install, config_changed,
+and upgrade_charm hooks need to be modified, and a new config option (see
+below) needs to be added. The README for the charm should also be updated.
+
+Examples from the keystone charm are:
+
+@hooks.hook('install.real')
+@harden()
+def install():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
+
+
+@hooks.hook('config-changed')
+@restart_on_change(restart_map(), restart_functions=restart_function_map())
+@harden()
+def config_changed():
+    ...
+    # call the policy overrides handler which will install any policy overrides
+    maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
+                                                 'keystone')
+
+@hooks.hook('upgrade-charm')
+@restart_on_change(restart_map(), stopstart=True)
+@harden()
+def upgrade_charm():
+    ...
+ # call the policy overrides handler which will install any policy overrides + maybe_do_policyd_overrides(os_release('keystone'), 'keystone') + +Status Line +=========== + +The workload status code in charm-helpers has been modified to detect if +policy.d override code has been incorporated into the charm by checking for the +new config variable (in the config.yaml). If it has been, then the workload +status line will automatically show "PO:" at the beginning of the workload +status for that unit/service if the config option is set. If the policy +override is broken, the "PO (broken):" will be shown. No changes to the charm +(apart from those already mentioned) are needed to enable this functionality. +(charms.openstack charms also get this functionality, but please see that +library for further details). +""" + +# The config.yaml for the charm should contain the following for the config +# option: + +""" + use-policyd-override: + type: boolean + default: False + description: | + If True then use the resource file named 'policyd-override' to install + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml + or .yml extension. If False then remove the overrides. +""" + +# The metadata.yaml for the charm should contain the following: +""" +resources: + policyd-override: + type: file + filename: policyd-override.zip + description: The policy.d overrides file +""" + +# The README for the charm should contain the following: +""" +Policy Overrides +---------------- + +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. + +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. + +Policy overrides are YAML files that contain rules that will add to, or +override, existing policy rules in the service. The `policy.d` directory is +a place to put the YAML override files. This charm owns the +`/etc/keystone/policy.d` directory, and as such, any manual changes to it will +be overwritten on charm upgrades. + +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: + + + juju attach-resource <charm-name> policyd-override=overrides.zip + +The policy override is enabled in the charm using: + + juju config <charm-name> use-policyd-override=true + +When `use-policyd-override` is `True` the status line of the charm will be +prefixed with `PO:` indicating that policies have been overridden. If the +installation of the policy override YAML files failed for any reason then the +status line will be prefixed with `PO (broken):`. The log file for the charm +will indicate the reason. No policy override files are installed if the `PO +(broken):` is shown. The status line indicates that the overrides are broken, +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. + +Policy overrides on one service may affect the functionality of another +service. 
Therefore, it may be necessary to provide policy overrides for +multiple service charms to achieve a consistent set of policies across the +OpenStack system. The charms for the other services that may need overrides +should be checked to ensure that they support overrides before proceeding. +""" + +POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl'] +POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl'] +POLICYD_RESOURCE_NAME = "policyd-override" +POLICYD_CONFIG_NAME = "use-policyd-override" +POLICYD_SUCCESS_FILENAME = "policyd-override-success" +POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO +POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin") + + +class BadPolicyZipFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +class BadPolicyYamlFile(Exception): + + def __init__(self, log_message): + self.log_message = log_message + + def __str__(self): + return self.log_message + + +if six.PY2: + BadZipFile = zipfile.BadZipfile +else: + BadZipFile = zipfile.BadZipFile + + +def is_policyd_override_valid_on_this_release(openstack_release): + """Check that the charm is running on at least Ubuntu Xenial, and at + least the queens release. + + :param openstack_release: the release codename that is installed. + :type openstack_release: str + :returns: True if okay + :rtype: bool + """ + # NOTE(ajkavanagh) circular import! This is because the status message + # generation code in utils has to call into this module, but this function + # needs the CompareOpenStackReleases() function. The only way to solve + # this is either to put ALL of this module into utils, or refactor one or + # other of the CompareOpenStackReleases or status message generation code + # into a 3rd module. + import charmhelpers.contrib.openstack.utils as ch_utils + return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens' + + +def maybe_do_policyd_overrides(openstack_release, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + restart_handler=None, + user=None, + group=None, + config_changed=False): + """If the config option is set, get the resource file and process it to + enable the policy.d overrides for the service passed. + + The param `openstack_release` is required as the policyd overrides feature + is only supported on openstack_release "queens" or later, and on ubuntu + "xenial" or later. Prior to these versions, this feature is a NOP. + + The optional template_function is a function that accepts a string and has + an opportunity to modify the loaded file prior to it being read by + yaml.safe_load(). This allows the charm to perform "templating" using + charm derived data. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The param restart_handler is an optional Callable that is called to perform + the service restart if the policy.d file is changed. This should normally + be None as oslo.policy automatically picks up changes in the policy.d + directory. However, for any services where this is buggy then a + restart_handler can be used to force the policy.d files to be read. 
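+
+    An illustrative call (editorial sketch; `service_restart` here is an
+    assumed helper, not something this module provides)::
+
+        maybe_do_policyd_overrides(
+            os_release('keystone'), 'keystone',
+            restart_handler=lambda: service_restart('keystone'))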
+
+    If the config_changed param is True, then the handling is slightly
+    different: it will only perform the policyd overrides if the config is True
+    and the success file doesn't exist. Otherwise, it does nothing as the
+    resource file has already been processed.
+
+    :param openstack_release: The openstack release that is installed.
+    :type openstack_release: str
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the string
+                              prior to being processed as a Yaml document.
+    :type template_function: Union[None, Callable[[str], str]]
+    :param restart_handler: The function to call if the service should be
+                            restarted.
+    :type restart_handler: Union[None, Callable[]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :param config_changed: Set to True for config_changed hook.
+    :type config_changed: bool
+    """
+    _user = service if user is None else user
+    _group = service if group is None else group
+    if not is_policyd_override_valid_on_this_release(openstack_release):
+        return
+    hookenv.log("Running maybe_do_policyd_overrides",
+                level=POLICYD_LOG_LEVEL_DEFAULT)
+    config = hookenv.config()
+    try:
+        if not config.get(POLICYD_CONFIG_NAME, False):
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            if (os.path.isfile(_policy_success_file()) and
+                    restart_handler is not None and
+                    callable(restart_handler)):
+                restart_handler()
+                remove_policy_success_file()
+            return
+    except Exception as e:
+        hookenv.log("... ERROR: Exception is: {}".format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        import traceback
+        hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # if the policyd overrides have been performed when doing config_changed
+    # just return
+    if config_changed and is_policy_success_file_set():
+        hookenv.log("... already setup, so skipping.",
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        return
+    # from now on it should succeed; if it doesn't then status line will show
+    # broken.
+    resource_filename = get_policy_resource_filename()
+    restart = process_policy_resource_file(
+        resource_filename, service, blacklist_paths, blacklist_keys,
+        template_function)
+    if restart and restart_handler is not None and callable(restart_handler):
+        restart_handler()
+
+
+@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
+def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
+    """This function is designed to be called from the config changed hook.
+
+    DEPRECATED: please use maybe_do_policyd_overrides() with the param
+    `config_changed` as `True`.
+
+    See maybe_do_policyd_overrides() for more details on the params.
+    """
+    if 'config_changed' not in kwargs.keys():
+        kwargs['config_changed'] = True
+    return maybe_do_policyd_overrides(*args, **kwargs)
+
+
+def get_policy_resource_filename():
+    """Function to extract the policy resource filename
+
+    :returns: The filename of the resource, if set, otherwise, if an error
+              occurs, then None is returned.
+ :rtype: Union[str, None] + """ + try: + return hookenv.resource_get(POLICYD_RESOURCE_NAME) + except Exception: + return None + + +@contextlib.contextmanager +def open_and_filter_yaml_files(filepath, has_subdirs=False): + """Validate that the filepath provided is a zip file and contains at least + one (.yaml|.yml) file, and that the files are not duplicated when the zip + file is flattened. Note that the yaml files are not checked. This is the + first stage in validating the policy zipfile; individual yaml files are not + checked for validity or black listed keys. + + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + + An example of use is: + + with open_and_filter_yaml_files(some_path) as zfp, g: + for zipinfo in g: + # do something with zipinfo ... + + :param filepath: a filepath object that can be opened by zipfile + :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: (zfp handle, + a generator of the (name, filename, ZipInfo object) tuples) as a + tuple. + :rtype: ContextManager[(zipfile.ZipFile, + Generator[(name, str, str, zipfile.ZipInfo)])] + :raises: zipfile.BadZipFile + :raises: BadPolicyZipFile if duplicated yaml or missing + :raises: IOError if the filepath is not found + """ + with zipfile.ZipFile(filepath, 'r') as zfp: + # first pass through; check for duplicates and at least one yaml file. + names = collections.defaultdict(int) + yamlfiles = _yamlfiles(zfp, has_subdirs) + for name, _, _, _ in yamlfiles: + names[name] += 1 + # There must be at least 1 yaml file. + if len(names.keys()) == 0: + raise BadPolicyZipFile("contains no yaml files with {} extensions." + .format(", ".join(POLICYD_VALID_EXTS))) + # There must be no duplicates + duplicates = [n for n, c in names.items() if c > 1] + if duplicates: + raise BadPolicyZipFile("{} have duplicates in the zip file." + .format(", ".join(duplicates))) + # Finally, let's yield the generator + yield (zfp, yamlfiles) + + +def _yamlfiles(zipfile, has_subdirs=False): + """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) + and the infolist item from a zipfile. + + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, <ZipInfo object> + + This is to help with the special, additional, processing that the dashboard + charm requires. + + :param zipfile: the zipfile to read zipinfo items from + :type zipfile: zipfile.ZipFile + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
+    :rtype: List[(str, str, str, zipfile.ZipInfo)]
+    """
+    files = []
+    for infolist_item in zipfile.infolist():
+        try:
+            if infolist_item.is_dir():
+                continue
+        except AttributeError:
+            # fallback to "old" way to determine dir entry for pre-py36
+            if infolist_item.filename.endswith('/'):
+                continue
+        _dir, name_ext = os.path.split(infolist_item.filename)
+        name, ext = os.path.splitext(name_ext)
+        if has_subdirs and _dir != "":
+            name = os.path.join(_dir.split(os.path.sep)[0], name)
+        ext = ext.lower()
+        if ext and ext in POLICYD_VALID_EXTS:
+            files.append((name, ext, name_ext, infolist_item))
+    return files
+
+
+def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
+    """Read, validate and return the (first) yaml document from the stream.
+
+    The doc is read, and checked for a yaml file. The top-level keys are
+    checked against the blacklist_keys provided. If there are problems then an
+    Exception is raised. Otherwise the yaml document is returned as a Python
+    object that can be dumped back as a yaml file on the system.
+
+    The yaml file must only consist of a str:str mapping, and if not then the
+    yaml file is rejected.
+
+    :param stream_or_doc: the file object to read the yaml from
+    :type stream_or_doc: Union[AnyStr, IO[AnyStr]]
+    :param blacklist_keys: Any keys, which if in the yaml file, should cause
+                           an error.
+    :type blacklist_keys: Union[None, List[str]]
+    :returns: the yaml file as a python document
+    :rtype: Dict[str, str]
+    :raises: yaml.YAMLError if there is a problem with the document
+    :raises: BadPolicyYamlFile if file doesn't look right or there are
+             blacklisted keys in the file.
+    """
+    blacklist_keys = blacklist_keys or []
+    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
+    doc = yaml.safe_load(stream_or_doc)
+    if not isinstance(doc, dict):
+        raise BadPolicyYamlFile("doesn't look like a policy file?")
+    keys = set(doc.keys())
+    blacklisted_keys_present = keys.intersection(blacklist_keys)
+    if blacklisted_keys_present:
+        raise BadPolicyYamlFile("blacklisted keys {} present."
+                                .format(", ".join(blacklisted_keys_present)))
+    if not all(isinstance(k, six.string_types) for k in keys):
+        raise BadPolicyYamlFile("keys in yaml aren't all strings?")
+    # check that the dictionary looks like a mapping of str to str
+    if not all(isinstance(v, six.string_types) for v in doc.values()):
+        raise BadPolicyYamlFile("values in yaml aren't all strings?")
+    return doc
+
+
+def policyd_dir_for(service):
+    """Return the policy directory for the named service.
+
+    :param service: str
+    :returns: the policy.d override directory.
+    :rtype: os.PathLike[str]
+    """
+    return os.path.join("/", "etc", service, "policy.d")
+
+
+def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
+    """Clean out the policyd directory except for items that should be kept.
+
+    The keep_paths, if used, should be set to the full path of the files that
+    should be kept in the policyd directory for the service. Note that the
+    service name is passed in, and then the policyd_dir_for() function is used.
+    This is so that a coding error doesn't result in a sudden deletion of the
+    charm (say).
+
+    :param service: the service name to use to construct the policy.d dir.
+    :type service: str
+    :param keep_paths: optional list of paths to not delete.
+ :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + """ + _user = service if user is None else user + _group = service if group is None else group + keep_paths = keep_paths or [] + path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) + if not os.path.exists(path): + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if hasattr(os, 'scandir') else _fallback_scandir + for direntry in _scanner(path): + # see if the path should be kept. + if direntry.path in keep_paths: + continue + # we remove any directories; it's ours and there shouldn't be any + if direntry.is_dir(): + shutil.rmtree(direntry.path) + else: + os.remove(direntry.path) + + +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + +@contextlib.contextmanager +def _fallback_scandir(path): + """Fallback os.scandir implementation. + + provide a fallback implementation of os.scandir if this module ever gets + used in a py2 or py34 charm. Uses os.listdir() to get the names in the path, + and then mocks the is_dir() function using os.path.isdir() to check for + directory. + + :param path: the path to list the directories for + :type path: str + :returns: Generator that provides _FBDirectory objects + :rtype: ContextManager[_FBDirectory] + """ + for f in os.listdir(path): + yield _FBDirectory(f) + + +class _FBDirectory(object): + """Mock a scandir Directory object with enough to use in + clean_policyd_dir_for + """ + + def __init__(self, path): + self.path = path + + def is_dir(self): + return os.path.isdir(self.path) + + +def path_for_policy_file(service, name): + """Return the full path for a policy.d file that will be written to the + service's policy.d directory. + + It is constructed using policyd_dir_for(), the name and the ".yaml" + extension. + + For horizon, for example, it's a bit more complicated. The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + + :param service: the service name + :type service: str + :param name: the name for the policy override + :type name: str + :returns: the full path name for the file + :rtype: os.PathLike[str] + """ + return os.path.join(policyd_dir_for(service), name + ".yaml") + + +def _policy_success_file(): + """Return the file name for a successful drop of policy.d overrides + + :returns: the path name for the file. 
+ :rtype: str + """ + return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME) + + +def remove_policy_success_file(): + """Remove the file that indicates successful policyd override.""" + try: + os.remove(_policy_success_file()) + except Exception: + pass + + +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + +def policyd_status_message_prefix(): + """Return the prefix str for the status line. + + "PO:" indicating that the policy overrides are in place, or "PO (broken):" + if the policy is supposed to be working but there is no success file. + + :returns: the prefix + :rtype: str + """ + if is_policy_success_file_set(): + return "PO:" + return "PO (broken):" + + +def process_policy_resource_file(resource_file, + service, + blacklist_paths=None, + blacklist_keys=None, + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): + """Process the resource file (which should contain at least one yaml file) + and write those files to the service's policy.d directory. + + The optional template_function is a function that accepts a python + string and has an opportunity to modify the document + prior to it being read by the yaml.safe_load() function and written to + disk. Note that this function does *not* say how the templating is done - + this is up to the charm to implement its chosen method. + + The param blacklist_paths are paths (that are in the service's policy.d + directory that should not be touched). + + The param blacklist_keys are keys that must not appear in the yaml file. + If they do, then the whole policy.d file fails. + + The yaml file extracted from the resource_file (which is a zipped file) has + its file path reconstructed. This, also, must not match any path in the + black list. + + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + + If any error occurs, then the policy.d directory is cleared, the error is + written to the log, and the status line will eventually show as failed. + + :param resource_file: The zipped file to open and extract yaml files form. + :type resource_file: Union[AnyStr, os.PathLike[AnyStr]] + :param service: the service name to construct the policy.d directory for. + :type service: str + :param blacklist_paths: optional list of paths to leave alone + :type blacklist_paths: Union[None, List[str]] + :param blacklist_keys: optional list of keys that mustn't appear in the + yaml file's + :type blacklist_keys: Union[None, List[str]] + :param template_function: Optional function that can modify the yaml + document. 
+
+
+def process_policy_resource_file(resource_file,
+                                 service,
+                                 blacklist_paths=None,
+                                 blacklist_keys=None,
+                                 template_function=None,
+                                 preserve_topdir=False,
+                                 preprocess_filename=None,
+                                 user=None,
+                                 group=None):
+    """Process the resource file (which should contain at least one yaml file)
+    and write those files to the service's policy.d directory.
+
+    The optional template_function is a function that accepts a python
+    string and has an opportunity to modify the document
+    prior to it being read by the yaml.safe_load() function and written to
+    disk. Note that this function does *not* say how the templating is done -
+    this is up to the charm to implement its chosen method.
+
+    The param blacklist_paths lists paths in the service's policy.d
+    directory that should not be touched.
+
+    The param blacklist_keys lists keys that must not appear in the yaml
+    files. If any do, the whole policy.d file fails validation.
+
+    The yaml file extracted from the resource_file (which is a zipped file)
+    has its file path reconstructed. This, also, must not match any path in
+    the blacklist.
+
+    The yaml filename can be modified in two ways. If the `preserve_topdir`
+    param is True, then files will be flattened to the top dir. This allows
+    for creating sets of files that can be grouped into a single level tree
+    structure.
+
+    Secondly, if the `preprocess_filename` param is not None and callable(),
+    then the name is passed to that function for preprocessing before being
+    converted to the end location. This is to allow munging of the filename
+    prior to being tested for a blacklist path.
+
+    If any error occurs, then the policy.d directory is cleared, the error is
+    written to the log, and the status line will eventually show as failed.
+
+    :param resource_file: The zipped file to open and extract yaml files from.
+    :type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
+    :param service: the service name to construct the policy.d directory for.
+    :type service: str
+    :param blacklist_paths: optional list of paths to leave alone
+    :type blacklist_paths: Union[None, List[str]]
+    :param blacklist_keys: optional list of keys that mustn't appear in the
+                           yaml files
+    :type blacklist_keys: Union[None, List[str]]
+    :param template_function: Optional function that can modify the yaml
+                              document.
+    :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
+    :param preserve_topdir: Keep the toplevel subdir
+    :type preserve_topdir: bool
+    :param preprocess_filename: Optional function to use to process filenames
+                                extracted from the resource file.
+    :type preprocess_filename: Union[None, Callable[[AnyStr], AnyStr]]
+    :param user: The user to create/write files/directories as
+    :type user: Union[None, str]
+    :param group: the group to create/write files/directories as
+    :type group: Union[None, str]
+    :returns: True if the processing was successful, False if not.
+    :rtype: bool
+    """
+    hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG)
+    blacklist_paths = blacklist_paths or []
+    completed = False
+    _preprocess = None
+    if preprocess_filename is not None and callable(preprocess_filename):
+        _preprocess = preprocess_filename
+    _user = service if user is None else user
+    _group = service if group is None else group
+    try:
+        with open_and_filter_yaml_files(
+                resource_file, preserve_topdir) as (zfp, gen):
+            # first clear out the policy.d directory and clear success
+            remove_policy_success_file()
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+            for name, ext, filename, zipinfo in gen:
+                # See if the name should be preprocessed.
+                if _preprocess is not None:
+                    name = _preprocess(name)
+                # construct a name for the output file.
+                yaml_filename = path_for_policy_file(service, name)
+                if yaml_filename in blacklist_paths:
+                    raise BadPolicyZipFile("policy.d name {} is blacklisted"
+                                           .format(yaml_filename))
+                with zfp.open(zipinfo) as fp:
+                    doc = fp.read()
+                    # if template_function is not None, then offer the
+                    # document to the template function
+                    if ext in POLICYD_TEMPLATE_EXTS:
+                        if (template_function is None or not
+                                callable(template_function)):
+                            raise BadPolicyZipFile(
+                                "Template {} but no template_function is "
+                                "available".format(filename))
+                        doc = template_function(doc)
+                yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
+                # we may have to create the directory
+                maybe_create_directory_for(yaml_filename, _user, _group)
+                ch_host.write_file(yaml_filename,
+                                   yaml.dump(yaml_doc).encode('utf-8'),
+                                   _user,
+                                   _group)
+        # Everything worked, so mark it up as a success.
+        completed = True
+    except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
+        hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+    except IOError as e:
+        # technically this shouldn't happen; it would be a programming error
+        # as the filename comes from Juju and thus should exist.
+        hookenv.log(
+            "File {} failed with IOError. This really shouldn't happen"
+            " -- error: {}".format(resource_file, str(e)),
+            level=POLICYD_LOG_LEVEL_DEFAULT)
+    except Exception as e:
+        import traceback
+        hookenv.log("General Exception({}) during policyd processing"
+                    .format(str(e)),
+                    level=POLICYD_LOG_LEVEL_DEFAULT)
+        hookenv.log(traceback.format_exc())
+    finally:
+        if not completed:
+            hookenv.log("Processing {} failed: cleaning policy.d directory"
+                        .format(resource_file),
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            clean_policyd_dir_for(service,
+                                  blacklist_paths,
+                                  user=_user,
+                                  group=_group)
+        else:
+            # touch the success filename
+            hookenv.log("policy.d overrides installed.",
+                        level=POLICYD_LOG_LEVEL_DEFAULT)
+            set_policy_success_file()
+    return completed
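+
+
+# NOTE: a hedged sketch, not upstream code, of driving the function above
+# from a charm. The resource name 'policyd-override', the service name
+# 'keystone' and the blacklisted key are assumptions for illustration.
+def _example_process_overrides():
+    resource = hookenv.resource_get('policyd-override')
+    if not resource:
+        return False
+    return process_policy_resource_file(
+        resource,
+        service='keystone',
+        blacklist_paths=[path_for_policy_file('keystone', 'default')],
+        blacklist_keys=['admin_required'])  # hypothetical forbidden key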
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ssh_migrations.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ssh_migrations.py
new file mode 100644
index 00000000..96b9f71d
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/ssh_migrations.py
@@ -0,0 +1,412 @@
+# Copyright 2018 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+
+from charmhelpers.core.hookenv import (
+    ERROR,
+    log,
+    relation_get,
+)
+from charmhelpers.contrib.network.ip import (
+    is_ipv6,
+    ns_query,
+)
+from charmhelpers.contrib.openstack.utils import (
+    get_hostname,
+    get_host_ip,
+    is_ip,
+)
+
+NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
+
+
+def ssh_directory_for_unit(application_name, user=None):
+    """Return the directory used to store ssh assets for the application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Fully qualified directory path.
+    :rtype: str
+    """
+    if user:
+        application_name = "{}_{}".format(application_name, user)
+    _dir = os.path.join(NOVA_SSH_DIR, application_name)
+    for d in [NOVA_SSH_DIR, _dir]:
+        if not os.path.isdir(d):
+            os.mkdir(d)
+    for f in ['authorized_keys', 'known_hosts']:
+        f = os.path.join(_dir, f)
+        if not os.path.isfile(f):
+            open(f, 'w').close()
+    return _dir
+
+
+def known_hosts(application_name, user=None):
+    """Return the known hosts file for the application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Fully qualified path to file.
+    :rtype: str
+    """
+    return os.path.join(
+        ssh_directory_for_unit(application_name, user),
+        'known_hosts')
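+
+
+# NOTE: illustrative only, not upstream code. For application 'nova-compute'
+# and user 'nova' the helper above resolves to
+# '/etc/nova/compute_ssh/nova-compute_nova/known_hosts'; note that calling it
+# also creates the directory and empty files if they are missing.
+def _example_known_hosts_path():
+    return known_hosts('nova-compute', user='nova')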
+
+
+def authorized_keys(application_name, user=None):
+    """Return the authorized keys file for the application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Fully qualified path to file.
+    :rtype: str
+    """
+    return os.path.join(
+        ssh_directory_for_unit(application_name, user),
+        'authorized_keys')
+
+
+def ssh_known_host_key(host, application_name, user=None):
+    """Return the first entry in known_hosts for host.
+
+    :param host: hostname to lookup in file.
+    :type host: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Host key
+    :rtype: str or None
+    """
+    cmd = [
+        'ssh-keygen',
+        '-f', known_hosts(application_name, user),
+        '-H',
+        '-F',
+        host]
+    try:
+        # The first line of output is like '# Host xx found: line 1 type RSA',
+        # which should be excluded.
+        output = subprocess.check_output(cmd)
+    except subprocess.CalledProcessError as e:
+        # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
+        if e.returncode == 1:
+            output = e.output
+        else:
+            raise
+    if isinstance(output, bytes):
+        # On python3, check_output() returns bytes; decode before splitting.
+        output = output.decode('utf-8')
+    output = output.strip()
+
+    if output:
+        # Bug #1500589 cmd has 0 rc on precise if entry not present
+        lines = output.split('\n')
+        if len(lines) >= 1:
+            return lines[0]
+
+    return None
+
+
+def remove_known_host(host, application_name, user=None):
+    """Remove the entry in known_hosts for host.
+
+    :param host: hostname to lookup in file.
+    :type host: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    log('Removing SSH known host entry for compute host at %s' % host)
+    cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
+    subprocess.check_call(cmd)
+
+
+def is_same_key(key_1, key_2):
+    """Extract the key from two host entries and compare them.
+
+    :param key_1: Host key
+    :type key_1: str
+    :param key_2: Host key
+    :type key_2: str
+    :returns: True if the actual key material matches.
+    :rtype: bool
+    """
+    # The key format will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
+    # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare
+    # the part starting with 'ssh-rsa' after '= ', because the hashed
+    # hostname at the beginning will change each time.
+    k_1 = key_1.split('= ')[1]
+    k_2 = key_2.split('= ')[1]
+    return k_1 == k_2
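+
+
+# NOTE: an illustrative sketch, not upstream code, of the comparison above;
+# the key material strings are fabricated placeholders.
+def _example_is_same_key():
+    a = '|1|AAAA=|BBBB= ssh-rsa EXAMPLEKEY'
+    b = '|1|CCCC=|DDDD= ssh-rsa EXAMPLEKEY'
+    # True: only the material after the hashed-hostname prefix is compared
+    return is_same_key(a, b)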
+
+
+def add_known_host(host, application_name, user=None):
+    """Add the given host key to the known hosts file.
+
+    :param host: host name
+    :type host: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
+    try:
+        remote_key = subprocess.check_output(cmd).strip()
+    except Exception as e:
+        log('Could not obtain SSH host key from %s' % host, level=ERROR)
+        raise e
+
+    current_key = ssh_known_host_key(host, application_name, user)
+    if current_key and remote_key:
+        if is_same_key(remote_key, current_key):
+            log('Known host key for compute host %s up to date.' % host)
+            return
+        else:
+            remove_known_host(host, application_name, user)
+
+    log('Adding SSH host key to known hosts for compute node at %s.' % host)
+    with open(known_hosts(application_name, user), 'a') as out:
+        out.write("{}\n".format(remote_key))
+
+
+def ssh_authorized_key_exists(public_key, application_name, user=None):
+    """Check if given key is in the authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Whether given key is in the authorized_key file.
+    :rtype: bool
+    """
+    with open(authorized_keys(application_name, user)) as keys:
+        return ('%s' % public_key) in keys.read()
+
+
+def add_authorized_key(public_key, application_name, user=None):
+    """Add given key to the authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    with open(authorized_keys(application_name, user), 'a') as keys:
+        keys.write("{}\n".format(public_key))
+
+
+def ssh_compute_add_host_and_key(public_key, hostname, private_address,
+                                 application_name, user=None):
+    """Add a compute node's ssh details to the local cache.
+
+    Collect various hostname variations and add the corresponding host keys to
+    the local known hosts file. Finally, add the supplied public key to the
+    authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param hostname: Hostname to collect host keys from.
+    :type hostname: str
+    :param private_address: Corresponding private address for hostname
+    :type private_address: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    # If remote compute node hands us a hostname, ensure we have a
+    # known hosts entry for its IP, hostname and FQDN.
+    hosts = [private_address]
+
+    if not is_ipv6(private_address):
+        if hostname:
+            hosts.append(hostname)
+
+        if is_ip(private_address):
+            hn = get_hostname(private_address)
+            if hn:
+                hosts.append(hn)
+                short = hn.split('.')[0]
+                if ns_query(short):
+                    hosts.append(short)
+        else:
+            hosts.append(get_host_ip(private_address))
+            short = private_address.split('.')[0]
+            if ns_query(short):
+                hosts.append(short)
+
+    for host in list(set(hosts)):
+        add_known_host(host, application_name, user)
+
+    if not ssh_authorized_key_exists(public_key, application_name, user):
+        log('Saving SSH authorized key for compute host at %s.' %
+            private_address)
+        add_authorized_key(public_key, application_name, user)
+
+
+def ssh_compute_add(public_key, application_name, rid=None, unit=None,
+                    user=None):
+    """Add a compute node's ssh details to the local cache.
+
+    Collect various hostname variations and add the corresponding host keys to
+    the local known hosts file. Finally, add the supplied public key to the
+    authorized_key file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param rid: Relation id of the relation between this charm and the app. If
+                none is supplied it is assumed to be the relation relating to
+                the current hook context.
+    :type rid: str
+    :param unit: Unit to add ssh assets for. If none is supplied it is assumed
+                 to be the unit relating to the current hook context.
+    :type unit: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    relation_data = relation_get(rid=rid, unit=unit)
+    ssh_compute_add_host_and_key(
+        public_key,
+        relation_data.get('hostname'),
+        relation_data.get('private-address'),
+        application_name,
+        user=user)
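+
+
+# NOTE: a hedged sketch, not upstream code, of driving the helper above from
+# a relation-changed hook; the application name and the relation key
+# 'ssh_public_key' are assumptions for illustration.
+def _example_relation_changed():
+    public_key = relation_get('ssh_public_key')
+    if public_key:
+        ssh_compute_add(public_key, 'nova-compute', user='nova')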
+
+
+def ssh_known_hosts_lines(application_name, user=None):
+    """Return contents of known_hosts file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: known_hosts lines for the application.
+    :rtype: List[str]
+    """
+    known_hosts_list = []
+    with open(known_hosts(application_name, user)) as hosts:
+        for hosts_line in hosts:
+            if hosts_line.rstrip():
+                known_hosts_list.append(hosts_line.rstrip())
+    return known_hosts_list
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+    """Return contents of authorized_keys file for given application.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: authorized_keys lines for the application.
+    :rtype: List[str]
+    """
+    authorized_keys_list = []
+
+    with open(authorized_keys(application_name, user)) as keys:
+        for authkey_line in keys:
+            if authkey_line.rstrip():
+                authorized_keys_list.append(authkey_line.rstrip())
+    return authorized_keys_list
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+    """Remove given public key from authorized_keys file.
+
+    :param public_key: Public key.
+    :type public_key: str
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    """
+    if not (os.path.isfile(authorized_keys(application_name, user)) or
+            os.path.isfile(known_hosts(application_name, user))):
+        return
+
+    keys = ssh_authorized_keys_lines(application_name, user=user)
+    keys = [k.strip() for k in keys]
+
+    if public_key not in keys:
+        return
+
+    # filter out the key to be removed (rather than mutating the list while
+    # iterating over it)
+    keys = [k for k in keys if k != public_key]
+
+    with open(authorized_keys(application_name, user), 'w') as _keys:
+        keys = '\n'.join(keys)
+        if not keys.endswith('\n'):
+            keys += '\n'
+        _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+    """Retrieve the known host entries and public keys for the application.
+
+    The entries returned cover all units of the given application that are
+    related to this application, for the app + user combination.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :param user: The user that the ssh assets are for.
+    :type user: str
+    :returns: Public keys + host keys for all units for app + user
+              combination.
+    :rtype: dict
+    """
+    settings = {}
+    keys = {}
+    prefix = ''
+    if user:
+        prefix = '{}_'.format(user)
+
+    for i, line in enumerate(ssh_known_hosts_lines(
+            application_name=application_name, user=user)):
+        settings['{}known_hosts_{}'.format(prefix, i)] = line
+    if settings:
+        settings['{}known_hosts_max_index'.format(prefix)] = len(
+            settings.keys())
+
+    for i, line in enumerate(ssh_authorized_keys_lines(
+            application_name=application_name, user=user)):
+        keys['{}authorized_keys_{}'.format(prefix, i)] = line
+    if keys:
+        keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
+    settings.update(keys)
+    return settings
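+
+
+# NOTE: illustrative only, not upstream code. For a single known_hosts entry
+# and a single authorized key under user 'nova', the dict returned above
+# looks roughly like:
+#     {'nova_known_hosts_0': '|1|...= ssh-rsa ...',
+#      'nova_known_hosts_max_index': 1,
+#      'nova_authorized_keys_0': 'ssh-rsa AAAA...',
+#      'nova_authorized_keys_max_index': 1}
+def _example_publish_ssh_settings():
+    return get_ssh_settings('nova-compute', user='nova')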
+
+
+def get_all_user_ssh_settings(application_name):
+    """Retrieve the known host entries and public keys for the application.
+
+    The entries returned cover all units of the given application that are
+    related to this application, for both the root user and the nova user.
+
+    :param application_name: Name of application eg nova-compute-something
+    :type application_name: str
+    :returns: Public keys + host keys for all units for app + user
+              combination.
+    :rtype: dict
+    """
+    settings = get_ssh_settings(application_name)
+    settings.update(get_ssh_settings(application_name, user='nova'))
+    return settings
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/__init__.py
new file mode 100644
index 00000000..9df5f746
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# dummy __init__.py to fool syncer into thinking this is a syncable python
+# module
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/ceph.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/ceph.conf
new file mode 100644
index 00000000..a11ce8ab
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -0,0 +1,24 @@
+###############################################################################
+# [ WARNING ]
+# ceph configuration file maintained by Juju
+# local changes may be overwritten.
+############################################################################### +[global] +{% if auth -%} +auth_supported = {{ auth }} +keyring = /etc/ceph/$cluster.$name.keyring +mon host = {{ mon_hosts }} +{% endif -%} +log to syslog = {{ use_syslog }} +err to syslog = {{ use_syslog }} +clog to syslog = {{ use_syslog }} +{% if rbd_features %} +rbd default features = {{ rbd_features }} +{% endif %} + +[client] +{% if rbd_client_cache_settings -%} +{% for key, value in rbd_client_cache_settings.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{%- endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/git.upstart b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/git.upstart new file mode 100644 index 00000000..4bed404b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/git.upstart @@ -0,0 +1,17 @@ +description "{{ service_description }}" +author "Juju {{ service_name }} Charm <juju@localhost>" + +start on runlevel [2345] +stop on runlevel [!2345] + +respawn + +exec start-stop-daemon --start --chuid {{ user_name }} \ + --chdir {{ start_dir }} --name {{ process_name }} \ + --exec {{ executable_name }} -- \ + {% for config_file in config_files -%} + --config-file={{ config_file }} \ + {% endfor -%} + {% if log_file -%} + --log-file={{ log_file }} + {% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg new file mode 100644 index 00000000..d36af2aa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/haproxy.cfg @@ -0,0 +1,77 @@ +global + log /var/lib/haproxy/dev/log local0 + log /var/lib/haproxy/dev/log local1 notice + maxconn 20000 + user haproxy + group haproxy + spread-checks 0 + stats socket /var/run/haproxy/admin.sock mode 600 level admin + stats timeout 2m + +defaults + log global + mode tcp + option tcplog + option dontlognull + retries 3 +{%- if haproxy_queue_timeout %} + timeout queue {{ haproxy_queue_timeout }} +{%- else %} + timeout queue 9000 +{%- endif %} +{%- if haproxy_connect_timeout %} + timeout connect {{ haproxy_connect_timeout }} +{%- else %} + timeout connect 9000 +{%- endif %} +{%- if haproxy_client_timeout %} + timeout client {{ haproxy_client_timeout }} +{%- else %} + timeout client 90000 +{%- endif %} +{%- if haproxy_server_timeout %} + timeout server {{ haproxy_server_timeout }} +{%- else %} + timeout server 90000 +{%- endif %} + +listen stats + bind {{ local_host }}:{{ stat_port }} + mode http + stats enable + stats hide-version + stats realm Haproxy\ Statistics + stats uri / + stats auth admin:{{ stat_password }} + +{% if frontends -%} +{% for service, ports in service_ports.items() -%} +frontend tcp-in_{{ service }} + bind *:{{ ports[0] }} + {% if ipv6_enabled -%} + bind :::{{ ports[0] }} + {% endif -%} + {% for frontend in frontends -%} + acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} + use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} + {% endfor -%} + default_backend {{ service }}_{{ default_backend }} + +{% for frontend in frontends -%} +backend {{ service }}_{{ frontend }} + balance leastconn + {% if backend_options -%} + {% if backend_options[service] -%} + {% for option in backend_options[service] -%} + {% for key, value in option.items() 
-%} + {{ key }} {{ value }} + {% endfor -%} + {% endfor -%} + {% endif -%} + {% endif -%} + {% for unit, address in frontends[frontend]['backends'].items() -%} + server {{ unit }} {{ address }}:{{ ports[1] }} check + {% endfor %} +{% endfor -%} +{% endfor -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/logrotate b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/logrotate new file mode 100644 index 00000000..b2900d09 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/logrotate @@ -0,0 +1,9 @@ +/var/log/{{ logrotate_logs_location }}/*.log { + {{ logrotate_interval }} + {{ logrotate_count }} + compress + delaycompress + missingok + notifempty + copytruncate +} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/memcached.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/memcached.conf new file mode 100644 index 00000000..26cb037c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/memcached.conf @@ -0,0 +1,53 @@ +############################################################################### +# [ WARNING ] +# memcached configuration file maintained by Juju +# local changes may be overwritten. +############################################################################### + +# memcached default config file +# 2003 - Jay Bonci <jaybonci@debian.org> +# This configuration file is read by the start-memcached script provided as +# part of the Debian GNU/Linux distribution. + +# Run memcached as a daemon. This command is implied, and is not needed for the +# daemon to run. See the README.Debian that comes with this package for more +# information. +-d + +# Log memcached's output to /var/log/memcached +logfile /var/log/memcached.log + +# Be verbose +# -v + +# Be even more verbose (print client commands as well) +# -vv + +# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default +# Note that the daemon will grow to this size, but does not start out holding this much +# memory +-m 64 + +# Default connection port is 11211 +-p {{ memcache_port }} + +# Run the daemon as root. The start-memcached will default to running as root if no +# -u command is present in this config file +-u memcache + +# Specify which IP address to listen on. The default is to listen on all IP addresses +# This parameter is one of the only security measures that memcached has, so make sure +# it's listening on a firewalled interface. +-l {{ memcache_server }} + +# Limit the number of simultaneous incoming connections. The daemon default is 1024 +# -c 1024 + +# Lock down all paged memory. 
Consult with the README and homepage before you do this +# -k + +# Return error when memory is exhausted (rather than removing items) +# -M + +# Maximize core file limit +# -r diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend new file mode 100644 index 00000000..f614b3fa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend @@ -0,0 +1,29 @@ +{% if endpoints -%} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} +<VirtualHost {{ address }}:{{ ext }}> + ServerName {{ endpoint }} + SSLEngine on + SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 + SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 + SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + RequestHeader set X-Forwarded-Proto "https" +</VirtualHost> +{% endfor -%} +<Proxy *> + Order deny,allow + Allow from all +</Proxy> +<Location /> + Order allow,deny + Allow from all +</Location> +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf new file mode 100644 index 00000000..f614b3fa --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf @@ -0,0 +1,29 @@ +{% if endpoints -%} +{% for ext_port in ext_ports -%} +Listen {{ ext_port }} +{% endfor -%} +{% for address, endpoint, ext, int in endpoints -%} +<VirtualHost {{ address }}:{{ ext }}> + ServerName {{ endpoint }} + SSLEngine on + SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2 + SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + # See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8 + SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} + ProxyPass / http://localhost:{{ int }}/ + ProxyPassReverse / http://localhost:{{ int }}/ + ProxyPreserveHost on + RequestHeader set X-Forwarded-Proto "https" +</VirtualHost> +{% endfor -%} +<Proxy *> + Order deny,allow + Allow from all +</Proxy> +<Location /> + Order allow,deny + Allow from all +</Location> +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken new file mode 100644 index 00000000..5dcebe7c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken @@ -0,0 +1,12 @@ +{% if auth_host -%} +[keystone_authtoken] +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ 
auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_plugin = password +project_domain_id = default +user_domain_id = default +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy new file mode 100644 index 00000000..9356b2be --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-legacy @@ -0,0 +1,10 @@ +{% if auth_host -%} +[keystone_authtoken] +# Juno specific config (Bug #1557223) +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} +identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +admin_tenant_name = {{ admin_tenant_name }} +admin_user = {{ admin_user }} +admin_password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka new file mode 100644 index 00000000..c281868b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-mitaka @@ -0,0 +1,22 @@ +{% if auth_host -%} +[keystone_authtoken] +auth_type = password +{% if api_version == "3" -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3 +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3 +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +signing_dir = {{ signing_dir }} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only new file mode 100644 index 00000000..d26a91fe --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-keystone-authtoken-v3only @@ -0,0 +1,9 @@ +{% if auth_host -%} +[keystone_authtoken] +{% for option_name, option_value in keystone_authtoken.items() -%} +{{ option_name }} = {{ option_value }} +{% endfor -%} +{% if use_memcache == true %} +memcached_servers = {{ memcache_url }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache new file mode 100644 index 00000000..e056a32a --- /dev/null +++ 
b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-cache @@ -0,0 +1,6 @@ +[cache] +{% if memcache_url %} +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcache_url }} +{% endif %} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit new file mode 100644 index 00000000..bed2216a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata new file mode 100644 index 00000000..365f4375 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit-ocata @@ -0,0 +1,10 @@ +[oslo_messaging_rabbit] +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +{% endif -%} +{% if rabbit_ssl_ca -%} +ssl_ca_file = {{ rabbit_ssl_ca }} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware new file mode 100644 index 00000000..dd73230a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-middleware @@ -0,0 +1,5 @@ +[oslo_middleware] + +# Bug #1758675 +enable_proxy_headers_parsing = true + diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications new file mode 100644 index 00000000..71c7eb06 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-oslo-notifications @@ -0,0 +1,15 @@ +{% if transport_url -%} +[oslo_messaging_notifications] +driver = {{ oslo_messaging_driver }} +transport_url = {{ transport_url }} +{% if send_notifications_to_logs %} +driver = log +{% endif %} +{% if notification_topics -%} +topics = {{ notification_topics }} +{% endif -%} +{% if notification_format -%} +[notifications] +notification_format = {{ notification_format }} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-placement b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-placement new file mode 100644 index 00000000..97724bdb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-placement @@ -0,0 +1,19 @@ +[placement] +{% if auth_host -%} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = 
password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} +{% if region -%} +os_region_name = {{ region }} +{% endif -%} +randomize_allocation_candidates = true diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo new file mode 100644 index 00000000..b444c9c9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo @@ -0,0 +1,22 @@ +{% if rabbitmq_host or rabbitmq_hosts -%} +[oslo_messaging_rabbit] +rabbit_userid = {{ rabbitmq_user }} +rabbit_virtual_host = {{ rabbitmq_virtual_host }} +rabbit_password = {{ rabbitmq_password }} +{% if rabbitmq_hosts -%} +rabbit_hosts = {{ rabbitmq_hosts }} +{% if rabbitmq_ha_queues -%} +rabbit_ha_queues = True +rabbit_durable_queues = False +{% endif -%} +{% else -%} +rabbit_host = {{ rabbitmq_host }} +{% endif -%} +{% if rabbit_ssl_port -%} +rabbit_use_ssl = True +rabbit_port = {{ rabbit_ssl_port }} +{% if rabbit_ssl_ca -%} +kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} +{% endif -%} +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-zeromq b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-zeromq new file mode 100644 index 00000000..95f1a76c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/section-zeromq @@ -0,0 +1,14 @@ +{% if zmq_host -%} +# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) +rpc_backend = zmq +rpc_zmq_host = {{ zmq_host }} +{% if zmq_redis_address -%} +rpc_zmq_matchmaker = redis +matchmaker_heartbeat_freq = 15 +matchmaker_heartbeat_ttl = 30 +[matchmaker_redis] +host = {{ zmq_redis_address }} +{% else -%} +rpc_zmq_matchmaker = ring +{% endif -%} +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/vendor_data.json b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/vendor_data.json new file mode 100644 index 00000000..904f612a --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/vendor_data.json @@ -0,0 +1 @@ +{{ vendor_data_json }} \ No newline at end of file diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf new file mode 100644 index 00000000..23b62a38 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf @@ -0,0 +1,91 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. 
+ +{% if port -%} +Listen {{ port }} +{% endif -%} + +{% if admin_port -%} +Listen {{ admin_port }} +{% endif -%} + +{% if public_port -%} +Listen {{ public_port }} +{% endif -%} + +{% if port -%} +<VirtualHost *:{{ port }}> + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }} + WSGIScriptAlias / {{ script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if admin_port -%} +<VirtualHost *:{{ admin_port }}> + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-admin + WSGIScriptAlias / {{ admin_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if public_port -%} +<VirtualHost *:{{ public_port }}> + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-public + WSGIScriptAlias / {{ public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf new file mode 100644 index 00000000..23b62a38 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf @@ -0,0 +1,91 @@ +# Configuration file maintained by Juju. Local changes may be overwritten. 
+ +{% if port -%} +Listen {{ port }} +{% endif -%} + +{% if admin_port -%} +Listen {{ admin_port }} +{% endif -%} + +{% if public_port -%} +Listen {{ public_port }} +{% endif -%} + +{% if port -%} +<VirtualHost *:{{ port }}> + WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }} + WSGIScriptAlias / {{ script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if admin_port -%} +<VirtualHost *:{{ admin_port }}> + WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-admin + WSGIScriptAlias / {{ admin_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} + +{% if public_port -%} +<VirtualHost *:{{ public_port }}> + WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \ + display-name=%{GROUP} + WSGIProcessGroup {{ service_name }}-public + WSGIScriptAlias / {{ public_script }} + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog /var/log/apache2/{{ service_name }}_error.log + CustomLog /var/log/apache2/{{ service_name }}_access.log combined + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> +{% endif -%} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templating.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templating.py new file mode 100644 index 00000000..050f8af5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/templating.py @@ -0,0 +1,379 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+
+import six
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO,
+    TRACE
+)
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+except ImportError:
+    apt_update(fatal=True)
+    if six.PY2:
+        apt_install('python-jinja2', fatal=True)
+    else:
+        apt_install('python3-jinja2', fatal=True)
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If a release's template directory
+    is missing under templates_dir, it will be omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg::
+
+        hooks/charmhelpers/contrib/openstack/templates
+
+    :param templates_dir (str): Base template directory containing release
+        sub-directories.
+    :param os_release (str): OpenStack release codename to construct template
+        loader.
+    :returns: jinja2.ChoiceLoader constructed with a list of
+        jinja2.FilesystemLoaders, ordered in descending
+        order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in six.itervalues(OPENSTACK_CODENAMES)]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    # demote this log to the lowest level; we don't really need to see these
+    # logs in production even when debugging.
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=TRACE)
+    return ChoiceLoader(loaders)
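+
+
+# NOTE: a minimal, hypothetical sketch (not upstream code) of what
+# get_loader() yields for a charm shipping /tmp/templates with per-release
+# sub-directories; the path, release and template name are arbitrary examples.
+def _example_loader():
+    env = Environment(loader=get_loader('/tmp/templates', 'mitaka'))
+    # searches /tmp/templates/mitaka, then older release dirs, then
+    # /tmp/templates itself, then the templates dir shipped with this helper
+    return env.get_template('nova.conf')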
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+
+    def __init__(self, config_file, contexts, config_template=None):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+        self.config_template = config_template
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+    @property
+    def is_string_template(self):
+        """:returns: True if this instance was initialised with a string
+        template rather than a template file."""
+        return self.config_template is not None
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms. It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage::
+
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='folsom')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        configs.register(config_file='/etc/keystone/policy.d/extra.cfg',
+                         contexts=[context.ExtraPolicyContext(),
+                                   context.KeystoneContext()],
+                         config_template=hookenv.config('extra-policy'))
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    **OpenStack Releases and template loading**
+
+    When the object is instantiated, it is associated with a specific OS
+    release. This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+    For the example above, '/tmp/templates' contains the following structure::
+
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    If the object was created with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, as above, it would fall back to
+    the base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    **Context generators**
+
+    Context generators are used to generate template contexts during hook
+    execution. Doing so may require inspecting service relations, charm
+    config, etc. When registered, a config file is associated with a list
+    of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template. See context.py for more info.
+ """ + def __init__(self, templates_dir, openstack_release): + if not os.path.isdir(templates_dir): + log('Could not locate templates dir %s' % templates_dir, + level=ERROR) + raise OSConfigException + + self.templates_dir = templates_dir + self.openstack_release = openstack_release + self.templates = {} + self._tmpl_env = None + + if None in [Environment, ChoiceLoader, FileSystemLoader]: + # if this code is running, the object is created pre-install hook. + # jinja2 shouldn't get touched until the module is reloaded on next + # hook execution, with proper jinja2 bits successfully imported. + if six.PY2: + apt_install('python-jinja2') + else: + apt_install('python3-jinja2') + + def register(self, config_file, contexts, config_template=None): + """ + Register a config file with a list of context generators to be called + during rendering. + config_template can be used to load a template from a string instead of + using template loaders and template files. + :param config_file (str): a path where a config file will be rendered + :param contexts (list): a list of context dictionaries with kv pairs + :param config_template (str): an optional template string to use + """ + self.templates[config_file] = OSConfigTemplate( + config_file=config_file, + contexts=contexts, + config_template=config_template + ) + log('Registered config file: {}'.format(config_file), + level=INFO) + + def _get_tmpl_env(self): + if not self._tmpl_env: + loader = get_loader(self.templates_dir, self.openstack_release) + self._tmpl_env = Environment(loader=loader) + + def _get_template(self, template): + self._get_tmpl_env() + template = self._tmpl_env.get_template(template) + log('Loaded template from {}'.format(template.filename), + level=INFO) + return template + + def _get_template_from_string(self, ostmpl): + ''' + Get a jinja2 template object from a string. + :param ostmpl: OSConfigTemplate to use as a data source. + ''' + self._get_tmpl_env() + template = self._tmpl_env.from_string(ostmpl.config_template) + log('Loaded a template from a string for {}'.format( + ostmpl.config_file), + level=INFO) + return template + + def render(self, config_file): + if config_file not in self.templates: + log('Config not registered: {}'.format(config_file), level=ERROR) + raise OSConfigException + + ostmpl = self.templates[config_file] + ctxt = ostmpl.context() + + if ostmpl.is_string_template: + template = self._get_template_from_string(ostmpl) + log('Rendering from a string template: ' + '{}'.format(config_file), + level=INFO) + else: + _tmpl = os.path.basename(config_file) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound: + # if no template is found with basename, try looking + # for it using a munged full path, eg: + # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf + _tmpl = '_'.join(config_file.split('/')[1:]) + try: + template = self._get_template(_tmpl) + except exceptions.TemplateNotFound as e: + log('Could not load template from {} by {} or {}.' + ''.format( + self.templates_dir, + os.path.basename(config_file), + _tmpl + ), + level=ERROR) + raise e + + log('Rendering from template: {}'.format(config_file), + level=INFO) + return template.render(ctxt) + + def write(self, config_file): + """ + Write a single config file, raises if config file is not registered. 
+ """ + if config_file not in self.templates: + log('Config not registered: %s' % config_file, level=ERROR) + raise OSConfigException + + _out = self.render(config_file) + if six.PY3: + _out = _out.encode('UTF-8') + + with open(config_file, 'wb') as out: + out.write(_out) + + log('Wrote template %s.' % config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in six.iterkeys(self.templates)] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in six.itervalues(self.templates)] + return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 00000000..fbf01561 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,2352 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Common python helper functions used for OpenStack charms. 
+from collections import OrderedDict, namedtuple +from functools import wraps + +import subprocess +import json +import os +import sys +import re +import itertools +import functools + +import six +import traceback +import uuid +import yaml + +from charmhelpers import deprecate + +from charmhelpers.contrib.network import ip + +from charmhelpers.core import unitdata + +from charmhelpers.core.hookenv import ( + WORKLOAD_STATES, + action_fail, + action_set, + config, + expected_peer_units, + expected_related_units, + log as juju_log, + charm_dir, + INFO, + ERROR, + metadata, + related_units, + relation_get, + relation_id, + relation_ids, + relation_set, + status_set, + hook_name, + application_version_set, + cached, + leader_set, + leader_get, + local_unit, +) + +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) + +from charmhelpers.contrib.storage.linux.lvm import ( + deactivate_lvm_volume_group, + is_lvm_physical_volume, + remove_lvm_physical_volume, +) + +from charmhelpers.contrib.network.ip import ( + get_ipv6_addr, + is_ipv6, + port_has_listener, +) + +from charmhelpers.core.host import ( + lsb_release, + mounts, + umount, + service_running, + service_pause, + service_resume, + service_stop, + service_start, + restart_on_change_helper, +) +from charmhelpers.fetch import ( + apt_cache, + import_key as fetch_import_key, + add_source as fetch_add_source, + SourceConfigError, + GPGKeyError, + get_upstream_version, + filter_missing_packages, + ubuntu_apt_pkg as apt, +) + +from charmhelpers.fetch.snap import ( + snap_install, + snap_refresh, + valid_snap_channel, +) + +from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk +from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device +from charmhelpers.contrib.openstack.exceptions import OSContextError +from charmhelpers.contrib.openstack.policyd import ( + policyd_status_message_prefix, + POLICYD_CONFIG_NAME, +) + +from charmhelpers.contrib.openstack.ha.utils import ( + expect_ha, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' + 'restricted main multiverse universe') + +OPENSTACK_RELEASES = ( + 'diablo', + 'essex', + 'folsom', + 'grizzly', + 'havana', + 'icehouse', + 'juno', + 'kilo', + 'liberty', + 'mitaka', + 'newton', + 'ocata', + 'pike', + 'queens', + 'rocky', + 'stein', + 'train', + 'ussuri', +) + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), + ('trusty', 'icehouse'), + ('utopic', 'juno'), + ('vivid', 'kilo'), + ('wily', 'liberty'), + ('xenial', 'mitaka'), + ('yakkety', 'newton'), + ('zesty', 'ocata'), + ('artful', 'pike'), + ('bionic', 'queens'), + ('cosmic', 'rocky'), + ('disco', 'stein'), + ('eoan', 'train'), + ('focal', 'ussuri'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), + ('2014.2', 'juno'), + ('2015.1', 'kilo'), + ('2015.2', 'liberty'), + ('2016.1', 'mitaka'), + ('2016.2', 'newton'), + ('2017.1', 'ocata'), + ('2017.2', 'pike'), + ('2018.1', 'queens'), + ('2018.2', 'rocky'), + ('2019.1', 'stein'), + ('2019.2', 'train'), + ('2020.1', 'ussuri'), +]) + +# The ugly duckling - must list releases oldest to newest +SWIFT_CODENAMES = OrderedDict([ + 
('diablo', + ['1.4.3']), + ('essex', + ['1.4.8']), + ('folsom', + ['1.7.4']), + ('grizzly', + ['1.7.6', '1.7.7', '1.8.0']), + ('havana', + ['1.9.0', '1.9.1', '1.10.0']), + ('icehouse', + ['1.11.0', '1.12.0', '1.13.0', '1.13.1']), + ('juno', + ['2.0.0', '2.1.0', '2.2.0']), + ('kilo', + ['2.2.1', '2.2.2']), + ('liberty', + ['2.3.0', '2.4.0', '2.5.0']), + ('mitaka', + ['2.5.0', '2.6.0', '2.7.0']), + ('newton', + ['2.8.0', '2.9.0', '2.10.0']), + ('ocata', + ['2.11.0', '2.12.0', '2.13.0']), + ('pike', + ['2.13.0', '2.15.0']), + ('queens', + ['2.16.0', '2.17.0']), + ('rocky', + ['2.18.0', '2.19.0']), + ('stein', + ['2.20.0', '2.21.0']), + ('train', + ['2.22.0', '2.23.0']), + ('ussuri', + ['2.24.0', '2.25.0']), +]) + +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12', 'liberty'), + ('13', 'mitaka'), + ('14', 'newton'), + ('15', 'ocata'), + ('16', 'pike'), + ('17', 'queens'), + ('18', 'rocky'), + ('19', 'stein'), + ('20', 'train'), + ('21', 'ussuri'), + ]), + 'neutron-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'cinder-common': OrderedDict([ + ('7', 'liberty'), + ('8', 'mitaka'), + ('9', 'newton'), + ('10', 'ocata'), + ('11', 'pike'), + ('12', 'queens'), + ('13', 'rocky'), + ('14', 'stein'), + ('15', 'train'), + ('16', 'ussuri'), + ]), + 'keystone': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('17', 'ussuri'), + ]), + 'horizon-common': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), + 'ceilometer-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'heat-common': OrderedDict([ + ('5', 'liberty'), + ('6', 'mitaka'), + ('7', 'newton'), + ('8', 'ocata'), + ('9', 'pike'), + ('10', 'queens'), + ('11', 'rocky'), + ('12', 'stein'), + ('13', 'train'), + ('14', 'ussuri'), + ]), + 'glance-common': OrderedDict([ + ('11', 'liberty'), + ('12', 'mitaka'), + ('13', 'newton'), + ('14', 'ocata'), + ('15', 'pike'), + ('16', 'queens'), + ('17', 'rocky'), + ('18', 'stein'), + ('19', 'train'), + ('20', 'ussuri'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8', 'liberty'), + ('9', 'mitaka'), + ('10', 'newton'), + ('11', 'ocata'), + ('12', 'pike'), + ('13', 'queens'), + ('14', 'rocky'), + ('15', 'stein'), + ('16', 'train'), + ('18', 'ussuri'), + ]), +} + +DEFAULT_LOOPBACK_SIZE = '5G' + +DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading' + +DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY] + + +class CompareOpenStackReleases(BasicStringComparator): + """Provide comparisons of OpenStack releases. + + Use in the form of + + if CompareOpenStackReleases(release) > 'mitaka': + # do something with mitaka + """ + _list = OPENSTACK_RELEASES + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_installed_semantic_versioned_packages(): + '''Get a list of installed packages which have OpenStack semantic versioning + + :returns List of installed packages + :rtype: [pkg1, pkg2, ...] 
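+
+    Example (illustrative; the result depends on what is installed)::
+
+        get_installed_semantic_versioned_packages()
+        # e.g. -> ['keystone', 'nova-common']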
+ ''' + return filter_missing_packages(PACKAGE_CODENAMES.keys()) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src is None: + return rel + if src in ['distro', 'distro-proposed', 'proposed']: + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('-')[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if (src.startswith('deb') or + src.startswith('ppa') or + src.startswith('snap')): + for v in OPENSTACK_CODENAMES.values(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): + '''Determine OpenStack version number from codename.''' + for k, v in six.iteritems(version_map): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_version_codename_swift(codename): + '''Determine OpenStack version number of swift from codename.''' + for k, v in six.iteritems(SWIFT_CODENAMES): + if k == codename: + return v[-1] + e = 'Could not derive swift version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_swift_codename(version): + '''Determine OpenStack codename that corresponds to swift version.''' + codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] + + if len(codenames) > 1: + # If more than one release codename contains this version we determine + # the actual codename based on the highest available install source. + for codename in reversed(codenames): + releases = UBUNTU_OPENSTACK_RELEASE + release = [k for k, v in six.iteritems(releases) if codename in v] + ret = subprocess.check_output(['apt-cache', 'policy', 'swift']) + if six.PY3: + ret = ret.decode('UTF-8') + if codename in ret or release[0] in ret: + return codename + elif len(codenames) == 1: + return codenames[0] + + # NOTE: fallback - attempt to match with just major.minor version + match = re.match(r'^(\d+)\.(\d+)', version) + if match: + major_minor_version = match.group(0) + for codename, versions in six.iteritems(SWIFT_CODENAMES): + for release_version in versions: + if release_version.startswith(major_minor_version): + return codename + + return None + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + + if snap_install_requested(): + cmd = ['snap', 'list', package] + try: + out = subprocess.check_output(cmd) + if six.PY3: + out = out.decode('UTF-8') + except subprocess.CalledProcessError: + return None + lines = out.split('\n') + for line in lines: + if package in line: + # Second item in list is Version + return line.split()[1] + + cache = apt_cache() + + try: + pkg = cache[package] + except Exception: + if not fatal: + return None + # the package is unknown to the current apt cache. 
+ e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.upstream_version(pkg.current_ver.ver_str) + if 'swift' in pkg.name: + # Fully x.y.z match for swift versions + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers) + else: + # x.y match only for 20XX.X + # and ignore patch level for other packages + match = re.match(r'^(\d+)\.(\d+)', vers) + + if match: + vers = match.group(0) + + # Generate a major version number for newer semantic + # versions of openstack projects + major_vers = vers.split('.')[0] + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + major_vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][major_vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + return get_swift_codename(vers) + else: + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + for cname, version in six.iteritems(vers_map): + if cname == codename: + return version[-1] + else: + vers_map = OPENSTACK_CODENAMES + for version, cname in six.iteritems(vers_map): + if cname == codename: + return version + # e = "Could not determine OpenStack version for package: %s" % pkg + # error_out(e) + + +# Module local cache variable for the os_release. +_os_rel = None + + +def reset_os_release(): + '''Unset the cached os_release version''' + global _os_rel + _os_rel = None + + +def os_release(package, base=None, reset_cache=False, source_key=None): + """Returns OpenStack release codename from a cached global. + + If reset_cache then unset the cached os_release version and return the + freshly determined version. + + If the codename can not be determined from either an installed package or + the installation source, the earliest release supported by the charm should + be returned. + + :param package: Name of package to determine release from + :type package: str + :param base: Fallback codename if endavours to determine from package fail + :type base: Optional[str] + :param reset_cache: Reset any cached codename value + :type reset_cache: bool + :param source_key: Name of source configuration option + (default: 'openstack-origin') + :type source_key: Optional[str] + :returns: OpenStack release codename + :rtype: str + """ + source_key = source_key or 'openstack-origin' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] + global _os_rel + if reset_cache: + reset_os_release() + if _os_rel: + return _os_rel + _os_rel = ( + get_os_codename_package(package, fatal=False) or + get_os_codename_install_source(config(source_key)) or + base) + return _os_rel + + +@deprecate("moved to charmhelpers.fetch.import_key()", "2017-07", log=juju_log) +def import_key(keyid): + """Import a key, either ASCII armored, or a GPG key id. + + @param keyid: the key in ASCII armor format, or a GPG key id. + @raises SystemExit() via sys.exit() on failure. 
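+
+    Example (illustrative, using the cloud archive key id defined above)::
+
+        import_key('5EDB1B62EC4926EA')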
+ """ + try: + return fetch_import_key(keyid) + except GPGKeyError as e: + error_out("Could not import key: {}".format(str(e))) + + +def get_source_and_pgp_key(source_and_key): + """Look for a pgp key ID or ascii-armor key in the given input. + + :param source_and_key: Sting, "source_spec|keyid" where '|keyid' is + optional. + :returns (source_spec, key_id OR None) as a tuple. Returns None for key_id + if there was no '|' in the source_and_key string. + """ + try: + source, key = source_and_key.split('|', 2) + return source, key or None + except ValueError: + return source_and_key, None + + +@deprecate("use charmhelpers.fetch.add_source() instead.", + "2017-07", log=juju_log) +def configure_installation_source(source_plus_key): + """Configure an installation source. + + The functionality is provided by charmhelpers.fetch.add_source() + The difference between the two functions is that add_source() signature + requires the key to be passed directly, whereas this function passes an + optional key by appending '|<key>' to the end of the source specificiation + 'source'. + + Another difference from add_source() is that the function calls sys.exit(1) + if the configuration fails, whereas add_source() raises + SourceConfigurationError(). Another difference, is that add_source() + silently fails (with a juju_log command) if there is no matching source to + configure, whereas this function fails with a sys.exit(1) + + :param source: String_plus_key -- see above for details. + + Note that the behaviour on error is to log the error to the juju log and + then call sys.exit(1). + """ + if source_plus_key.startswith('snap'): + # Do nothing for snap installs + return + # extract the key if there is one, denoted by a '|' in the rel + source, key = get_source_and_pgp_key(source_plus_key) + + # handle the ordinary sources via add_source + try: + fetch_add_source(source, key, fail_invalid=True) + except SourceConfigError as se: + error_out(str(se)) + + +def config_value_changed(option): + """ + Determine if config value changed since last call to this function. + """ + hook_data = unitdata.HookData() + with hook_data(): + db = unitdata.kv() + current = config(option) + saved = db.get(option) + db.set(option, current) + if saved is None: + return False + return current != saved + + +def get_endpoint_key(service_name, relation_id, unit_name): + """Return the key used to refer to an ep changed notification from a unit. + + :param service_name: Service name eg nova, neutron, placement etc + :type service_name: str + :param relation_id: The id of the relation the unit is on. + :type relation_id: str + :param unit_name: The name of the unit publishing the notification. + :type unit_name: str + :returns: The key used to refer to an ep changed notification from a unit + :rtype: str + """ + return '{}-{}-{}'.format( + service_name, + relation_id.replace(':', '_'), + unit_name.replace('/', '_')) + + +def get_endpoint_notifications(service_names, rel_name='identity-service'): + """Return all notifications for the given services. + + :param service_names: List of service name. + :type service_name: List + :param rel_name: Name of the relation to query + :type rel_name: str + :returns: A dict containing the source of the notification and its nonce. 
+
+
+def get_endpoint_notifications(service_names, rel_name='identity-service'):
+    """Return all notifications for the given services.
+
+    :param service_names: List of service names.
+    :type service_names: List
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: A dict containing the source of the notification and its nonce.
+    :rtype: Dict[str, str]
+    """
+    notifications = {}
+    for rid in relation_ids(rel_name):
+        for unit in related_units(relid=rid):
+            ep_changed_json = relation_get(
+                rid=rid,
+                unit=unit,
+                attribute='ep_changed')
+            if ep_changed_json:
+                ep_changed = json.loads(ep_changed_json)
+                for service in service_names:
+                    if ep_changed.get(service):
+                        key = get_endpoint_key(service, rid, unit)
+                        notifications[key] = ep_changed[service]
+    return notifications
+
+
+def endpoint_changed(service_name, rel_name='identity-service'):
+    """Whether a new notification has been received for an endpoint.
+
+    :param service_name: Service name eg nova, neutron, placement etc
+    :type service_name: str
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    :returns: Whether endpoint has changed
+    :rtype: bool
+    """
+    changed = False
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            [service_name],
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            if db.get(key) != nonce:
+                juju_log(('New endpoint change notification found: '
+                          '{}={}').format(key, nonce),
+                         'INFO')
+                changed = True
+                break
+    return changed
+
+
+def save_endpoint_changed_triggers(service_names, rel_name='identity-service'):
+    """Save the endpoint triggers in db so they can be tracked for changes.
+
+    :param service_names: List of service names.
+    :type service_names: List
+    :param rel_name: Name of the relation to query
+    :type rel_name: str
+    """
+    with unitdata.HookData()() as t:
+        db = t[0]
+        notifications = get_endpoint_notifications(
+            service_names,
+            rel_name=rel_name)
+        for key, nonce in notifications.items():
+            db.set(key, nonce)
+
+
+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
+    """
+    Write an rc file in the charm-delivered directory containing
+    exported environment variables provided by env_vars. Any charm scripts run
+    outside the juju hook environment can source this scriptrc to obtain
+    updated config information necessary to perform health checks or
+    service changes.
+    """
+    juju_rc_path = "%s/%s" % (charm_dir(), script_path)
+    if not os.path.exists(os.path.dirname(juju_rc_path)):
+        os.mkdir(os.path.dirname(juju_rc_path))
+    with open(juju_rc_path, 'wt') as rc_script:
+        rc_script.write(
+            "#!/bin/bash\n")
+        [rc_script.write('export %s=%s\n' % (u, p))
+         for u, p in six.iteritems(env_vars) if u != "script_path"]
+
+
+def openstack_upgrade_available(package):
+    """
+    Determines if an OpenStack upgrade is available from installation
+    source, based on version of installed package.
+
+    :param package: str: Name of installed package.
+
+    :returns: bool: True if configured installation source offers a newer
+              version of package.
+    """
+
+    src = config('openstack-origin')
+    cur_vers = get_os_version_package(package)
+    if not cur_vers:
+        # The package has not been installed yet, do not attempt upgrade
+        return False
+    if "swift" in package:
+        codename = get_os_codename_install_source(src)
+        avail_vers = get_os_version_codename_swift(codename)
+    else:
+        try:
+            avail_vers = get_os_version_install_source(src)
+        except Exception:
+            avail_vers = cur_vers
+    apt.init()
+    return apt.version_compare(avail_vers, cur_vers) >= 1
+
+
+def ensure_block_device(block_device):
+    '''
+    Confirm block_device, create as loopback if necessary.
+
+    :param block_device: str: Full path of block device to ensure.
+
+    :returns: str: Full path of ensured block device.
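+
+    Example (illustrative only)::
+
+        ensure_block_device('vdb')          # -> '/dev/vdb'
+        ensure_block_device('/tmp/img|5G')  # -> a 5G loopback-backed device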
+ ''' + _none = ['None', 'none', None] + if (block_device in _none): + error_out('prepare_storage(): Missing required input: block_device=%s.' + % block_device) + + if block_device.startswith('/dev/'): + bdev = block_device + elif block_device.startswith('/'): + _bd = block_device.split('|') + if len(_bd) == 2: + bdev, size = _bd + else: + bdev = block_device + size = DEFAULT_LOOPBACK_SIZE + bdev = ensure_loopback_device(bdev, size) + else: + bdev = '/dev/%s' % block_device + + if not is_block_device(bdev): + error_out('Failed to locate valid block device at %s' % bdev) + + return bdev + + +def clean_storage(block_device): + ''' + Ensures a block device is clean. That is: + - unmounted + - any lvm volume groups are deactivated + - any lvm physical device signatures removed + - partition table wiped + + :param block_device: str: Full path to block device to clean. + ''' + for mp, d in mounts(): + if d == block_device: + juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % + (d, mp), level=INFO) + umount(mp, persist=True) + + if is_lvm_physical_volume(block_device): + deactivate_lvm_volume_group(block_device) + remove_lvm_physical_volume(block_device) + else: + zap_disk(block_device) + + +is_ip = ip.is_ip +ns_query = ip.ns_query +get_host_ip = ip.get_host_ip +get_hostname = ip.get_hostname + + +def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): + mm_map = {} + if os.path.isfile(mm_file): + with open(mm_file, 'r') as f: + mm_map = json.load(f) + return mm_map + + +def sync_db_with_multi_ipv6_addresses(database, database_user, + relation_prefix=None): + hosts = get_ipv6_addr(dynamic_only=False) + + if config('vip'): + vips = config('vip').split() + for vip in vips: + if vip and is_ipv6(vip): + hosts.append(vip) + + kwargs = {'database': database, + 'username': database_user, + 'hostname': json.dumps(hosts)} + + if relation_prefix: + for key in list(kwargs.keys()): + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] + del kwargs[key] + + for rid in relation_ids('shared-db'): + relation_set(relation_id=rid, **kwargs) + + +def os_requires_version(ostack_release, pkg): + """ + Decorator for hook to specify minimum supported release + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args): + if os_release(pkg) < ostack_release: + raise Exception("This hook is not supported on releases" + " before %s" % ostack_release) + f(*args) + return wrapped_f + return wrap + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Set the state of the workload status for the charm. + + This calls _determine_os_workload_status() to get the new state, message + and sets the status using status_set() + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. 
+ @returns state, message: the new workload status, user message + """ + state, message = _determine_os_workload_status( + configs, required_interfaces, charm_func, services, ports) + status_set(state, message) + + +def _determine_os_workload_status( + configs, required_interfaces, charm_func=None, + services=None, ports=None): + """Determine the state of the workload status for the charm. + + This function returns the new workload status for the charm based + on the state of the interfaces, the paused state and whether the + services are actually running and any specified ports are open. + + This checks: + + 1. if the unit should be paused, that it is actually paused. If so the + state is 'maintenance' + message, else 'broken'. + 2. that the interfaces/relations are complete. If they are not then + it sets the state to either 'broken' or 'waiting' and an appropriate + message. + 3. If all the relation data is set, then it checks that the actual + services really are running. If not it sets the state to 'broken'. + + If everything is okay then the state returns 'active'. + + @param configs: a templating.OSConfigRenderer() object + @param required_interfaces: {generic: [specific, specific2, ...]} + @param charm_func: a callable function that returns state, message. The + signature is charm_func(configs) -> (state, message) + @param services: list of strings OR dictionary specifying services/ports + @param ports: OPTIONAL list of port numbers. + @returns state, message: the new workload status, user message + """ + state, message = _ows_check_if_paused(services, ports) + + if state is None: + state, message = _ows_check_generic_interfaces( + configs, required_interfaces) + + if state != 'maintenance' and charm_func: + # _ows_check_charm_func() may modify the state, message + state, message = _ows_check_charm_func( + state, message, lambda: charm_func(configs)) + + if state is None: + state, message = _ows_check_services_running(services, ports) + + if state is None: + state = 'active' + message = "Unit is ready" + juju_log(message, 'INFO') + + try: + if config(POLICYD_CONFIG_NAME): + message = "{} {}".format(policyd_status_message_prefix(), message) + except Exception: + pass + + return state, message + + +def _ows_check_if_paused(services=None, ports=None): + """Check if the unit is supposed to be paused, and if so check that the + services/ports (if passed) are actually stopped/not being listened to. + + If the unit isn't supposed to be paused, just return None, None + + If the unit is performing a series upgrade, return a message indicating + this. + + @param services: OPTIONAL services spec or list of service names. + @param ports: OPTIONAL list of port numbers. + @returns state, message or None, None + """ + if is_unit_upgrading_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "blocked" + message = ("Ready for do-release-upgrade and reboot. " + "Set complete when finished.") + return state, message + + if is_unit_paused_set(): + state, message = check_actually_paused(services=services, + ports=ports) + if state is None: + # we're paused okay, so set maintenance and return + state = "maintenance" + message = "Paused. Use 'resume' action to resume normal service." + return state, message + return None, None + + +def _ows_check_generic_interfaces(configs, required_interfaces): + """Check the complete contexts to determine the workload status. 
+
+    - Checks for missing or incomplete contexts
+    - juju log details of missing required data.
+    - determines the correct workload status
+    - creates an appropriate message for status_set(...)
+
+    if there are no problems then the function returns None, None
+
+    @param configs: a templating.OSConfigRenderer() object
+    @param required_interfaces: {generic_interface: [specific_interface], }
+    @returns state, message or None, None
+    """
+    incomplete_rel_data = incomplete_relation_data(configs,
+                                                   required_interfaces)
+    state = None
+    message = None
+    missing_relations = set()
+    incomplete_relations = set()
+
+    for generic_interface, relations_states in incomplete_rel_data.items():
+        related_interface = None
+        missing_data = {}
+        # Related or not?
+        for interface, relation_state in relations_states.items():
+            if relation_state.get('related'):
+                related_interface = interface
+                missing_data = relation_state.get('missing_data')
+                break
+        # No relation ID for the generic_interface?
+        if not related_interface:
+            juju_log("{} relation is missing and must be related for "
+                     "functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            missing_relations.add(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case - relation ID exists but departing
+                _hook_name = hook_name()
+                if (('departed' in _hook_name or 'broken' in _hook_name) and
+                        related_interface in _hook_name):
+                    state = 'blocked'
+                    missing_relations.add(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface),
+                             "WARN")
+                # Normal case relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relation's interface, {}, is related but has"
+                             " no units in the relation."
+                             "".format(generic_interface, related_interface),
+                             "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in missing_relations:
+                incomplete_relations.add(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    return state, message
+
+
+def _ows_check_charm_func(state, message, charm_func_with_configs):
+    """Run a custom check function for the charm to see if it wants to
+    change the state.  This is only run if not in 'maintenance' and
+    tests to see if the new state is more important than the previous
+    one determined by the interfaces/relations check.
+
+    @param state: the previously determined state so far.
+    @param message: the user orientated message so far.
+    @param charm_func: a callable function that returns state, message
+    @returns state, message strings.
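+
+    Example (illustrative sketch only)::
+
+        _ows_check_charm_func(
+            'waiting', 'Incomplete relations: identity',
+            lambda: ('blocked', 'vault is sealed'))
+        # -> ('blocked', 'Incomplete relations: identity, vault is sealed')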
+ """ + if charm_func_with_configs: + charm_state, charm_message = charm_func_with_configs() + if (charm_state != 'active' and + charm_state != 'unknown' and + charm_state is not None): + state = workload_state_compare(state, charm_state) + if message: + charm_message = charm_message.replace("Incomplete relations: ", + "") + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + return state, message + + +def _ows_check_services_running(services, ports): + """Check that the services that should be running are actually running + and that any ports specified are being listened to. + + @param services: list of strings OR dictionary specifying services/ports + @param ports: list of ports + @returns state, message: strings or None, None + """ + messages = [] + state = None + if services is not None: + services = _extract_services_list_helper(services) + services_running, running = _check_running_services(services) + if not all(running): + messages.append( + "Services not running that should be: {}" + .format(", ".join(_filter_tuples(services_running, False)))) + state = 'blocked' + # also verify that the ports that should be open are open + # NB, that ServiceManager objects only OPTIONALLY have ports + map_not_open, ports_open = ( + _check_listening_on_services_ports(services)) + if not all(ports_open): + # find which service has missing ports. They are in service + # order which makes it a bit easier. + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in map_not_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "Services with ports not open that should be: {}" + .format(message)) + state = 'blocked' + + if ports is not None: + # and we can also check ports which we don't know the service for + ports_open, ports_open_bools = _check_listening_on_ports_list(ports) + if not all(ports_open_bools): + messages.append( + "Ports which should be open, but are not: {}" + .format(", ".join([str(p) for p, v in ports_open + if not v]))) + state = 'blocked' + + if state is not None: + message = "; ".join(messages) + return state, message + + return None, None + + +def _extract_services_list_helper(services): + """Extract a OrderedDict of {service: [ports]} of the supplied services + for use by the other functions. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param services: see above + @returns OrderedDict(service: [ports], ...) + """ + if services is None: + return {} + if isinstance(services, dict): + services = services.values() + # either extract the list of services from the dictionary, or if + # it is a simple string, use that. i.e. works with mixed lists. + _s = OrderedDict() + for s in services: + if isinstance(s, dict) and 'service' in s: + _s[s['service']] = s.get('ports', []) + if isinstance(s, str): + _s[s] = [] + return _s + + +def _check_running_services(services): + """Check that the services dict provided is actually running and provide + a list of (service, boolean) tuples for each service. + + Returns both a zipped list of (service, boolean) and a list of booleans + in the same order as the services. + + @param services: OrderedDict of strings: [ports], one for each service to + check. 
+    @returns [(service, boolean), ...], : results for checks
+              [boolean]                 : just the result of the service checks
+    """
+    services_running = [service_running(s) for s in services]
+    return list(zip(services, services_running)), services_running
+
+
+def _check_listening_on_services_ports(services, test=False):
+    """Check that the unit is actually listening (has the port open) on the
+    ports that the service specifies are open. If test is True then the
+    function returns the services with ports that are open rather than
+    closed.
+
+    Returns an OrderedDict of service: ports and a list of booleans
+
+    @param services: OrderedDict(service: [port, ...], ...)
+    @param test: default=False, if False, test for closed, otherwise open.
+    @returns OrderedDict(service: [port-not-open, ...]...), [boolean]
+    """
+    test = not(not(test))  # ensure test is True or False
+    all_ports = list(itertools.chain(*services.values()))
+    ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
+    map_ports = OrderedDict()
+    matched_ports = [p for p, opened in zip(all_ports, ports_states)
+                     if opened == test]  # keep ports whose state matches test
+    for service, ports in services.items():
+        set_ports = set(ports).intersection(matched_ports)
+        if set_ports:
+            map_ports[service] = set_ports
+    return map_ports, ports_states
+
+
+def _check_listening_on_ports_list(ports):
+    """Check that the ports list given are being listened to
+
+    Returns a list of ports being listened to and a list of the
+    booleans.
+
+    @param ports: LIST of port numbers.
+    @returns [(port_num, boolean), ...], [boolean]
+    """
+    ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+    return zip(ports, ports_open), ports_open
+
+
+def _filter_tuples(services_states, state):
+    """Return a simple list from a list of tuples according to the condition
+
+    @param services_states: LIST of (string, boolean): service and running
+        state.
+    @param state: Boolean to match the tuple against.
+    @returns [LIST of strings] that matched the tuple RHS.
+    """
+    return [s for s, b in services_states if b == state]
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
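+
+
+# Illustrative note (added commentary, not upstream code):
+# workload_state_compare() returns whichever of the two states ranks higher
+# in the hierarchy above, e.g.:
+#
+#   workload_state_compare('waiting', 'blocked')     # -> 'blocked'
+#   workload_state_compare('active', 'maintenance')  # -> 'maintenance'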
+
+
+def incomplete_relation_data(configs, required_interfaces):
+    """Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+        required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in the
+    list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points. Example:
+        {'message':
+             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+              'zeromq-configuration': {'related': False}},
+         'identity':
+             {'identity-service': {'related': False}},
+         'database':
+             {'pgsql-db': {'related': False},
+              'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = [
+        svc_type
+        for svc_type, interfaces in required_interfaces.items()
+        if not set(interfaces).intersection(complete_ctxts)]
+    return {
+        i: configs.get_incomplete_context_data(required_interfaces[i])
+        for i in incomplete_relations}
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if openstack_upgrade_available(package):
+        if config('action-managed-upgrade'):
+            juju_log('Upgrading OpenStack release')
+
+            try:
+                upgrade_callback(configs=configs)
+                action_set({'outcome': 'success, upgrade completed.'})
+                ret = True
+            except Exception:
+                action_set({'outcome': 'upgrade failed, see traceback.'})
+                action_set({'traceback': traceback.format_exc()})
+                action_fail('do_openstack_upgrade resulted in an '
+                            'unexpected error')
+        else:
+            action_set({'outcome': 'action-managed-upgrade config is '
+                                   'False, skipped upgrade.'})
+    else:
+        action_set({'outcome': 'no upgrade available.'})
+
+    return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
+
+
+def check_actually_paused(services=None, ports=None):
+    """Check that services listed in the services object and ports
+    are actually closed (not listened to), to verify that the unit is
+    properly paused.
+ + @param services: See _extract_services_list_helper + @returns status, : string for status (None if okay) + message : string for problem for status_set + """ + state = None + message = None + messages = [] + if services is not None: + services = _extract_services_list_helper(services) + services_running, services_states = _check_running_services(services) + if any(services_states): + # there shouldn't be any running so this is a problem + messages.append("these services running: {}" + .format(", ".join( + _filter_tuples(services_running, True)))) + state = "blocked" + ports_open, ports_open_bools = ( + _check_listening_on_services_ports(services, True)) + if any(ports_open_bools): + message_parts = {service: ", ".join([str(v) for v in open_ports]) + for service, open_ports in ports_open.items()} + message = ", ".join( + ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) + messages.append( + "these service:ports are open: {}".format(message)) + state = 'blocked' + if ports is not None: + ports_open, bools = _check_listening_on_ports_list(ports) + if any(bools): + messages.append( + "these ports which should be closed, but are open: {}" + .format(", ".join([str(p) for p, v in ports_open if v]))) + state = 'blocked' + if messages: + message = ("Services should be paused but {}" + .format(", ".join(messages))) + return state, message + + +def set_unit_paused(): + """Set the unit to a paused state in the local kv() store. + This does NOT actually pause the unit + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', True) + + +def clear_unit_paused(): + """Clear the unit from a paused state in the local kv() store + This does NOT actually restart any services - it only clears the + local state. + """ + with unitdata.HookData()() as t: + kv = t[0] + kv.set('unit-paused', False) + + +def is_unit_paused_set(): + """Return the state of the kv().get('unit-paused'). + This does NOT verify that the unit really is paused. + + To help with units that don't have HookData() (testing) + if it excepts, return False + """ + try: + with unitdata.HookData()() as t: + kv = t[0] + # transform something truth-y into a Boolean. + return not(not(kv.get('unit-paused'))) + except Exception: + return False + + +def manage_payload_services(action, services=None, charm_func=None): + """Run an action against all services. + + An optional charm_func() can be called. It should raise an Exception to + indicate that the function failed. If it was succesfull it should return + None or an optional message. + + The signature for charm_func is: + charm_func() -> message: str + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + :param action: Action to run: pause, resume, start or stop. + :type action: str + :param services: See above + :type services: See above + :param charm_func: function to run for custom charm pausing. 
+ :type charm_func: f() + :returns: Status boolean and list of messages + :rtype: (bool, []) + :raises: RuntimeError + """ + actions = { + 'pause': service_pause, + 'resume': service_resume, + 'start': service_start, + 'stop': service_stop} + action = action.lower() + if action not in actions.keys(): + raise RuntimeError( + "action: {} must be one of: {}".format(action, + ', '.join(actions.keys()))) + services = _extract_services_list_helper(services) + messages = [] + success = True + if services: + for service in services.keys(): + rc = actions[action](service) + if not rc: + success = False + messages.append("{} didn't {} cleanly.".format(service, + action)) + if charm_func: + try: + message = charm_func() + if message: + messages.append(message) + except Exception as e: + success = False + messages.append(str(e)) + return success, messages + + +def pause_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Pause a unit by stopping the services and setting 'unit-paused' + in the local kv() store. + + Also checks that the services have stopped and ports are no longer + being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None, None to indicate that the unit + didn't pause cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are stopped, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm pausing. + @returns None + @raises Exception(message) on an error for action_fail(). + """ + _, messages = manage_payload_services( + 'pause', + services=services, + charm_func=charm_func) + set_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages and not is_unit_upgrading_set(): + raise Exception("Couldn't pause: {}".format("; ".join(messages))) + + +def resume_unit(assess_status_func, services=None, ports=None, + charm_func=None): + """Resume a unit by starting the services and clearning 'unit-paused' + in the local kv() store. + + Also checks that the services have started and ports are being listened to. + + An optional charm_func() can be called that can either raise an + Exception or return non None to indicate that the unit + didn't resume cleanly. + + The signature for charm_func is: + charm_func() -> message: string + + charm_func() is executed after any services are started, if supplied. + + The services object can either be: + - None : no services were passed (an empty dict is returned) + - a list of strings + - A dictionary (optionally OrderedDict) {service_name: {'service': ..}} + - An array of [{'service': service_name, ...}, ...] + + @param assess_status_func: (f() -> message: string | None) or None + @param services: OPTIONAL see above + @param ports: OPTIONAL list of port + @param charm_func: function to run for custom charm resuming. + @returns None + @raises Exception(message) on an error for action_fail(). 
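+
+    Example (illustrative sketch; 'configs' and 'required_interfaces' stand
+    in for hypothetical charm-provided values)::
+
+        resume_unit(
+            make_assess_status_func(configs, required_interfaces),
+            services=['apache2'])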
+ """ + _, messages = manage_payload_services( + 'resume', + services=services, + charm_func=charm_func) + clear_unit_paused() + if assess_status_func: + message = assess_status_func() + if message: + messages.append(message) + if messages: + raise Exception("Couldn't resume: {}".format("; ".join(messages))) + + +def make_assess_status_func(*args, **kwargs): + """Creates an assess_status_func() suitable for handing to pause_unit() + and resume_unit(). + + This uses the _determine_os_workload_status(...) function to determine + what the workload_status should be for the unit. If the unit is + not in maintenance or active states, then the message is returned to + the caller. This is so an action that doesn't result in either a + complete pause or complete resume can signal failure with an action_fail() + """ + def _assess_status_func(): + state, message = _determine_os_workload_status(*args, **kwargs) + status_set(state, message) + if state not in ['maintenance', 'active']: + return message + return None + + return _assess_status_func + + +def pausable_restart_on_change(restart_map, stopstart=False, + restart_functions=None): + """A restart_on_change decorator that checks to see if the unit is + paused. If it is paused then the decorated function doesn't fire. + + This is provided as a helper, as the @restart_on_change(...) decorator + is in core.host, yet the openstack specific helpers are in this file + (contrib.openstack.utils). Thus, this needs to be an optional feature + for openstack charms (or charms that wish to use the openstack + pause/resume type features). + + It is used as follows: + + from contrib.openstack.utils import ( + pausable_restart_on_change as restart_on_change) + + @restart_on_change(restart_map, stopstart=<boolean>) + def some_hook(...): + pass + + see core.utils.restart_on_change() for more details. + + Note restart_map can be a callable, in which case, restart_map is only + evaluated at runtime. This means that it is lazy and the underlying + function won't be called if the decorated function is never called. Note, + retains backwards compatibility for passing a non-callable dictionary. + + @param f: the function to decorate + @param restart_map: (optionally callable, which then returns the + restart_map) the restart map {conf_file: [services]} + @param stopstart: DEFAULT false; whether to stop, start or just restart + @returns decorator to use a restart_on_change with pausability + """ + def wrap(f): + # py27 compatible nonlocal variable. When py3 only, replace with + # nonlocal keyword + __restart_map_cache = {'cache': None} + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + if is_unit_paused_set(): + return f(*args, **kwargs) + if __restart_map_cache['cache'] is None: + __restart_map_cache['cache'] = restart_map() \ + if callable(restart_map) else restart_map + # otherwise, normal restart_on_change functionality + return restart_on_change_helper( + (lambda: f(*args, **kwargs)), __restart_map_cache['cache'], + stopstart, restart_functions) + return wrapped_f + return wrap + + +def ordered(orderme): + """Converts the provided dictionary into a collections.OrderedDict. + + The items in the returned OrderedDict will be inserted based on the + natural sort order of the keys. Nested dictionaries will also be sorted + in order to ensure fully predictable ordering. + + :param orderme: the dict to order + :return: collections.OrderedDict + :raises: ValueError: if `orderme` isn't a dict instance. 
+ """ + if not isinstance(orderme, dict): + raise ValueError('argument must be a dict type') + + result = OrderedDict() + for k, v in sorted(six.iteritems(orderme), key=lambda x: x[0]): + if isinstance(v, dict): + result[k] = ordered(v) + else: + result[k] = v + + return result + + +def config_flags_parser(config_flags): + """Parses config flags string into dict. + + This parsing method supports a few different formats for the config + flag values to be parsed: + + 1. A string in the simple format of key=value pairs, with the possibility + of specifying multiple key value pairs within the same string. For + example, a string in the format of 'key1=value1, key2=value2' will + return a dict of: + + {'key1': 'value1', 'key2': 'value2'}. + + 2. A string in the above format, but supporting a comma-delimited list + of values for the same key. For example, a string in the format of + 'key1=value1, key2=value3,value4,value5' will return a dict of: + + {'key1': 'value1', 'key2': 'value2,value3,value4'} + + 3. A string containing a colon character (:) prior to an equal + character (=) will be treated as yaml and parsed as such. This can be + used to specify more complex key value pairs. For example, + a string in the format of 'key1: subkey1=value1, subkey2=value2' will + return a dict of: + + {'key1', 'subkey1=value1, subkey2=value2'} + + The provided config_flags string may be a list of comma-separated values + which themselves may be comma-separated list of values. + """ + # If we find a colon before an equals sign then treat it as yaml. + # Note: limit it to finding the colon first since this indicates assignment + # for inline yaml. + colon = config_flags.find(':') + equals = config_flags.find('=') + if colon > 0: + if colon < equals or equals < 0: + return ordered(yaml.safe_load(config_flags)) + + if config_flags.find('==') >= 0: + juju_log("config_flags is not in expected format (key=value)", + level=ERROR) + raise OSContextError + + # strip the following from each value. + post_strippers = ' ,' + # we strip any leading/trailing '=' or ' ' from the string then + # split on '='. + split = config_flags.strip(' =').split('=') + limit = len(split) + flags = OrderedDict() + for i in range(0, limit - 1): + current = split[i] + next = split[i + 1] + vindex = next.rfind(',') + if (i == limit - 2) or (vindex < 0): + value = next + else: + value = next[:vindex] + + if i == 0: + key = current + else: + # if this not the first entry, expect an embedded key. + index = current.rfind(',') + if index < 0: + juju_log("Invalid config value(s) at index %s" % (i), + level=ERROR) + raise OSContextError + key = current[index + 1:] + + # Add to collection. + flags[key.strip(post_strippers)] = value.rstrip(post_strippers) + + return flags + + +def os_application_version_set(package): + '''Set version of application for Juju 2.0 and later''' + application_version = get_upstream_version(package) + # NOTE(jamespage) if not able to figure out package version, fallback to + # openstack codename version detection. + if not application_version: + application_version_set(os_release(package)) + else: + application_version_set(application_version) + + +def os_application_status_set(check_function): + """Run the supplied function and set the application status accordingly. + + :param check_function: Function to run to get app states and messages. 
+ :type check_function: function + """ + state, message = check_function() + status_set(state, message, application=True) + + +def enable_memcache(source=None, release=None, package=None): + """Determine if memcache should be enabled on the local unit + + @param release: release of OpenStack currently deployed + @param package: package to derive OpenStack version deployed + @returns boolean Whether memcache should be enabled + """ + _release = None + if release: + _release = release + else: + _release = os_release(package) + if not _release: + _release = get_os_codename_install_source(source) + + return CompareOpenStackReleases(_release) >= 'mitaka' + + +def token_cache_pkgs(source=None, release=None): + """Determine additional packages needed for token caching + + @param source: source string for charm + @param release: release of OpenStack currently deployed + @returns List of package to enable token caching + """ + packages = [] + if enable_memcache(source=source, release=release): + packages.extend(['memcached', 'python-memcache']) + return packages + + +def update_json_file(filename, items): + """Updates the json `filename` with a given dict. + :param filename: path to json file (e.g. /etc/glance/policy.json) + :param items: dict of items to update + """ + if not items: + return + + with open(filename) as fd: + policy = json.load(fd) + + # Compare before and after and if nothing has changed don't write the file + # since that could cause unnecessary service restarts. + before = json.dumps(policy, indent=4, sort_keys=True) + policy.update(items) + after = json.dumps(policy, indent=4, sort_keys=True) + if before == after: + return + + with open(filename, "w") as fd: + fd.write(after) + + +@cached +def snap_install_requested(): + """ Determine if installing from snaps + + If openstack-origin is of the form snap:track/channel[/branch] + and channel is in SNAPS_CHANNELS return True. + """ + origin = config('openstack-origin') or "" + if not origin.startswith('snap:'): + return False + + _src = origin[5:] + if '/' in _src: + channel = _src.split('/')[1] + else: + # Handle snap:track with no channel + channel = 'stable' + return valid_snap_channel(channel) + + +def get_snaps_install_info_from_origin(snaps, src, mode='classic'): + """Generate a dictionary of snap install information from origin + + @param snaps: List of snaps + @param src: String of openstack-origin or source of the form + snap:track/channel + @param mode: String classic, devmode or jailmode + @returns: Dictionary of snaps with channels and modes + """ + + if not src.startswith('snap:'): + juju_log("Snap source is not a snap origin", 'WARN') + return {} + + _src = src[5:] + channel = '--channel={}'.format(_src) + + return {snap: {'channel': channel, 'mode': mode} + for snap in snaps} + + +def install_os_snaps(snaps, refresh=False): + """Install OpenStack snaps from channel and with mode + + @param snaps: Dictionary of snaps with channels and modes of the form: + {'snap_name': {'channel': 'snap_channel', + 'mode': 'snap_mode'}} + Where channel is a snapstore channel and mode is --classic, --devmode + or --jailmode. 
+    @param refresh: If True, refresh the snaps via snap_refresh rather than
+        installing them
+    """
+
+    def _ensure_flag(flag):
+        if flag.startswith('--'):
+            return flag
+        return '--{}'.format(flag)
+
+    if refresh:
+        for snap in snaps.keys():
+            snap_refresh(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+    else:
+        for snap in snaps.keys():
+            snap_install(snap,
+                         _ensure_flag(snaps[snap]['channel']),
+                         _ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+    """Set the unit to an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+    """Clear the unit from an upgrading state in the local kv() store.
+    """
+    with unitdata.HookData()() as t:
+        kv = t[0]
+        kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+    """Return the state of the kv().get('unit-upgrading').
+
+    To help with units that don't have HookData() (testing), return False
+    if the lookup raises.
+    """
+    try:
+        with unitdata.HookData()() as t:
+            kv = t[0]
+            # transform something truth-y into a Boolean.
+            return bool(kv.get('unit-upgrading'))
+    except Exception:
+        return False
+
+
+def series_upgrade_prepare(pause_unit_helper=None, configs=None):
+    """Run common series upgrade prepare tasks.
+
+    :param pause_unit_helper: function: Function to pause unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns: None
+    """
+    set_unit_upgrading()
+    if pause_unit_helper and configs:
+        if not is_unit_paused_set():
+            pause_unit_helper(configs)
+
+
+def series_upgrade_complete(resume_unit_helper=None, configs=None):
+    """Run common series upgrade complete tasks.
+
+    :param resume_unit_helper: function: Function to resume unit
+    :param configs: OSConfigRenderer object: Configurations
+    :returns: None
+    """
+    clear_unit_paused()
+    clear_unit_upgrading()
+    if configs:
+        configs.write_all()
+        if resume_unit_helper:
+            resume_unit_helper(configs)
+
+
+def is_db_initialised():
+    """Check leader storage to see if database has been initialised.
+
+    :returns: Whether DB has been initialised
+    :rtype: bool
+    """
+    db_initialised = None
+    if leader_get('db-initialised') is None:
+        juju_log(
+            'db-initialised key missing, assuming db is not initialised',
+            'DEBUG')
+        db_initialised = False
+    else:
+        db_initialised = bool_from_string(leader_get('db-initialised'))
+    juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG')
+    return db_initialised
+
+
+def set_db_initialised():
+    """Add flag to leader storage to indicate database has been initialised.
+    """
+    juju_log('Setting db-initialised to True', 'DEBUG')
+    leader_set({'db-initialised': True})
+
+
+def is_db_maintenance_mode(relid=None):
+    """Check relation data from notifications of db in maintenance mode.
+
+    :param relid: Only check the given relation id; defaults to all
+        'shared-db' relation ids.
+    :returns: Whether db has notified it is in maintenance mode.
+    :rtype: bool
+    """
+    juju_log('Checking for maintenance notifications', 'DEBUG')
+    if relid:
+        r_ids = [relid]
+    else:
+        r_ids = relation_ids('shared-db')
+    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
+    notifications = []
+    for r_id, unit in rids_units:
+        settings = relation_get(unit=unit, rid=r_id)
+        for key, value in settings.items():
+            if value and key in DB_MAINTENANCE_KEYS:
+                juju_log(
+                    'Unit: {}, Key: {}, Value: {}'.format(unit, key, value),
+                    'DEBUG')
+                try:
+                    notifications.append(bool_from_string(value))
+                except ValueError:
+                    juju_log(
+                        'Could not discern bool from {}'.format(value),
+                        'WARN')
+    return True in notifications
+
+
+@cached
+def container_scoped_relations():
+    """Get all the container scoped relations
+
+    :returns: List of relation names
+    :rtype: List
+    """
+    md = metadata()
+    relations = []
+    for relation_type in ('provides', 'requires', 'peers'):
+        for relation in md.get(relation_type, []):
+            if md[relation_type][relation].get('scope') == 'container':
+                relations.append(relation)
+    return relations
+
+
+def is_db_ready(use_current_context=False, rel_name=None):
+    """Check remote database is ready to be used.
+
+    Database relations are expected to provide a list of 'allowed' units to
+    confirm that the database is ready for use by those units.
+
+    If db relation has provided this information and local unit is a member,
+    returns True otherwise False.
+
+    :param use_current_context: Whether to limit checks to current hook
+                                context.
+    :type use_current_context: bool
+    :param rel_name: Name of relation to check
+    :type rel_name: string
+    :returns: Whether remote db is ready.
+    :rtype: bool
+    :raises: Exception
+    """
+    key = 'allowed_units'
+
+    rel_name = rel_name or 'shared-db'
+    this_unit = local_unit()
+
+    if use_current_context:
+        if relation_id() in relation_ids(rel_name):
+            rids_units = [(None, None)]
+        else:
+            raise Exception("use_current_context=True but not in {} "
+                            "rel hook contexts (currently in {})."
+                            .format(rel_name, relation_id()))
+    else:
+        rids_units = [(r_id, u)
+                      for r_id in relation_ids(rel_name)
+                      for u in related_units(r_id)]
+
+    for rid, unit in rids_units:
+        allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
+        if allowed_units and this_unit in allowed_units.split():
+            juju_log("This unit ({}) is in allowed unit list from {}".format(
+                this_unit,
+                unit), 'DEBUG')
+            return True
+
+    juju_log("This unit was not found in any allowed unit list")
+    return False
+
+
+def is_expected_scale(peer_relation_name='cluster'):
+    """Query juju goal-state to determine whether our peer- and dependency-
+    relations are at the expected scale.
+
+    Useful for deferring per unit per relation housekeeping work until we are
+    ready to complete it successfully and without unnecessary repetition.
+
+    Always returns True if version of juju used does not support goal-state.
+
+    :param peer_relation_name: Name of peer relation
+    :type peer_relation_name: string
+    :returns: True or False
+    :rtype: bool
+    """
+    def _get_relation_id(rel_type):
+        return next((rid for rid in relation_ids(reltype=rel_type)), None)
+
+    Relation = namedtuple('Relation', 'rel_type rel_id')
+    peer_rid = _get_relation_id(peer_relation_name)
+    # Units with no peers should still have a peer relation.
+ if not peer_rid: + juju_log('Not at expected scale, no peer relation found', 'DEBUG') + return False + expected_relations = [ + Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))] + if expect_ha(): + expected_relations.append( + Relation( + rel_type='ha', + rel_id=_get_relation_id('ha'))) + juju_log( + 'Checking scale of {} relations'.format( + ','.join([r.rel_type for r in expected_relations])), + 'DEBUG') + try: + if (len(related_units(relid=peer_rid)) < + len(list(expected_peer_units()))): + return False + for rel in expected_relations: + if not rel.rel_id: + juju_log( + 'Expected to find {} relation, but it is missing'.format( + rel.rel_type), + 'DEBUG') + return False + # Goal state returns every unit even for container scoped + # relations but the charm only ever has a relation with + # the local unit. + if rel.rel_type in container_scoped_relations(): + expected_count = 1 + else: + expected_count = len( + list(expected_related_units(reltype=rel.rel_type))) + if len(related_units(relid=rel.rel_id)) < expected_count: + juju_log( + ('Not at expected scale, not enough units on {} ' + 'relation'.format(rel.rel_type)), + 'DEBUG') + return False + except NotImplementedError: + return True + juju_log('All checks have passed, unit is at expected scale', 'DEBUG') + return True + + +def get_peer_key(unit_name): + """Get the peer key for this unit. + + The peer key is the key a unit uses to publish its status down the peer + relation + + :param unit_name: Name of unit + :type unit_name: string + :returns: Peer key for given unit + :rtype: string + """ + return 'unit-state-{}'.format(unit_name.replace('/', '-')) + + +UNIT_READY = 'READY' +UNIT_NOTREADY = 'NOTREADY' +UNIT_UNKNOWN = 'UNKNOWN' +UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN] + + +def inform_peers_unit_state(state, relation_name='cluster'): + """Inform peers of the state of this unit. + + :param state: State of unit to publish + :type state: string + :param relation_name: Name of relation to publish state on + :type relation_name: string + """ + if state not in UNIT_STATES: + raise ValueError( + "Setting invalid state {} for unit".format(state)) + for r_id in relation_ids(relation_name): + relation_set(relation_id=r_id, + relation_settings={ + get_peer_key(local_unit()): state}) + + +def get_peers_unit_state(relation_name='cluster'): + """Get the state of all peers. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Unit states keyed on unit name. + :rtype: dict + :raises: ValueError + """ + r_ids = relation_ids(relation_name) + rids_units = [(r, u) for r in r_ids for u in related_units(r)] + unit_states = {} + for r_id, unit in rids_units: + settings = relation_get(unit=unit, rid=r_id) + unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN) + if unit_states[unit] not in UNIT_STATES: + raise ValueError( + "Unit in unknown state {}".format(unit_states[unit])) + return unit_states + + +def are_peers_ready(relation_name='cluster'): + """Check if all peers are ready. + + :param relation_name: Name of relation to check peers on. + :type relation_name: string + :returns: Whether all units are ready. + :rtype: bool + """ + unit_states = get_peers_unit_state(relation_name) + return all(v == UNIT_READY for v in unit_states.values()) + + +def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'): + """Inform peers if this unit is ready. + + The check function should return a tuple (state, message). 
A state + of 'READY' indicates the unit is READY. + + :param check_unit_ready_func: Function to run to check readiness + :type check_unit_ready_func: function + :param relation_name: Name of relation to check peers on. + :type relation_name: string + """ + unit_ready, msg = check_unit_ready_func() + if unit_ready: + state = UNIT_READY + else: + state = UNIT_NOTREADY + juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG') + inform_peers_unit_state(state, relation_name) + + +def check_api_unit_ready(check_db_ready=True): + """Check if this unit is ready. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Whether unit state is ready and status message + :rtype: (bool, str) + """ + unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready) + return unit_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_unit_status(check_db_ready=True): + """Return a workload status and message for this unit. + + :param check_db_ready: Include checks of database readiness. + :type check_db_ready: bool + :returns: Workload state and message + :rtype: (bool, str) + """ + unit_state = WORKLOAD_STATES.ACTIVE + msg = 'Unit is ready' + if is_db_maintenance_mode(): + unit_state = WORKLOAD_STATES.MAINTENANCE + msg = 'Database in maintenance mode.' + elif is_unit_paused_set(): + unit_state = WORKLOAD_STATES.BLOCKED + msg = 'Unit paused.' + elif check_db_ready and not is_db_ready(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Allowed_units list provided but this unit not present' + elif not is_db_initialised(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Database not initialised' + elif not is_expected_scale(): + unit_state = WORKLOAD_STATES.WAITING + msg = 'Charm and its dependencies not yet at expected scale' + juju_log(msg, 'DEBUG') + return unit_state, msg + + +def check_api_application_ready(): + """Check if this application is ready. + + :returns: Whether application state is ready and status message + :rtype: (bool, str) + """ + app_state, msg = get_api_application_status() + return app_state == WORKLOAD_STATES.ACTIVE, msg + + +def get_api_application_status(): + """Return a workload status and message for this application. + + :returns: Workload state and message + :rtype: (bool, str) + """ + app_state, msg = get_api_unit_status() + if app_state == WORKLOAD_STATES.ACTIVE: + if are_peers_ready(): + return WORKLOAD_STATES.ACTIVE, 'Application Ready' + else: + return WORKLOAD_STATES.WAITING, 'Some units are not ready' + return app_state, msg diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/vaultlocker.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/vaultlocker.py new file mode 100644 index 00000000..4ee6c1db --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/openstack/vaultlocker.py @@ -0,0 +1,179 @@ +# Copyright 2018 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(VaultKVContext, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        try:
+            import hvac
+        except ImportError:
+            # BUG: #1862085 - if the relation is made to vault, but the
+            # 'encrypt' option is not set, then the charm errors with an
+            # import warning. This catches that, logs a warning, and returns
+            # with an empty context.
+            hookenv.log("VaultKVContext: trying to use the hvac python "
+                        "module but it's not available. Is the "
+                        "secrets-storage relation made, but the encrypt "
+                        "option not set?",
+                        level=hookenv.WARNING)
+            # return an empty context on hvac import error
+            return {}
+        ctxt = {}
+        # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
+        db = unitdata.kv()
+        # currently known-good secret-id
+        secret_id = db.get('secret-id')
+
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    token_success = False
+                    try:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        token_success = True
+                    except hvac.exceptions.InvalidRequest:
+                        # Try next
+                        pass
+
+                    if token_success:
+                        db.set('secret-id', secret_id)
+                        db.flush()
+
+                        ctxt['vault_url'] = vault_url
+                        ctxt['role_id'] = json.loads(role_id)
+                        ctxt['secret_id'] = secret_id
+                        ctxt['secret_backend'] = self.secret_backend
+                        vault_ca = data.get('vault_ca')
+                        if vault_ca:
+                            ctxt['vault_ca'] = json.loads(vault_ca)
+
+                        self.complete = True
+                        break
+                    else:
+                        if secret_id:
+                            ctxt['vault_url'] = vault_url
+                            ctxt['role_id'] = json.loads(role_id)
+                            ctxt['secret_id'] = secret_id
+                            ctxt['secret_backend'] = self.secret_backend
+                            vault_ca = data.get('vault_ca')
+                            if vault_ca:
+                                ctxt['vault_ca'] = json.loads(vault_ca)
+
+            if self.complete:
+                break
+
+        if ctxt:
+            self.complete = True
+
+        return ctxt
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype context: dict
+    :param priority: Priority of alternative configuration
+    :ptype priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    try:
+        import hvac
+    except ImportError:
+        return False
+    try:
+        vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+        vault_kv()
+        return vault_kv.complete
+    except hvac.exceptions.InvalidRequest:
+        return False
+
+
+# TODO: contribute a high level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    try:
+        # hvac 0.10.1 changed default adapter to JSONAdapter
+        client = hvac.Client(url=url, token=token, adapter=hvac.adapters.Request)
+    except AttributeError:
+        # hvac < 0.6.2 doesn't have adapter but uses the same response interface
+        client = hvac.Client(url=url, token=token)
+    else:
+        # hvac < 0.9.2 assumes adapter is an instance, so doesn't instantiate
+        if not isinstance(client.adapter, hvac.adapters.Request):
+            client.adapter = hvac.adapters.Request(base_uri=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/peerstorage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/peerstorage/__init__.py
new file mode 100644
index 00000000..a8fa60c2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/peerstorage/__init__.py
@@ -0,0 +1,267 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import six
+
+from charmhelpers.core.hookenv import relation_id as current_relation_id
+from charmhelpers.core.hookenv import (
+    is_relation_made,
+    relation_ids,
+    relation_get as _relation_get,
+    local_unit,
+    relation_set as _relation_set,
+    leader_get as _leader_get,
+    leader_set,
+    is_leader,
+)
+
+
+"""
+This helper provides functions to support use of a peer relation
+for basic key/value storage, with the added benefit that all storage
+can be replicated across peer units.
+
+Requirements to use:
+
+To use this, the "peer_echo()" method has to be called from the peer
+relation's relation-changed hook:
+
+@hooks.hook("cluster-relation-changed")  # Adapt this to your peer relation name
+def cluster_relation_changed():
+    peer_echo()
+
+Once this is done, you can use peer storage from anywhere:
+
+@hooks.hook("some-hook")
+def some_hook():
+    # You can store and retrieve key/values this way:
+    if is_relation_made("cluster"):  # from charmhelpers.core.hookenv
+        # There are peers available so we can work with peer storage
+        peer_store("mykey", "myvalue")
+        value = peer_retrieve("mykey")
+        print(value)
+    else:
+        print("No peers joined the relation, cannot share key/values :(")
+"""
+
+
+def leader_get(attribute=None, rid=None):
+    """Wrapper to ensure that settings are migrated from the peer relation.
+
+    This is to support upgrading an environment that does not support
+    Juju leadership election to one that does.
+
+    If a setting is not present in leader storage but is set on the peer
+    relation, it is migrated and marked as such so that it is not
+    re-migrated.
+    """
+    migration_key = '__leader_get_migrated_settings__'
+    if not is_leader():
+        return _leader_get(attribute=attribute)
+
+    settings_migrated = False
+    leader_settings = _leader_get(attribute=attribute)
+    previously_migrated = _leader_get(attribute=migration_key)
+
+    if previously_migrated:
+        migrated = set(json.loads(previously_migrated))
+    else:
+        migrated = set([])
+
+    try:
+        if migration_key in leader_settings:
+            del leader_settings[migration_key]
+    except TypeError:
+        pass
+
+    if attribute:
+        if attribute in migrated:
+            return leader_settings
+
+        # If attribute not present in leader db, check if this unit has set
+        # the attribute in the peer relation
+        if not leader_settings:
+            peer_setting = _relation_get(attribute=attribute,
+                                         unit=local_unit(), rid=rid)
+            if peer_setting:
+                leader_set(settings={attribute: peer_setting})
+                leader_settings = peer_setting
+
+        if leader_settings:
+            settings_migrated = True
+            migrated.add(attribute)
+    else:
+        r_settings = _relation_get(unit=local_unit(), rid=rid)
+        if r_settings:
+            for key in set(r_settings.keys()).difference(migrated):
+                # Leader setting wins
+                if not leader_settings.get(key):
+                    leader_settings[key] = r_settings[key]
+
+                settings_migrated = True
+                migrated.add(key)
+
+            if settings_migrated:
+                leader_set(**leader_settings)
+
+    if migrated and settings_migrated:
+        migrated = json.dumps(list(migrated))
+        leader_set(settings={migration_key: migrated})
+
+    return leader_settings
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+    """Attempt to use leader-set if supported in the current version of Juju,
+    otherwise falls back on relation-set.
+
+    Note that we only attempt to use leader-set if the provided relation_id is
+    a peer relation id or no relation id is provided (in which case we assume
+    we are within the peer relation context).
+    """
+    try:
+        if relation_id in relation_ids('cluster'):
+            return leader_set(settings=relation_settings, **kwargs)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_set(relation_id=relation_id,
+                             relation_settings=relation_settings, **kwargs)
+
+
+def relation_get(attribute=None, unit=None, rid=None):
+    """Attempt to use leader-get if supported in the current version of Juju,
+    otherwise falls back on relation-get.
+
+    Note that we only attempt to use leader-get if the provided rid is a peer
+    relation id or no relation id is provided (in which case we assume we are
+    within the peer relation context).
+    """
+    try:
+        if rid in relation_ids('cluster'):
+            return leader_get(attribute, rid)
+        else:
+            raise NotImplementedError
+    except NotImplementedError:
+        return _relation_get(attribute=attribute, rid=rid, unit=unit)
+
+
+def peer_retrieve(key, relation_name='cluster'):
+    """Retrieve a named key from peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        return relation_get(attribute=key, rid=cluster_rid,
+                            unit=local_unit())
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
+                            inc_list=None, exc_list=None):
+    """Retrieve k/v pairs given a prefix and filter using {inc,exc}_list."""
+    inc_list = inc_list if inc_list else []
+    exc_list = exc_list if exc_list else []
+    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
+    matched = {}
+    if peerdb_settings is None:
+        return matched
+    for k, v in peerdb_settings.items():
+        full_prefix = prefix + delimiter
+        if k.startswith(full_prefix):
+            new_key = k.replace(full_prefix, '')
+            if new_key in exc_list:
+                continue
+            if new_key in inc_list or len(inc_list) == 0:
+                matched[new_key] = v
+    return matched
+
+
+def peer_store(key, value, relation_name='cluster'):
+    """Store the key/value pair on the named peer relation `relation_name`."""
+    cluster_rels = relation_ids(relation_name)
+    if len(cluster_rels) > 0:
+        cluster_rid = cluster_rels[0]
+        relation_set(relation_id=cluster_rid,
+                     relation_settings={key: value})
+    else:
+        raise ValueError('Unable to detect '
+                         'peer relation {}'.format(relation_name))
+
+
+def peer_echo(includes=None, force=False):
+    """Echo filtered attributes back onto the same relation for storage.
+
+    This is a requirement to use the peerstorage module - it needs to be called
+    from the peer relation's changed hook.
+
+    If Juju leader support exists this will be a noop unless force is True.
+    """
+    try:
+        is_leader()
+    except NotImplementedError:
+        pass
+    else:
+        if not force:
+            return  # NOOP if leader-election is supported
+
+    # Use original non-leader calls
+    relation_get = _relation_get
+    relation_set = _relation_set
+
+    rdata = relation_get()
+    echo_data = {}
+    if includes is None:
+        echo_data = rdata.copy()
+        for ex in ['private-address', 'public-address']:
+            if ex in echo_data:
+                echo_data.pop(ex)
+    else:
+        for attribute, value in six.iteritems(rdata):
+            for include in includes:
+                if include in attribute:
+                    echo_data[attribute] = value
+    if len(echo_data) > 0:
+        relation_set(relation_settings=echo_data)
+
+
+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
+                       peer_store_fatal=False, relation_settings=None,
+                       delimiter='_', **kwargs):
+    """Store passed-in arguments both in argument relation and in peer storage.
+
+    It functions like doing relation_set() and peer_store() at the same time,
+    with the same data.
+
+    @param relation_id: the id of the relation to store the data on. Defaults
+        to the current relation.
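+    @param peer_relation_name: the name of the peer relation to use for peer
+        storage. Defaults to 'cluster'.
+    @param relation_settings: dict of key/value pairs to set.
+    @param delimiter: delimiter used when prefixing keys stored on the peer
+        relation.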
+    @param peer_store_fatal: if set to True, the function will raise an
+        exception should the peer storage not be available.
+    """
+
+    relation_settings = relation_settings if relation_settings else {}
+    relation_set(relation_id=relation_id,
+                 relation_settings=relation_settings,
+                 **kwargs)
+    if is_relation_made(peer_relation_name):
+        for key, value in six.iteritems(dict(list(kwargs.items()) +
+                                             list(relation_settings.items()))):
+            key_prefix = relation_id or current_relation_id()
+            peer_store(key_prefix + delimiter + key,
+                       value,
+                       relation_name=peer_relation_name)
+    else:
+        if peer_store_fatal:
+            raise ValueError('Unable to detect '
+                             'peer relation {}'.format(peer_relation_name))
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/python.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/python.py
new file mode 100644
index 00000000..84cba8c4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/python.py
@@ -0,0 +1,21 @@
+# Copyright 2014-2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+# deprecated aliases for backwards compatibility
+from charmhelpers.fetch.python import debug  # noqa
+from charmhelpers.fetch.python import packages  # noqa
+from charmhelpers.fetch.python import rpdb  # noqa
+from charmhelpers.fetch.python import version  # noqa
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/saltstack/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/saltstack/__init__.py
new file mode 100644
index 00000000..d74f4039
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/saltstack/__init__.py
@@ -0,0 +1,116 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Charm Helpers saltstack - declare the state of your machines.
+
+This helper enables you to declare your machine state, rather than
+program it procedurally (and have to test each change to your procedures).
+Your install hook can be as simple as::
+
+    {{{
+    from charmhelpers.contrib.saltstack import (
+        install_salt_support,
+        update_machine_state,
+    )
+
+
+    def install():
+        install_salt_support()
+        update_machine_state('machine_states/dependencies.yaml')
+        update_machine_state('machine_states/installed.yaml')
+    }}}
+
+and won't need to change (nor will its tests) when you change the machine
+state.
+
+It uses a python package called salt-minion which allows various formats for
+specifying resources, such as::
+
+    {{{
+    /srv/{{ basedir }}:
+        file.directory:
+            - group: ubunet
+            - user: ubunet
+            - require:
+                - user: ubunet
+            - recurse:
+                - user
+                - group
+
+    ubunet:
+        group.present:
+            - gid: 1500
+        user.present:
+            - uid: 1500
+            - gid: 1500
+            - createhome: False
+            - require:
+                - group: ubunet
+    }}}
+
+The docs for all the different state definitions are at:
+    http://docs.saltstack.com/ref/states/all/
+
+
+TODO:
+  * Add test helpers which will ensure that machine state definitions
+    are functionally (but not necessarily logically) correct (i.e. getting
+    salt to parse all state defs).
+  * Add a link to a public bootstrap charm example / blogpost.
+  * Find a way to obviate the need to use the grains['charm_dir'] syntax
+    in templates.
+"""
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers <juju@lists.ubuntu.com>
+import subprocess
+
+import charmhelpers.contrib.templating.contexts
+import charmhelpers.core.host
+import charmhelpers.core.hookenv
+import charmhelpers.fetch
+
+
+salt_grains_path = '/etc/salt/grains'
+
+
+def install_salt_support(from_ppa=True):
+    """Installs the salt-minion helper for machine state.
+
+    By default the salt-minion package is installed from
+    the saltstack PPA. If from_ppa is False you must ensure
+    that the salt-minion package is available in the apt cache.
+    """
+    if from_ppa:
+        subprocess.check_call([
+            '/usr/bin/add-apt-repository',
+            '--yes',
+            'ppa:saltstack/salt',
+        ])
+        subprocess.check_call(['/usr/bin/apt-get', 'update'])
+    # We install salt-common as salt-minion would run the salt-minion
+    # daemon.
+    charmhelpers.fetch.apt_install('salt-common')
+
+
+def update_machine_state(state_path):
+    """Update the machine state using the provided state declaration."""
+    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
+        salt_grains_path)
+    subprocess.check_call([
+        'salt-call',
+        '--local',
+        'state.template',
+        state_path,
+    ])
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/__init__.py
new file mode 100644
index 00000000..1d238b52
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/__init__.py
@@ -0,0 +1,92 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import subprocess
+from charmhelpers.core import hookenv
+
+
+def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
+    """Generate a self-signed SSL keypair
+
+    You must provide one of the 3 optional arguments:
+    config, subject or cn.
+    If more than one is provided, the leftmost will be used.
+
+    Arguments:
+    keyfile -- (required) full path to the keyfile to be created
+    certfile -- (required) full path to the certfile to be created
+    keysize -- (optional) SSL key length
+    config -- (optional) openssl configuration file
+    subject -- (optional) dictionary with SSL subject variables
+    cn -- (optional) certificate common name
+
+    Required keys in subject dict:
+    cn -- Common name (e.g. FQDN)
+
+    Optional keys in subject dict
+    country -- Country Name (2 letter code)
+    state -- State or Province Name (full name)
+    locality -- Locality Name (eg, city)
+    organization -- Organization Name (eg, company)
+    organizational_unit -- Organizational Unit Name (eg, section)
+    email -- Email Address
+    """
+
+    cmd = []
+    if config:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-config", config]
+    elif subject:
+        ssl_subject = ""
+        if "country" in subject:
+            ssl_subject = ssl_subject + "/C={}".format(subject["country"])
+        if "state" in subject:
+            ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
+        if "locality" in subject:
+            ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
+        if "organization" in subject:
+            ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
+        if "organizational_unit" in subject:
+            ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
+        if "cn" in subject:
+            ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
+        else:
+            hookenv.log("When using \"subject\" argument you must "
+                        "provide the \"cn\" field at the very least")
+            return False
+        if "email" in subject:
+            ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
+
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", ssl_subject]
+    elif cn:
+        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
+               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
+               "-keyout", keyfile,
+               "-out", certfile, "-subj", "/CN={}".format(cn)]
+
+    if not cmd:
+        hookenv.log("No config, subject or cn provided, "
                    "unable to generate self-signed SSL certificates")
+        return False
+    try:
+        subprocess.check_call(cmd)
+        return True
+    except Exception as e:
+        print("Execution of openssl command failed:\n{}".format(e))
+        return False
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/service.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/service.py
new file mode 100644
index 00000000..06b534ff
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/ssl/service.py
@@ -0,0 +1,277 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from os.path import join as path_join
+from os.path import exists
+import subprocess
+
+from charmhelpers.core.hookenv import log, DEBUG
+
+STD_CERT = "standard"
+
+# Mysql server is fairly picky about cert creation and types,
+# so its creation is specified separately for now.
+MYSQL_CERT = "mysql"
+
+
+class ServiceCA(object):
+
+    default_expiry = str(365 * 2)
+    default_ca_expiry = str(365 * 6)
+
+    def __init__(self, name, ca_dir, cert_type=STD_CERT):
+        self.name = name
+        self.ca_dir = ca_dir
+        self.cert_type = cert_type
+
+    ###############
+    # Hook Helper API
+    @staticmethod
+    def get_ca(type=STD_CERT):
+        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
+        ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
+        ca = ServiceCA(service_name, ca_path, type)
+        ca.init()
+        return ca
+
+    @classmethod
+    def get_service_cert(cls, type=STD_CERT):
+        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
+        ca = cls.get_ca(type)
+        crt, key = ca.get_or_create_cert(service_name)
+        return crt, key, ca.get_ca_bundle()
+
+    ###############
+
+    def init(self):
+        log("initializing service ca", level=DEBUG)
+        if not exists(self.ca_dir):
+            self._init_ca_dir(self.ca_dir)
+            self._init_ca()
+
+    @property
+    def ca_key(self):
+        return path_join(self.ca_dir, 'private', 'cacert.key')
+
+    @property
+    def ca_cert(self):
+        return path_join(self.ca_dir, 'cacert.pem')
+
+    @property
+    def ca_conf(self):
+        return path_join(self.ca_dir, 'ca.cnf')
+
+    @property
+    def signing_conf(self):
+        return path_join(self.ca_dir, 'signing.cnf')
+
+    def _init_ca_dir(self, ca_dir):
+        os.mkdir(ca_dir)
+        for i in ['certs', 'crl', 'newcerts', 'private']:
+            sd = path_join(ca_dir, i)
+            if not exists(sd):
+                os.mkdir(sd)
+
+        if not exists(path_join(ca_dir, 'serial')):
+            with open(path_join(ca_dir, 'serial'), 'w') as fh:
+                fh.write('02\n')
+
+        if not exists(path_join(ca_dir, 'index.txt')):
+            with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
+                fh.write('')
+
+    def _init_ca(self):
+        """Generate the root ca's cert and key.
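+
+        Writes ca.cnf and signing.cnf from their templates if they are not
+        already present, then creates a self-signed CA certificate and key
+        (valid for default_ca_expiry days). Raises RuntimeError if the CA
+        certificate or key already exists.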
+ """ + if not exists(path_join(self.ca_dir, 'ca.cnf')): + with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh: + fh.write( + CA_CONF_TEMPLATE % (self.get_conf_variables())) + + if not exists(path_join(self.ca_dir, 'signing.cnf')): + with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh: + fh.write( + SIGNING_CONF_TEMPLATE % (self.get_conf_variables())) + + if exists(self.ca_cert) or exists(self.ca_key): + raise RuntimeError("Initialized called when CA already exists") + cmd = ['openssl', 'req', '-config', self.ca_conf, + '-x509', '-nodes', '-newkey', 'rsa', + '-days', self.default_ca_expiry, + '-keyout', self.ca_key, '-out', self.ca_cert, + '-outform', 'PEM'] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + log("CA Init:\n %s" % output, level=DEBUG) + + def get_conf_variables(self): + return dict( + org_name="juju", + org_unit_name="%s service" % self.name, + common_name=self.name, + ca_dir=self.ca_dir) + + def get_or_create_cert(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + return self.create_certificate(common_name) + + def create_certificate(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name) + self._create_certificate(common_name, key_p, csr_p, crt_p) + return self.get_certificate(common_name) + + def get_certificate(self, common_name): + if common_name not in self: + raise ValueError("No certificate for %s" % common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + with open(crt_p) as fh: + crt = fh.read() + with open(key_p) as fh: + key = fh.read() + return crt, key + + def __contains__(self, common_name): + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + return exists(crt_p) + + def _create_certificate(self, common_name, key_p, csr_p, crt_p): + template_vars = self.get_conf_variables() + template_vars['common_name'] = common_name + subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % ( + template_vars) + + log("CA Create Cert %s" % common_name, level=DEBUG) + cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048', + '-nodes', '-days', self.default_expiry, + '-keyout', key_p, '-out', csr_p, '-subj', subj] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + log("CA Sign Cert %s" % common_name, level=DEBUG) + if self.cert_type == MYSQL_CERT: + cmd = ['openssl', 'x509', '-req', + '-in', csr_p, '-days', self.default_expiry, + '-CA', self.ca_cert, '-CAkey', self.ca_key, + '-set_serial', '01', '-out', crt_p] + else: + cmd = ['openssl', 'ca', '-config', self.signing_conf, + '-extensions', 'req_extensions', + '-days', self.default_expiry, '-notext', + '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch'] + log("running %s" % " ".join(cmd), level=DEBUG) + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + def get_ca_bundle(self): + with open(self.ca_cert) as fh: + return fh.read() + + +CA_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem 
+private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s Certificate Authority + + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign +""" + + +SIGNING_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s machine resources +commonName = %(common_name)s + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +""" diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 00000000..d7567b86 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/bcache.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/bcache.py new file mode 100644 index 00000000..605991e1 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/bcache.py @@ -0,0 +1,74 @@ +# Copyright 2017 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import json + +from charmhelpers.core.hookenv import log + +stats_intervals = ['stats_day', 'stats_five_minute', + 'stats_hour', 'stats_total'] + +SYSFS = '/sys' + + +class Bcache(object): + """Bcache behaviour + """ + + def __init__(self, cachepath): + self.cachepath = cachepath + + @classmethod + def fromdevice(cls, devname): + return cls('{}/block/{}/bcache'.format(SYSFS, devname)) + + def __str__(self): + return self.cachepath + + def get_stats(self, interval): + """Get cache stats + """ + intervaldir = 'stats_{}'.format(interval) + path = "{}/{}".format(self.cachepath, intervaldir) + out = dict() + for elem in os.listdir(path): + out[elem] = open('{}/{}'.format(path, elem)).read().strip() + return out + + +def get_bcache_fs(): + """Return all cache sets + """ + cachesetroot = "{}/fs/bcache".format(SYSFS) + try: + dirs = os.listdir(cachesetroot) + except OSError: + log("No bcache fs found") + return [] + cacheset = set([Bcache('{}/{}'.format(cachesetroot, d)) for d in dirs if not d.startswith('register')]) + return cacheset + + +def get_stats_action(cachespec, interval): + """Action for getting bcache statistics for a given cachespec. + Cachespec can either be a device name, eg. 'sdb', which will retrieve + cache stats for the given device, or 'global', which will retrieve stats + for all cachesets + """ + if cachespec == 'global': + caches = get_bcache_fs() + else: + caches = [Bcache.fromdevice(cachespec)] + res = dict((c.cachepath, c.get_stats(interval)) for c in caches) + return json.dumps(res, indent=4, separators=(',', ': ')) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/ceph.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/ceph.py new file mode 100644 index 00000000..814d5c72 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/ceph.py @@ -0,0 +1,1810 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page <james.page@ubuntu.com> +# Adam Gandelman <adamg@ubuntu.com> +# + +import collections +import errno +import hashlib +import math +import six + +import os +import shutil +import json +import time +import uuid + +from subprocess import ( + check_call, + check_output, + CalledProcessError, +) +from charmhelpers.core.hookenv import ( + config, + service_name, + local_unit, + relation_get, + relation_ids, + relation_set, + related_units, + log, + DEBUG, + INFO, + WARNING, + ERROR, +) +from charmhelpers.core.host import ( + mount, + mounts, + service_start, + service_stop, + service_running, + umount, + cmp_pkgrevno, +) +from charmhelpers.fetch import ( + apt_install, +) +from charmhelpers.core.unitdata import kv + +from charmhelpers.core.kernel import modprobe +from charmhelpers.contrib.openstack.utils import config_flags_parser + +KEYRING = '/etc/ceph/ceph.client.{}.keyring' +KEYFILE = '/etc/ceph/ceph.client.{}.key' + +CEPH_CONF = """[global] +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = {use_syslog} +clog to syslog = {use_syslog} +""" + +# The number of placement groups per OSD to target for placement group +# calculations. This number is chosen as 100 due to the ceph PG Calc +# documentation recommending to choose 100 for clusters which are not +# expected to increase in the foreseeable future. Since the majority of the +# calculations are done on deployment, target the case of non-expanding +# clusters as the default. +DEFAULT_PGS_PER_OSD_TARGET = 100 +DEFAULT_POOL_WEIGHT = 10.0 +LEGACY_PG_COUNT = 200 +DEFAULT_MINIMUM_PGS = 2 +AUTOSCALER_DEFAULT_PGS = 32 + + +class OsdPostUpgradeError(Exception): + """Error class for OSD post-upgrade operations.""" + pass + + +class OSDSettingConflict(Exception): + """Error class for conflicting osd setting requests.""" + pass + + +class OSDSettingNotAllowed(Exception): + """Error class for a disallowed setting.""" + pass + + +OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed) + +OSD_SETTING_WHITELIST = [ + 'osd heartbeat grace', + 'osd heartbeat interval', +] + + +def _order_dict_by_key(rdict): + """Convert a dictionary into an OrderedDict sorted by key. + + :param rdict: Dictionary to be ordered. + :type rdict: dict + :returns: Ordered Dictionary. + :rtype: collections.OrderedDict + """ + return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0])) + + +def get_osd_settings(relation_name): + """Consolidate requested osd settings from all clients. + + Consolidate requested osd settings from all clients. Check that the + requested setting is on the whitelist and it does not conflict with + any other requested settings. 
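+
+    For illustration, a client unit might publish (values hypothetical):
+
+        osd-settings: '{"osd heartbeat grace": 20}'
+
+    which, once validated against OSD_SETTING_WHITELIST, is merged into
+    the returned dictionary.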
+ + :returns: Dictionary of settings + :rtype: dict + + :raises: OSDSettingNotAllowed + :raises: OSDSettingConflict + """ + rel_ids = relation_ids(relation_name) + osd_settings = {} + for relid in rel_ids: + for unit in related_units(relid): + unit_settings = relation_get('osd-settings', unit, relid) or '{}' + unit_settings = json.loads(unit_settings) + for key, value in unit_settings.items(): + if key not in OSD_SETTING_WHITELIST: + msg = 'Illegal settings "{}"'.format(key) + raise OSDSettingNotAllowed(msg) + if key in osd_settings: + if osd_settings[key] != unit_settings[key]: + msg = 'Conflicting settings for "{}"'.format(key) + raise OSDSettingConflict(msg) + else: + osd_settings[key] = value + return _order_dict_by_key(osd_settings) + + +def send_osd_settings(): + """Pass on requested OSD settings to osd units.""" + try: + settings = get_osd_settings('client') + except OSD_SETTING_EXCEPTIONS as e: + # There is a problem with the settings, not passing them on. Update + # status will notify the user. + log(e, level=ERROR) + return + data = { + 'osd-settings': json.dumps(settings, sort_keys=True)} + for relid in relation_ids('osd'): + relation_set(relation_id=relid, + relation_settings=data) + + +def validator(value, valid_type, valid_range=None): + """ + Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values + Example input: + validator(value=1, + valid_type=int, + valid_range=[0, 2]) + This says I'm testing value=1. It must be an int inclusive in [0,2] + + :param value: The value to validate + :param valid_type: The type that value should be. + :param valid_range: A range of values that value can assume. + :return: + """ + assert isinstance(value, valid_type), "{} is not a {}".format( + value, + valid_type) + if valid_range is not None: + assert isinstance(valid_range, list), \ + "valid_range must be a list, was given {}".format(valid_range) + # If we're dealing with strings + if isinstance(value, six.string_types): + assert value in valid_range, \ + "{} is not in the list {}".format(value, valid_range) + # Integer, float should have a min and max + else: + if len(valid_range) != 2: + raise ValueError( + "Invalid valid_range list of {} for {}. " + "List must be [min,max]".format(valid_range, value)) + assert value >= valid_range[0], \ + "{} is less than minimum allowed value of {}".format( + value, valid_range[0]) + assert value <= valid_range[1], \ + "{} is greater than maximum allowed value of {}".format( + value, valid_range[1]) + + +class PoolCreationError(Exception): + """ + A custom error to inform the caller that a pool creation failed. Provides an error message + """ + + def __init__(self, message): + super(PoolCreationError, self).__init__(message) + + +class Pool(object): + """ + An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. + Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). + """ + + def __init__(self, service, name): + self.service = service + self.name = name + + # Create the pool if it doesn't exist already + # To be implemented by subclasses + def create(self): + pass + + def add_cache_tier(self, cache_pool, mode): + """ + Adds a new cache tier to an existing pool. + :param cache_pool: six.string_types. The cache tier pool name to add. + :param mode: six.string_types. The caching mode to use for this pool. 
valid range = ["readonly", "writeback"] + :return: None + """ + # Check the input types and values + validator(value=cache_pool, valid_type=six.string_types) + validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) + + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) + check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) + + def remove_cache_tier(self, cache_pool): + """ + Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. + :param cache_pool: six.string_types. The cache tier pool name to remove. + :return: None + """ + # read-only is easy, writeback is much harder + mode = get_cache_mode(self.service, cache_pool) + if mode == 'readonly': + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + elif mode == 'writeback': + pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier', + 'cache-mode', cache_pool, 'forward'] + if cmp_pkgrevno('ceph-common', '10.1') >= 0: + # Jewel added a mandatory flag + pool_forward_cmd.append('--yes-i-really-mean-it') + + check_call(pool_forward_cmd) + # Flush the cache and wait for it to return + check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) + check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) + + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups are calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any of CRUSH set rules, + the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule, rather it is left to the user to tune + in the form of 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools has not been upgraded to + include an update to indicate their relative usage of the pools. + :param device_class: str. 
+ + def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT, + device_class=None): + """Return the number of placement groups to use when creating the pool. + + Returns the number of placement groups which should be specified when + creating the pool. This is based upon the calculation guidelines + provided by the Ceph Placement Group Calculator (located online at + http://ceph.com/pgcalc/). + + The number of placement groups is calculated using the following: + + (Target PGs per OSD) * (OSD #) * (%Data) + ---------------------------------------- + (Pool size) + + Per the upstream guidelines, the OSD # should really be considered + based on the number of OSDs which are eligible to be selected by the + pool. Since the pool creation doesn't specify any CRUSH placement + rules, the default rule will be dependent upon the type of pool being + created (replicated or erasure). + + This code makes no attempt to determine the number of OSDs which can be + selected for the specific rule; rather, it is left to the user to tune + this via the 'expected-osd-count' config option. + + :param pool_size: int. pool_size is either the number of replicas for + replicated pools or the K+M sum for erasure coded pools + :param percent_data: float. the percentage of data that is expected to + be contained in the pool for the specific OSD set. Default value + is to assume 10% of the data is for this pool, which is a + relatively low % of the data but allows for the pg_num to be + increased. NOTE: the default is primarily to handle the scenario + where related charms requiring pools have not been upgraded to + include an update to indicate their relative usage of the pools. + :param device_class: str. class of storage to use for basis of pgs + calculation; ceph supports nvme, ssd and hdd by default based + on presence of devices of each type in the deployment. + :return: int. The number of pgs to use. + """ + + # Note: This calculation follows the approach that is provided + # by the Ceph PG Calculator located at http://ceph.com/pgcalc/. + validator(value=pool_size, valid_type=int) + + # Ensure that percent data is set to something - even with a default + # it can be set to None, which would wreak havoc below. + if percent_data is None: + percent_data = DEFAULT_POOL_WEIGHT + + # If the expected-osd-count is specified, then use the max between + # the expected-osd-count and the actual osd_count + osd_list = get_osds(self.service, device_class) + expected = config('expected-osd-count') or 0 + + if osd_list: + if device_class: + osd_count = len(osd_list) + else: + osd_count = max(expected, len(osd_list)) + + # Log a message to provide some insight if the calculations claim + # to be off because someone is setting the expected count and + # there are more OSDs in reality. Try to make a proper guess + # based upon the cluster itself. + if not device_class and expected and osd_count != expected: + log("Found more OSDs than provided expected count. " + "Using the actual count instead", INFO) + elif expected: + # Use the expected-osd-count in older ceph versions to allow for + # more accurate pg calculations + osd_count = expected + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + return LEGACY_PG_COUNT + + percent_data /= 100.0 + target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET + num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size + + # NOTE: ensure a sane minimum number of PGS otherwise we don't get any + # reasonable data distribution in minimal OSD configurations + if num_pg < DEFAULT_MINIMUM_PGS: + num_pg = DEFAULT_MINIMUM_PGS + + # The CRUSH algorithm has a slight optimization for placement groups + # with powers of 2 so find the nearest power of 2. If the nearest + # power of 2 is more than 25% below the original value, the next + # highest value is used. To do this, find the nearest power of 2 such + # that 2^n <= num_pg, check to see if it's within the 25% tolerance. + exponent = math.floor(math.log(num_pg, 2)) + nearest = 2 ** exponent + if (num_pg - nearest) > (num_pg * 0.25): + # Choose the next highest power of 2 since the nearest is more + # than 25% below the original value. + return int(nearest * 2) + else: + return int(nearest)
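+ + + # Editor's note: a worked example of the calculation above (illustrative + # only, not part of the upstream API). With the default of 100 target PGs + # per OSD, 12 OSDs, 3 replicas and 20% expected data: + # + #     (100 * 12 * 0.20) // 3 = 80 + # + # The nearest power of two below 80 is 64, and 80 - 64 = 16 is within the + # 25% tolerance (80 * 0.25 = 20), so 64 is used; had the gap exceeded the + # tolerance, the next power of two (128) would be chosen. On such a cluster: + # + #     ReplicatedPool('admin', 'data', replicas=3).get_pgs(3, 20.0)  # -> 64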
+ + + class ReplicatedPool(Pool): + def __init__(self, service, name, pg_num=None, replicas=2, + percent_data=10.0, app_name=None): + super(ReplicatedPool, self).__init__(service=service, name=name) + self.replicas = replicas + self.percent_data = percent_data + if pg_num: + # Since the number of placement groups was specified, ensure + # that there aren't too many created. + max_pgs = self.get_pgs(self.replicas, 100.0) + self.pg_num = min(pg_num, max_pgs) + else: + self.pg_num = self.get_pgs(self.replicas, percent_data) + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, self.pg_num) + ), + self.name, str(self.pg_num) + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(self.pg_num) + ] + + try: + check_call(cmd) + # Set the pool replica size + update_pool(client=self.service, + pool=self.name, + settings={'size': str(self.replicas)}) + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e), level=WARNING) + except CalledProcessError: + raise + + + # Default jerasure erasure coded pool + class ErasurePool(Pool): + def __init__(self, service, name, erasure_code_profile="default", + percent_data=10.0, app_name=None): + super(ErasurePool, self).__init__(service=service, name=name) + self.erasure_code_profile = erasure_code_profile + self.percent_data = percent_data + if app_name: + self.app_name = app_name + else: + self.app_name = 'unknown' + + def create(self): + if not pool_exists(self.service, self.name): + # Try to find the erasure profile information in order to properly + # size the number of placement groups. The size of an erasure + # coded placement group is calculated as k+m.
+ erasure_profile = get_erasure_profile(self.service, + self.erasure_code_profile) + + # Check for errors + if erasure_profile is None: + msg = ("Failed to discover erasure profile named " + "{}".format(self.erasure_code_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + if 'k' not in erasure_profile or 'm' not in erasure_profile: + # Error + msg = ("Unable to find k (data chunks) or m (coding chunks) " + "in erasure profile {}".format(erasure_profile)) + log(msg, level=ERROR) + raise PoolCreationError(msg) + + k = int(erasure_profile['k']) + m = int(erasure_profile['m']) + pgs = self.get_pgs(k + m, self.percent_data) + nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0 + # Create it + if nautilus_or_later: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + '--pg-num-min={}'.format( + min(AUTOSCALER_DEFAULT_PGS, pgs) + ), + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + else: + cmd = [ + 'ceph', '--id', self.service, 'osd', 'pool', 'create', + self.name, str(pgs), str(pgs), + 'erasure', self.erasure_code_profile + ] + + try: + check_call(cmd) + try: + set_app_name_for_pool(client=self.service, + pool=self.name, + name=self.app_name) + except CalledProcessError: + log('Could not set app name for pool {}'.format(self.name), level=WARNING) + if nautilus_or_later: + # Ensure we set the expected pool ratio + update_pool(client=self.service, + pool=self.name, + settings={'target_size_ratio': str(self.percent_data / 100.0)}) + if 'pg_autoscaler' in enabled_manager_modules(): + try: + enable_pg_autoscale(self.service, self.name) + except CalledProcessError as e: + log('Could not configure auto scaling for pool {}: {}'.format( + self.name, e), level=WARNING) + except CalledProcessError: + raise + + + def enabled_manager_modules(): + """Return a list of enabled manager modules. + + :rtype: List[str] + """ + cmd = ['ceph', 'mgr', 'module', 'ls'] + try: + modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') + except CalledProcessError as e: + log("Failed to list ceph modules: {}".format(e), WARNING) + return [] + modules = json.loads(modules) + return modules['enabled_modules'] + + + def enable_pg_autoscale(service, pool_name): + """ + Enable Ceph's PG autoscaler for the specified pool. + + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types. The name of the pool to enable autoscaling on + :raise: CalledProcessError if the command fails + """ + check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, 'pg_autoscale_mode', 'on']) + + + def get_mon_map(service): + """ + Returns the current monitor map. + :param service: six.string_types. The Ceph user name to run the command under + :return: dict. The parsed monitor map. + :raise: ValueError if the monmap fails to parse; also raises + CalledProcessError if our ceph command fails + """ + try: + mon_status = check_output(['ceph', '--id', service, + 'mon_status', '--format=json']) + if six.PY3: + mon_status = mon_status.decode('UTF-8') + try: + return json.loads(mon_status) + except ValueError as v: + log("Unable to parse mon_status json: {}. Error: {}" + .format(mon_status, str(v))) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}" + .format(str(e))) + raise
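+ + + # Editor's note: a minimal usage sketch of get_mon_map() (illustrative + # only), assuming a cephx user named 'admin' exists: + # + #     monmap = get_mon_map('admin') + #     mon_names = [mon['name'] for mon in monmap['monmap']['mons']] + # + # hash_monitor_names() below iterates over this same structure.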
Error: {}" + .format(mon_status, str(v))) + raise + except CalledProcessError as e: + log("mon_status command failed with message: {}" + .format(str(e))) + raise + + +def hash_monitor_names(service): + """ + Uses the get_mon_map() function to get information about the monitor + cluster. + Hash the name of each monitor. Return a sorted list of monitor hashes + in an ascending order. + :param service: six.string_types. The Ceph user name to run the command under + :rtype : dict. json dict of monitor name, ip address and rank + example: { + 'name': 'ip-172-31-13-165', + 'rank': 0, + 'addr': '172.31.13.165:6789/0'} + """ + try: + hash_list = [] + monitor_list = get_mon_map(service=service) + if monitor_list['monmap']['mons']: + for mon in monitor_list['monmap']['mons']: + hash_list.append( + hashlib.sha224(mon['name'].encode('utf-8')).hexdigest()) + return sorted(hash_list) + else: + return None + except (ValueError, CalledProcessError): + raise + + +def monitor_key_delete(service, key): + """ + Delete a key and value pair from the monitor cluster + :param service: six.string_types. The Ceph user name to run the command under + Deletes a key value pair on the monitor cluster. + :param key: six.string_types. The key to delete. + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'del', str(key)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_set(service, key, value): + """ + Sets a key value pair on the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to set. + :param value: The value to set. This will be converted to a string + before setting + """ + try: + check_output( + ['ceph', '--id', service, + 'config-key', 'put', str(key), str(value)]) + except CalledProcessError as e: + log("Monitor config-key put failed with message: {}".format( + e.output)) + raise + + +def monitor_key_get(service, key): + """ + Gets the value of an existing key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for. + :return: Returns the value of that key or None if not found. + """ + try: + output = check_output( + ['ceph', '--id', service, + 'config-key', 'get', str(key)]).decode('UTF-8') + return output + except CalledProcessError as e: + log("Monitor config-key get failed with message: {}".format( + e.output)) + return None + + +def monitor_key_exists(service, key): + """ + Searches for the existence of a key in the monitor cluster. + :param service: six.string_types. The Ceph user name to run the command under + :param key: six.string_types. The key to search for + :return: Returns True if the key exists, False if not and raises an + exception if an unknown error occurs. :raise: CalledProcessError if + an unknown error occurs + """ + try: + check_call( + ['ceph', '--id', service, + 'config-key', 'exists', str(key)]) + # I can return true here regardless because Ceph returns + # ENOENT if the key wasn't found + return True + except CalledProcessError as e: + if e.returncode == errno.ENOENT: + return False + else: + log("Unknown error from ceph config-get exists: {} {}".format( + e.returncode, e.output)) + raise + + +def get_erasure_profile(service, name): + """ + :param service: six.string_types. 
+ + def get_erasure_profile(service, name): + """Get an existing erasure code profile if it already exists. + + :param service: six.string_types. The Ceph user name to run the command under + :param name: six.string_types. Name of the profile to fetch. + :return: dict. The profile as parsed JSON, or None if the profile could + not be found or parsed. + """ + try: + out = check_output(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name, '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) + except (CalledProcessError, OSError, ValueError): + return None + + + def pool_set(service, pool_name, key, value): + """ + Sets a value for a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param key: six.string_types + :param value: The value to set. + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, + str(value).lower()] + try: + check_call(cmd) + except CalledProcessError: + raise + + + def snapshot_pool(service, pool_name, snapshot_name): + """ + Snapshots a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + + def remove_pool_snapshot(service, pool_name, snapshot_name): + """ + Remove a snapshot from a RADOS pool in ceph. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :param snapshot_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + + def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None): + """ + :param service: The Ceph user name to run the command under + :type service: str + :param pool_name: Name of pool + :type pool_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + :raises: subprocess.CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name] + if max_bytes: + cmd = cmd + ['max_bytes', str(max_bytes)] + if max_objects: + cmd = cmd + ['max_objects', str(max_objects)] + check_call(cmd) + + + def remove_pool_quota(service, pool_name): + """ + Remove the byte quota from a RADOS pool in ceph (sets max_bytes to 0). + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: None. Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] + try: + check_call(cmd) + except CalledProcessError: + raise + + + def remove_erasure_profile(service, profile_name): + """ + Remove an existing erasure code profile. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :return: None.
Can raise CalledProcessError + """ + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', + profile_name] + try: + check_call(cmd) + except CalledProcessError: + raise + + +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', + failure_domain='host', + data_chunks=2, coding_chunks=1, + locality=None, durability_estimator=None, + device_class=None): + """ + Create a new erasure code profile if one does not already exist for it. Updates + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ + for more details + :param service: six.string_types. The Ceph user name to run the command under + :param profile_name: six.string_types + :param erasure_plugin_name: six.string_types + :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', + 'room', 'root', 'row']) + :param data_chunks: int + :param coding_chunks: int + :param locality: int + :param durability_estimator: int + :param device_class: six.string_types + :return: None. Can raise CalledProcessError + """ + # Ensure this failure_domain is allowed by Ceph + validator(failure_domain, six.string_types, + ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) + + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, + 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks) + ] + if locality is not None and durability_estimator is not None: + raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") + + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + # failure_domain changed in luminous + if luminous_or_later: + cmd.append('crush-failure-domain=' + failure_domain) + else: + cmd.append('ruleset-failure-domain=' + failure_domain) + + # device class new in luminous + if luminous_or_later and device_class: + cmd.append('crush-device-class={}'.format(device_class)) + else: + log('Skipping device class configuration (ceph < 12.0.0)', + level=DEBUG) + + # Add plugin specific information + if locality is not None: + # For local erasure codes + cmd.append('l=' + str(locality)) + if durability_estimator is not None: + # For Shec erasure codes + cmd.append('c=' + str(durability_estimator)) + + if erasure_profile_exists(service, profile_name): + cmd.append('--force') + + try: + check_call(cmd) + except CalledProcessError: + raise + + +def rename_pool(service, old_name, new_name): + """ + Rename a Ceph pool from old_name to new_name + :param service: six.string_types. The Ceph user name to run the command under + :param old_name: six.string_types + :param new_name: six.string_types + :return: None + """ + validator(value=old_name, valid_type=six.string_types) + validator(value=new_name, valid_type=six.string_types) + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] + check_call(cmd) + + +def erasure_profile_exists(service, name): + """ + Check to see if an Erasure code profile already exists. + :param service: six.string_types. 
The Ceph user name to run the command under + :param name: six.string_types + :return: bool. True if the profile exists, False otherwise. + """ + validator(value=name, valid_type=six.string_types) + try: + check_call(['ceph', '--id', service, + 'osd', 'erasure-code-profile', 'get', + name]) + return True + except CalledProcessError: + return False + + + def get_cache_mode(service, pool_name): + """ + Find the current caching mode of the pool_name given. + :param service: six.string_types. The Ceph user name to run the command under + :param pool_name: six.string_types + :return: six.string_types or None. The cache mode, or None if the pool + was not found. + """ + validator(value=service, valid_type=six.string_types) + validator(value=pool_name, valid_type=six.string_types) + out = check_output(['ceph', '--id', service, + 'osd', 'dump', '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + try: + osd_json = json.loads(out) + for pool in osd_json['pools']: + if pool['pool_name'] == pool_name: + return pool['cache_mode'] + return None + except ValueError: + raise + + + def pool_exists(service, name): + """Check to see if a RADOS pool already exists.""" + try: + out = check_output(['rados', '--id', service, 'lspools']) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return name in out.split() + + + def get_osds(service, device_class=None): + """Return a list of all Ceph Object Storage Daemons currently in the + cluster (optionally filtered by storage device class). + + :param device_class: Class of storage device for OSDs + :type device_class: str + """ + luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0 + if luminous_or_later and device_class: + out = check_output(['ceph', '--id', service, + 'osd', 'crush', 'class', + 'ls-osd', device_class, + '--format=json']) + else: + out = check_output(['ceph', '--id', service, + 'osd', 'ls', + '--format=json']) + if six.PY3: + out = out.decode('UTF-8') + return json.loads(out) + + + def install(): + """Basic Ceph client installation.""" + ceph_dir = "/etc/ceph" + if not os.path.exists(ceph_dir): + os.mkdir(ceph_dir) + + apt_install('ceph-common', fatal=True) + + + def rbd_exists(service, pool, rbd_img): + """Check to see if a RADOS block device exists.""" + try: + out = check_output(['rbd', 'list', '--id', + service, '--pool', pool]) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return rbd_img in out + + + def create_rbd_image(service, pool, image, sizemb): + """Create a new RADOS block device.""" + cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, + '--pool', pool] + check_call(cmd) + + + def update_pool(client, pool, settings): + """Update a pool's settings via 'ceph osd pool set'.""" + cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] + for k, v in six.iteritems(settings): + cmd.append(k) + cmd.append(v) + + check_call(cmd) + + + def set_app_name_for_pool(client, pool, name): + """ + Calls `osd pool application enable` for the specified pool name + + :param client: Name of the ceph client to use + :type client: str + :param pool: Pool to set app name for + :type pool: str + :param name: app name for the specified pool + :type name: str + + :raises: CalledProcessError if ceph call fails + """ + if cmp_pkgrevno('ceph-common', '12.0.0') >= 0: + cmd = ['ceph', '--id', client, 'osd', 'pool', + 'application', 'enable', pool, name] + check_call(cmd) + + + def create_pool(service, name, replicas=3, pg_num=None): + """Create a new RADOS pool.""" + if pool_exists(service, name): + log("Ceph pool {} already exists, skipping creation".format(name), + level=WARNING) + return + + if not pg_num: + #
Calculate the number of placement groups based + # on upstream recommended best practices. + osds = get_osds(service) + if osds: + pg_num = (len(osds) * 100 // replicas) + else: + # NOTE(james-page): Default to 200 for older ceph versions + # which don't support OSD query from cli + pg_num = 200 + + cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] + check_call(cmd) + + update_pool(service, name, settings={'size': str(replicas)}) + + +def delete_pool(service, name): + """Delete a RADOS pool from ceph.""" + cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, + '--yes-i-really-really-mean-it'] + check_call(cmd) + + +def _keyfile_path(service): + return KEYFILE.format(service) + + +def _keyring_path(service): + return KEYRING.format(service) + + +def add_key(service, key): + """ + Add a key to a keyring. + + Creates the keyring if it doesn't already exist. + + Logs and returns if the key is already in the keyring. + """ + keyring = _keyring_path(service) + if os.path.exists(keyring): + with open(keyring, 'r') as ring: + if key in ring.read(): + log('Ceph keyring exists at %s and has not changed.' % keyring, + level=DEBUG) + return + log('Updating existing keyring %s.' % keyring, level=DEBUG) + + cmd = ['ceph-authtool', keyring, '--create-keyring', + '--name=client.{}'.format(service), '--add-key={}'.format(key)] + check_call(cmd) + log('Created new ceph keyring at %s.' % keyring, level=DEBUG) + + +def create_keyring(service, key): + """Deprecated. Please use the more accurately named 'add_key'""" + return add_key(service, key) + + +def delete_keyring(service): + """Delete an existing Ceph keyring.""" + keyring = _keyring_path(service) + if not os.path.exists(keyring): + log('Keyring does not exist at %s' % keyring, level=WARNING) + return + + os.remove(keyring) + log('Deleted ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + """Create a file containing key.""" + keyfile = _keyfile_path(service) + if os.path.exists(keyfile): + log('Keyfile exists at %s.' % keyfile, level=WARNING) + return + + with open(keyfile, 'w') as fd: + fd.write(key) + + log('Created new keyfile at %s.' 
% keyfile, level=INFO) + + +def get_ceph_nodes(relation='ceph'): + """Query named relation to determine current nodes.""" + hosts = [] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + + return hosts + + +def configure(service, key, auth, use_syslog): + """Perform basic configuration of Ceph.""" + add_key(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF.format(auth=auth, + keyring=_keyring_path(service), + mon_hosts=",".join(map(str, hosts)), + use_syslog=use_syslog)) + modprobe('rbd') + + +def image_mapped(name): + """Determine whether a RADOS block device is mapped locally.""" + try: + out = check_output(['rbd', 'showmapped']) + if six.PY3: + out = out.decode('UTF-8') + except CalledProcessError: + return False + + return name in out + + +def map_block_storage(service, pool, image): + """Map a RADOS block device for local use.""" + cmd = [ + 'rbd', + 'map', + '{}/{}'.format(pool, image), + '--user', + service, + '--secret', + _keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + """Determine whether a filesytems is already mounted.""" + return fs in [f for f, m in mounts()] + + +def make_filesystem(blk_device, fstype='ext4', timeout=10): + """Make a new filesystem on the specified block device.""" + count = 0 + e_noent = errno.ENOENT + while not os.path.exists(blk_device): + if count >= timeout: + log('Gave up waiting on block device %s' % blk_device, + level=ERROR) + raise IOError(e_noent, os.strerror(e_noent), blk_device) + + log('Waiting for block device %s to appear' % blk_device, + level=DEBUG) + count += 1 + time.sleep(1) + else: + log('Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + check_call(['mkfs', '-t', fstype, blk_device]) + + +def place_data_on_block_device(blk_device, data_src_dst): + """Migrate data in data_src_dst to blk_device and then remount.""" + # mount block device into /mnt + mount(blk_device, '/mnt') + # copy data to /mnt + copy_files(data_src_dst, '/mnt') + # umount block device + umount('/mnt') + # Grab user/group ID's from original source + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + # re-mount where the data should originally be + # TODO: persist is currently a NO-OP in core.host + mount(blk_device, data_src_dst, persist=True) + # ensure original ownership of new mount. + os.chown(data_src_dst, uid, gid) + + +def copy_files(src, dst, symlinks=False, ignore=None): + """Copy files from src to dst.""" + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[], + replicas=3): + """NOTE: This function must only be called from a single service unit for + the same rbd_img otherwise data loss will occur. + + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being re-mounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. 
+ """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('Creating new pool {}.'.format(pool), level=INFO) + create_pool(service, pool, replicas=replicas) + + if not rbd_exists(service, pool, rbd_img): + log('Creating RBD image ({}).'.format(rbd_img), level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), + level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if service_running(svc): + log('Stopping services {} prior to migrating data.' + .format(svc), level=DEBUG) + service_stop(svc) + + place_data_on_block_device(blk_device, mount_point) + + for svc in system_services: + log('Starting service {} after migrating data.' + .format(svc), level=DEBUG) + service_start(svc) + + +def ensure_ceph_keyring(service, user=None, group=None, + relation='ceph', key=None): + """Ensures a ceph keyring is created for a named service and optionally + ensures user and group ownership. + + @returns boolean: Flag to indicate whether a key was successfully written + to disk based on either relation data or a supplied key + """ + if not key: + for rid in relation_ids(relation): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + + if not key: + return False + + add_key(service=service, key=key) + keyring = _keyring_path(service) + if user and group: + check_call(['chown', '%s.%s' % (user, group), keyring]) + + return True + + +class CephBrokerRq(object): + """Ceph broker request. + + Multiple operations can be added to a request and sent to the Ceph broker + to be executed. + + Request is json-encoded for sending over the wire. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, api_version=1, request_id=None): + self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) + self.ops = [] + + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. + :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + + def add_op_request_access_to_group(self, name, namespace=None, + permission=None, key_name=None, + object_prefix_permissions=None): + """ + Adds the requested permissions to the current service's Ceph key, + allowing the key to access only the specified pools or + object prefixes. object_prefix_permissions should be a dictionary + keyed on the permission with the corresponding value being a list + of prefixes to apply that permission to. 
+ { + 'rwx': ['prefix1', 'prefix2'], + 'class-read': ['prefix3']} + """ + self.add_op({ + 'op': 'add-permissions-to-key', 'group': name, + 'namespace': namespace, + 'name': key_name or service_name(), + 'group-permission': permission, + 'object-prefix-permissions': object_prefix_permissions}) + + def add_op_create_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, max_objects=None): + """DEPRECATED: Use ``add_op_create_replicated_pool()`` or + ``add_op_create_erasure_pool()`` instead. + """ + return self.add_op_create_replicated_pool( + name, replica_count=replica_count, pg_num=pg_num, weight=weight, + group=group, namespace=namespace, app_name=app_name, + max_bytes=max_bytes, max_objects=max_objects) + + def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None, + weight=None, group=None, namespace=None, + app_name=None, max_bytes=None, + max_objects=None): + """Adds an operation to create a replicated pool. + + :param name: Name of pool to create + :type name: str + :param replica_count: Number of copies Ceph should keep of your data. + :type replica_count: int + :param pg_num: Request specific number of Placement Groups to create + for pool. + :type pg_num: int + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + Used to calculate number of Placement Groups to create + for pool. + :type weight: float + :param group: Group to add pool to + :type group: str + :param namespace: Group namespace + :type namespace: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. + :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + if pg_num and weight: + raise ValueError('pg_num and weight are mutually exclusive') + + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def add_op_create_erasure_pool(self, name, erasure_profile=None, + weight=None, group=None, app_name=None, + max_bytes=None, max_objects=None): + """Adds an operation to create a erasure coded pool. + + :param name: Name of pool to create + :type name: str + :param erasure_profile: Name of erasure code profile to use. If not + set the ceph-mon unit handling the broker + request will set its default value. + :type erasure_profile: str + :param weight: The percentage of data that is expected to be contained + in the pool from the total available space on the OSDs. + :type weight: float + :param group: Group to add pool to + :type group: str + :param app_name: (Optional) Tag pool with application name. Note that + there is certain protocols emerging upstream with + regard to meaningful application names to use. + Examples are ``rbd`` and ``rgw``. 
+ :type app_name: str + :param max_bytes: Maximum bytes quota to apply + :type max_bytes: int + :param max_objects: Maximum objects quota to apply + :type max_objects: int + """ + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) + + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + + @property + def request(self): + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in [ + 'replicas', 'name', 'op', 'pg_num', 'weight', + 'group', 'group-namespace', 'group-permission', + 'object-prefix-permissions']: + if self.ops[req_no].get(key) != other.ops[req_no].get(key): + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) + + +class CephBrokerRsp(object): + """Ceph broker response. + + Response is json-decoded and contents provided as methods/properties. + + The API is versioned and defaults to version 1. + """ + + def __init__(self, encoded_rsp): + self.api_version = None + self.rsp = json.loads(encoded_rsp) + + @property + def request_id(self): + return self.rsp.get('request-id') + + @property + def exit_code(self): + return self.rsp.get('exit-code') + + @property + def exit_msg(self): + return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# <Request complete actions> +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example + # of glance having sent a request to ceph which ceph has successfully processed + # 'ceph:8': { + # 'ceph/0': { + # 'auth': 'cephx', + # 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', + # 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', + # 'ceph-public-address': '10.5.44.103', + # 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', + # 'private-address': '10.5.44.103', + # }, + # 'glance/0': { + # 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' + # '"ops": [{"replicas": 3, "name": "glance", ' + # '"op": "create-pool"}]}'), + # 'private-address': '10.5.44.109', + # }, + # } + + def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + + def get_request_states(request, relation='ceph'): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + requests = {} + for rid in relation_ids(relation): + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + + def is_request_sent(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similar request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + + def is_request_complete(request, relation='ceph'): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similar request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request, relation=relation) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True
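+ + + # Editor's note: an illustrative sketch of how a charm hook typically + # combines the helpers above (see also the conversation example earlier): + # + #     rq = CephBrokerRq() + #     rq.add_op_create_replicated_pool(name='mypool', replica_count=3) + #     if not is_request_complete(rq): + #         send_request_if_needed(rq) + # + # get_request_states(rq) would then return something of the form + # {'ceph:8': {'sent': True, 'complete': False}}.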
+ + def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. + if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'support unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + + def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + + def send_request_if_needed(request, relation='ceph'): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request, relation=relation): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids(relation): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) + + + def has_broker_rsp(rid=None, unit=None): + """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data. + + :param rid: The relation to check (default of None means current relation) + :type rid: Union[str, None] + :param unit: The remote unit to check (default of None means current unit) + :type unit: Union[str, None] + :returns: True if broker key exists and is set to something 'truthy' + :rtype: bool + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + return True if broker_rsp else False + + + def is_broker_action_done(action, rid=None, unit=None): + """Check whether broker action has completed yet. + + @param action: name of action to be performed + @returns True if action complete otherwise False + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return False + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + val = kvstore.get(key=key) + if val and val == rsp.request_id: + return True + + return False + + + def mark_broker_action_done(action, rid=None, unit=None): + """Mark action as having been completed. + + @param action: name of action to be performed + @returns None + """ + rdata = relation_get(rid=rid, unit=unit) or {} + broker_rsp = rdata.get(get_broker_rsp_key()) + if not broker_rsp: + return + + rsp = CephBrokerRsp(broker_rsp) + unit_name = local_unit().partition('/')[2] + key = "unit_{}_ceph_broker_action.{}".format(unit_name, action) + kvstore = kv() + kvstore.set(key=key, value=rsp.request_id) + kvstore.flush()
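+ + + # Editor's note: an illustrative sketch of the done/mark pattern above, so + # that a follow-up action runs at most once per broker response ('restart' + # is a hypothetical action name): + # + #     if has_broker_rsp() and not is_broker_action_done('restart'): + #         pass  # perform the one-shot follow-up action here + #         mark_broker_action_done('restart')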
+ """ + def __init__(self, permitted_sections=None): + self.permitted_sections = permitted_sections or [] + + def __call__(self): + conf = config('config-flags') + if not conf: + return {} + + conf = config_flags_parser(conf) + if not isinstance(conf, dict): + log("Provided config-flags is not a dictionary - ignoring", + level=WARNING) + return {} + + permitted = self.permitted_sections + if permitted: + diff = set(conf.keys()).difference(set(permitted)) + if diff: + log("Config-flags contains invalid keys '%s' - they will be " + "ignored" % (', '.join(diff)), level=WARNING) + + ceph_conf = {} + for key in conf: + if permitted and key not in permitted: + log("Ignoring key '%s'" % key, level=WARNING) + continue + + ceph_conf[key] = conf[key] + return ceph_conf + + +class CephOSDConfContext(CephConfContext): + """Ceph config (ceph.conf) context. + + Consolidates settings from config-flags via CephConfContext with + settings provided by the mons. The config-flag values are preserved in + conf['osd'], settings from the mons which do not clash with config-flag + settings are in conf['osd_from_client'] and finally settings which do + clash are in conf['osd_from_client_conflict']. Rather than silently drop + the conflicting settings they are provided in the context so they can be + rendered commented out to give some visability to the admin. + """ + + def __init__(self, permitted_sections=None): + super(CephOSDConfContext, self).__init__( + permitted_sections=permitted_sections) + try: + self.settings_from_mons = get_osd_settings('mon') + except OSDSettingConflict: + log( + "OSD settings from mons are inconsistent, ignoring them", + level=WARNING) + self.settings_from_mons = {} + + def filter_osd_from_mon_settings(self): + """Filter settings from client relation against config-flags. + + :returns: A tuple ( + ,config-flag values, + ,client settings which do not conflict with config-flag values, + ,client settings which confilct with config-flag values) + :rtype: (OrderedDict, OrderedDict, OrderedDict) + """ + ceph_conf = super(CephOSDConfContext, self).__call__() + conflicting_entries = {} + clear_entries = {} + for key, value in self.settings_from_mons.items(): + if key in ceph_conf.get('osd', {}): + if ceph_conf['osd'][key] != value: + conflicting_entries[key] = value + else: + clear_entries[key] = value + clear_entries = _order_dict_by_key(clear_entries) + conflicting_entries = _order_dict_by_key(conflicting_entries) + return ceph_conf, clear_entries, conflicting_entries + + def __call__(self): + """Construct OSD config context. + + Standard context with two additional special keys. + osd_from_client_conflict: client settings which confilct with + config-flag values + osd_from_client: settings which do not conflict with config-flag + values + + :returns: OSD config context dict. + :rtype: dict + """ + conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings() + conf['osd_from_client_conflict'] = osd_conflict + conf['osd_from_client'] = osd_clear + return conf diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 00000000..74bab40e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,92 @@ +# Copyright 2014-2015 Canonical Limited. 
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 00000000..74bab40e --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,92 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from subprocess import ( + check_call, + check_output, +) + +import six + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + or: + + /dev/loop0: [0807]:961814 (/tmp/my.img (deleted)) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + output = check_output(cmd) + if six.PY3: + output = output.decode('utf-8') + devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. + + :returns: str: Full path to new loopback device (eg, /dev/loop0) + ''' + file_path = os.path.abspath(file_path) + check_call(['losetup', '--find', file_path]) + for d, f in six.iteritems(loopback_devices()): + if f == file_path: + return d + + +def ensure_loopback_device(path, size): + ''' + Ensure a loopback device exists for a given backing file path and size. + If a loopback device is not already mapped to the file, a new one will be created. + + TODO: Confirm size of found loopback device. + + :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) + ''' + for d, f in six.iteritems(loopback_devices()): + if f == path: + return d + + if not os.path.exists(path): + cmd = ['truncate', '--size', size, path] + check_call(cmd) + + return create_loopback(path) + + +def is_mapped_loopback_device(device): + """ + Checks if a given device name is an existing/mapped loopback device. + :param device: str: Full path to the device (eg, /dev/loop1). + :returns: str: Path to the backing file if it is a loopback device, + empty string otherwise + """ + return loopback_devices().get(device, "")
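+ + +# Editor's note: illustrative usage of the helpers above (the file path and +# size are examples only): +# +#     dev = ensure_loopback_device('/srv/test.img', '5G')  # e.g. '/dev/loop0' +#     assert is_mapped_loopback_device(dev) == '/srv/test.img'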
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/lvm.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/lvm.py new file mode 100644 index 00000000..c8bde692 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/lvm.py @@ -0,0 +1,182 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools +from subprocess import ( + CalledProcessError, + check_call, + check_output, + Popen, + PIPE, +) + + +################################################## +# LVM helpers. +################################################## +def deactivate_lvm_volume_group(block_device): + ''' + Deactivate any volume group associated with an LVM physical volume. + + :param block_device: str: Full path to LVM physical volume + ''' + vg = list_lvm_volume_group(block_device) + if vg: + cmd = ['vgchange', '-an', vg] + check_call(cmd) + + +def is_lvm_physical_volume(block_device): + ''' + Determine whether a block device is initialized as an LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: boolean: True if block device is a PV, False if not. + ''' + try: + check_output(['pvdisplay', block_device]) + return True + except CalledProcessError: + return False + + +def remove_lvm_physical_volume(block_device): + ''' + Remove LVM PV signatures from a given block device. + + :param block_device: str: Full path of block device to scrub. + ''' + p = Popen(['pvremove', '-ff', block_device], + stdin=PIPE) + p.communicate(input='y\n') + + +def list_lvm_volume_group(block_device): + ''' + List LVM volume group associated with a given block device. + + Assumes block device is a valid LVM PV. + + :param block_device: str: Full path of block device to inspect. + + :returns: str: Name of volume group associated with block device or None + ''' + vg = None + pvd = check_output(['pvdisplay', block_device]).splitlines() + for lvm in pvd: + lvm = lvm.decode('UTF-8') + if lvm.strip().startswith('VG Name'): + vg = ' '.join(lvm.strip().split()[2:]) + return vg + + +def create_lvm_physical_volume(block_device): + ''' + Initialize a block device as an LVM physical volume. + + :param block_device: str: Full path of block device to initialize. + + ''' + check_call(['pvcreate', block_device]) + + +def create_lvm_volume_group(volume_group, block_device): + ''' + Create an LVM volume group backed by a given block device. + + Assumes block device has already been initialized as an LVM PV. + + :param volume_group: str: Name of volume group to create. + :block_device: str: Full path of PV-initialized block device.
+ ''' + check_call(['vgcreate', volume_group, block_device]) + + +def list_logical_volumes(select_criteria=None, path_mode=False): + ''' + List logical volumes + + :param select_criteria: str: Limit list to those volumes matching this + criteria (see 'lvs -S help' for more details) + :param path_mode: bool: return logical volume name in 'vg/lv' format, this + format is required for some commands like lvextend + :returns: [str]: List of logical volumes + ''' + lv_diplay_attr = 'lv_name' + if path_mode: + # Parsing output logic relies on the column order + lv_diplay_attr = 'vg_name,' + lv_diplay_attr + cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings'] + if select_criteria: + cmd.extend(['--select', select_criteria]) + lvs = [] + for lv in check_output(cmd).decode('UTF-8').splitlines(): + if not lv: + continue + if path_mode: + lvs.append('/'.join(lv.strip().split())) + else: + lvs.append(lv.strip()) + return lvs + + +list_thin_logical_volume_pools = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^t') + +list_thin_logical_volumes = functools.partial( + list_logical_volumes, + select_criteria='lv_attr =~ ^V') + + +def extend_logical_volume_by_device(lv_name, block_device): + ''' + Extends the size of logical volume lv_name by the amount of free space on + physical volume block_device. + + :param lv_name: str: name of logical volume to be extended (vg/lv format) + :param block_device: str: name of block_device to be allocated to lv_name + ''' + cmd = ['lvextend', lv_name, block_device] + check_call(cmd) + + +def create_logical_volume(lv_name, volume_group, size=None): + ''' + Create a new logical volume in an existing volume group + + :param lv_name: str: name of logical volume to be created. + :param volume_group: str: Name of volume group to use for the new volume. + :param size: str: Size of logical volume to create (100% if not supplied) + :raises subprocess.CalledProcessError: in the event that the lvcreate fails. + ''' + if size: + check_call([ + 'lvcreate', + '--yes', + '-L', + '{}'.format(size), + '-n', lv_name, volume_group + ]) + # create the lv with all the space available, this is needed because the + # system call is different for LVM + else: + check_call([ + 'lvcreate', + '--yes', + '-l', + '100%FREE', + '-n', lv_name, volume_group + ]) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/utils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 00000000..a3561760 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,128 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import re +from stat import S_ISBLK + +from subprocess import ( + CalledProcessError, + check_call, + check_output, + call +) + + +def _luks_uuid(dev): + """ + Check to see if dev is a LUKS encrypted volume, returning the UUID + of volume if it is. + + :param: dev: path to block device to check. + :returns: str. UUID of LUKS device or None if not a LUKS device + """ + try: + cmd = ['cryptsetup', 'luksUUID', dev] + return check_output(cmd).decode('UTF-8').strip() + except CalledProcessError: + return None + + +def is_luks_device(dev): + """ + Determine if dev is a LUKS-formatted block device. + + :param: dev: A full path to a block device to check for LUKS header + presence + :returns: boolean: indicates whether the device has a LUKS header. + """ + return True if _luks_uuid(dev) else False + + +def is_mapped_luks_device(dev): + """ + Determine if dev is a mapped LUKS device + :param: dev: A full path to a block device to be checked + :returns: boolean: indicates whether a device is mapped + """ + _, dirs, _ = next(os.walk( + '/sys/class/block/{}/holders/' + .format(os.path.basename(os.path.realpath(dev)))) + ) + is_held = len(dirs) > 0 + return is_held and is_luks_device(dev) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + if not os.path.exists(path): + return False + return S_ISBLK(os.stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as part of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b + # sometimes sgdisk exits non-zero; this is OK, dd will clean up + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) + dev_end = check_output(['blockdev', '--getsz', + block_device]).decode('UTF-8') + gpt_end = int(dev_end.split()[0]) - 100 + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=1M', 'count=1']) + check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), + 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) + + +def is_device_mounted(device): + '''Given a device path, return True if that device is mounted, and False + if it isn't. + + :param device: str: Full path of the device to check. + :returns: boolean: True if the path represents a mounted device, False if + it doesn't. + ''' + try: + out = check_output(['lsblk', '-P', device]).decode('UTF-8') + except Exception: + return False + return bool(re.search(r'MOUNTPOINT=".+"', out))
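+ + +# Editor's note: an illustrative guard combining the checks above before a +# destructive zap_disk() call ('/dev/sdb' is an example device only): +# +#     dev = '/dev/sdb' +#     if not is_device_mounted(dev) and not is_mapped_luks_device(dev): +#         zap_disk(dev)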
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', "size={}".format(inode_size), device]
+    check_call(cmd)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/contexts.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/contexts.py
new file mode 100644
index 00000000..c1adf94b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/contexts.py
@@ -0,0 +1,137 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Copyright 2013 Canonical Ltd.
+#
+# Authors:
+#  Charm Helpers Developers <juju@lists.ubuntu.com>
+"""A helper to create a yaml cache of config with namespaced relation data."""
+import os
+import yaml
+
+import six
+
+import charmhelpers.core.hookenv
+
+
+charm_dir = os.environ.get('CHARM_DIR', '')
+
+
+def dict_keys_without_hyphens(a_dict):
+    """Return a new dict with underscores instead of hyphens in keys."""
+    return dict(
+        (key.replace('-', '_'), val) for key, val in a_dict.items())
+
+
+def update_relations(context, namespace_separator=':'):
+    """Update the context with the relation data."""
+    # Add any relation data prefixed with the relation type.
+    relation_type = charmhelpers.core.hookenv.relation_type()
+    relations = []
+    context['current_relation'] = {}
+    if relation_type is not None:
+        relation_data = charmhelpers.core.hookenv.relation_get()
+        context['current_relation'] = relation_data
+        # Deprecated: the following use of relation data as keys
+        # directly in the context will be removed.
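+        # For example, on a relation of type 'db' the key 'private-address'
+        # first becomes 'db:private-address' and, once hyphens are replaced
+        # below, is exposed in the context as 'db:private_address'.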
+        relation_data = dict(
+            ("{relation_type}{namespace_separator}{key}".format(
+                relation_type=relation_type,
+                key=key,
+                namespace_separator=namespace_separator), val)
+            for key, val in relation_data.items())
+        relation_data = dict_keys_without_hyphens(relation_data)
+        context.update(relation_data)
+        relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
+        relations = [dict_keys_without_hyphens(rel) for rel in relations]
+
+    context['relations_full'] = charmhelpers.core.hookenv.relations()
+
+    # the hookenv.relations() data structure is effectively unusable in
+    # templates and other contexts when trying to access relation data other
+    # than the current relation. So provide a more useful structure that works
+    # with any hook.
+    local_unit = charmhelpers.core.hookenv.local_unit()
+    relations = {}
+    for rname, rids in context['relations_full'].items():
+        relations[rname] = []
+        for rid, rdata in rids.items():
+            data = rdata.copy()
+            if local_unit in rdata:
+                data.pop(local_unit)
+            for unit_name, rel_data in data.items():
+                new_data = {'__relid__': rid, '__unit__': unit_name}
+                new_data.update(rel_data)
+                relations[rname].append(new_data)
+    context['relations'] = relations
+
+
+def juju_state_to_yaml(yaml_path, namespace_separator=':',
+                       allow_hyphens_in_keys=True, mode=None):
+    """Update the juju config and state in a yaml file.
+
+    This includes any current relation-get data, and the charm
+    directory.
+
+    This function was created for the ansible and saltstack
+    support, as those libraries can use a yaml file to supply
+    context to templates, but it may be useful generally to
+    create and update an on-disk cache of all the config, including
+    previous relation data.
+
+    By default, hyphens are allowed in keys as this is supported
+    by yaml, but for tools like ansible, hyphens are not valid [1].
+
+    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
+    """
+    config = charmhelpers.core.hookenv.config()
+
+    # Add the charm_dir which we will need to refer to charm
+    # file resources etc.
+    config['charm_dir'] = charm_dir
+    config['local_unit'] = charmhelpers.core.hookenv.local_unit()
+    config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
+    config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
+        'public-address'
+    )
+
+    # Don't use non-standard tags for unicode which will not
+    # work when salt uses yaml.safe_load.
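+    # The representer registered below makes text values dump as plain
+    # 'tag:yaml.org,2002:str' scalars (e.g. "name: value") rather than
+    # '!!python/unicode' tagged nodes.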
+ yaml.add_representer(six.text_type, + lambda dumper, value: dumper.represent_scalar( + six.u('tag:yaml.org,2002:str'), value)) + + yaml_dir = os.path.dirname(yaml_path) + if not os.path.exists(yaml_dir): + os.makedirs(yaml_dir) + + if os.path.exists(yaml_path): + with open(yaml_path, "r") as existing_vars_file: + existing_vars = yaml.load(existing_vars_file.read()) + else: + with open(yaml_path, "w+"): + pass + existing_vars = {} + + if mode is not None: + os.chmod(yaml_path, mode) + + if not allow_hyphens_in_keys: + config = dict_keys_without_hyphens(config) + existing_vars.update(config) + + update_relations(existing_vars, namespace_separator) + + with open(yaml_path, "w+") as fp: + fp.write(yaml.dump(existing_vars, default_flow_style=False)) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/jinja.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/jinja.py new file mode 100644 index 00000000..38d4fba0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/jinja.py @@ -0,0 +1,38 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Templating using the python-jinja2 package. +""" +import six +from charmhelpers.fetch import apt_install, apt_update +try: + import jinja2 +except ImportError: + apt_update(fatal=True) + if six.PY3: + apt_install(["python3-jinja2"], fatal=True) + else: + apt_install(["python-jinja2"], fatal=True) + import jinja2 + + +DEFAULT_TEMPLATES_DIR = 'templates' + + +def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/pyformat.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/pyformat.py new file mode 100644 index 00000000..51a24dc4 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/templating/pyformat.py @@ -0,0 +1,27 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +''' +Templating using standard Python str.format() method. 
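+
+For example, with the context supplied by hookenv.execution_environment()
+(which includes keys such as 'unit' and 'conf')::
+
+    render('Unit {unit} is running')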
+'''
+
+from charmhelpers.core import hookenv
+
+
+def render(template, extra={}, **kwargs):
+    """Return the template rendered using Python's str.format()."""
+    context = hookenv.execution_environment()
+    context.update(extra)
+    context.update(kwargs)
+    return template.format(**context)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/unison/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/unison/__init__.py
new file mode 100644
index 00000000..61409b14
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/contrib/unison/__init__.py
@@ -0,0 +1,314 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Easy file synchronization among peer units using ssh + unison.
+#
+# For the -joined, -changed, and -departed peer relations, add a call to
+# ssh_authorized_peers() describing the peer relation and the desired
+# user + group. After all peer relations have settled, all hosts should
+# be able to connect to one another via key auth'd ssh as the specified user.
+#
+# Other hooks are then free to synchronize files and directories using
+# sync_to_peers().
+#
+# For a peer relation named 'cluster', for example:
+#
+# cluster-relation-joined:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# cluster-relation-changed:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# cluster-relation-departed:
+# ...
+# ssh_authorized_peers(peer_interface='cluster',
+#                      user='juju_ssh', group='juju_ssh',
+#                      ensure_local_user=True)
+# ...
+#
+# Hooks are now free to sync files as easily as:
+#
+# files = ['/etc/fstab', '/etc/apt.conf.d/']
+# sync_to_peers(peer_interface='cluster',
+#               user='juju_ssh', paths=files)
+#
+# It is assumed the charm itself has set up permissions on each unit
+# such that 'juju_ssh' has read + write permissions. It is also assumed
+# that the calling charm takes care of leader delegation.
+#
+# Additionally, files can be synchronized to one specific unit:
+# sync_to_peer(slave_address, user='juju_ssh',
+#              paths=[files], verbose=False)
+
+import os
+import pwd
+
+from copy import copy
+from subprocess import check_call, check_output
+
+from charmhelpers.core.host import (
+    adduser,
+    add_user_to_group,
+    pwgen,
+    remove_password_expiry,
+)
+
+from charmhelpers.core.hookenv import (
+    log,
+    hook_name,
+    relation_ids,
+    related_units,
+    relation_set,
+    relation_get,
+    unit_private_ip,
+    INFO,
+    ERROR,
+)
+
+BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
+            '-fastcheck=true', '-group=false', '-owner=false',
+            '-prefer=newer', '-times=true']
+
+
+def get_homedir(user):
+    try:
+        user = pwd.getpwnam(user)
+        return user.pw_dir
+    except KeyError:
+        log('Could not get homedir for user %s: does the user exist?'
% (user), ERROR) + raise Exception + + +def create_private_key(user, priv_key_path, key_type='rsa'): + types_bits = { + 'rsa': '2048', + 'ecdsa': '521', + } + if key_type not in types_bits: + log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR) + key_type = 'rsa' + if not os.path.isfile(priv_key_path): + log('Generating new SSH key for user %s.' % user) + cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type, + '-b', types_bits[key_type], '-f', priv_key_path] + check_call(cmd) + else: + log('SSH key already exists at %s.' % priv_key_path) + check_call(['chown', user, priv_key_path]) + check_call(['chmod', '0600', priv_key_path]) + + +def create_public_key(user, priv_key_path, pub_key_path): + if not os.path.isfile(pub_key_path): + log('Generating missing ssh public key @ %s.' % pub_key_path) + cmd = ['ssh-keygen', '-y', '-f', priv_key_path] + p = check_output(cmd).strip() + with open(pub_key_path, 'wb') as out: + out.write(p) + check_call(['chown', user, pub_key_path]) + + +def get_keypair(user): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + priv_key = os.path.join(ssh_dir, 'id_rsa') + pub_key = '%s.pub' % priv_key + + if not os.path.isdir(ssh_dir): + os.mkdir(ssh_dir) + check_call(['chown', '-R', user, ssh_dir]) + + create_private_key(user, priv_key) + create_public_key(user, priv_key, pub_key) + + with open(priv_key, 'r') as p: + _priv = p.read().strip() + + with open(pub_key, 'r') as p: + _pub = p.read().strip() + + return (_priv, _pub) + + +def write_authorized_keys(user, keys): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + auth_keys = os.path.join(ssh_dir, 'authorized_keys') + log('Syncing authorized_keys @ %s.' % auth_keys) + with open(auth_keys, 'w') as out: + for k in keys: + out.write('%s\n' % k) + + +def write_known_hosts(user, hosts): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + known_hosts = os.path.join(ssh_dir, 'known_hosts') + khosts = [] + for host in hosts: + cmd = ['ssh-keyscan', host] + remote_key = check_output(cmd, universal_newlines=True).strip() + khosts.append(remote_key) + log('Syncing known_hosts @ %s.' % known_hosts) + with open(known_hosts, 'w') as out: + for host in khosts: + out.write('%s\n' % host) + + +def ensure_user(user, group=None): + adduser(user, pwgen()) + if group: + add_user_to_group(user, group) + # Remove password expiry (Bug #1686085) + remove_password_expiry(user) + + +def ssh_authorized_peers(peer_interface, user, group=None, + ensure_local_user=False): + """ + Main setup function, should be called from both peer -changed and -joined + hooks with the same parameters. + """ + if ensure_local_user: + ensure_user(user, group) + priv_key, pub_key = get_keypair(user) + hook = hook_name() + if hook == '%s-relation-joined' % peer_interface: + relation_set(ssh_pub_key=pub_key) + elif hook == '%s-relation-changed' % peer_interface or \ + hook == '%s-relation-departed' % peer_interface: + hosts = [] + keys = [] + + for r_id in relation_ids(peer_interface): + for unit in related_units(r_id): + ssh_pub_key = relation_get('ssh_pub_key', + rid=r_id, + unit=unit) + priv_addr = relation_get('private-address', + rid=r_id, + unit=unit) + if ssh_pub_key: + keys.append(ssh_pub_key) + hosts.append(priv_addr) + else: + log('ssh_authorized_peers(): ssh_pub_key ' + 'missing for unit %s, skipping.' 
% unit)
+        write_authorized_keys(user, keys)
+        write_known_hosts(user, hosts)
+        authed_hosts = ':'.join(hosts)
+        relation_set(ssh_authorized_hosts=authed_hosts)
+
+
+def _run_as_user(user, gid=None):
+    try:
+        user = pwd.getpwnam(user)
+    except KeyError:
+        log('Invalid user: %s' % user)
+        raise Exception
+    uid = user.pw_uid
+    gid = gid or user.pw_gid
+    os.environ['HOME'] = user.pw_dir
+
+    def _inner():
+        os.setgid(gid)
+        os.setuid(uid)
+    return _inner
+
+
+def run_as_user(user, cmd, gid=None):
+    return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/')
+
+
+def collect_authed_hosts(peer_interface):
+    '''Iterate through the units on the peer interface to find all that
+    have the calling host in their authorized hosts list.'''
+    hosts = []
+    for r_id in (relation_ids(peer_interface) or []):
+        for unit in related_units(r_id):
+            private_addr = relation_get('private-address',
+                                        rid=r_id, unit=unit)
+            authed_hosts = relation_get('ssh_authorized_hosts',
+                                        rid=r_id, unit=unit)
+
+            if not authed_hosts:
+                log('Peer %s has not authorized *any* hosts yet, skipping.' %
+                    (unit), level=INFO)
+                continue
+
+            if unit_private_ip() in authed_hosts.split(':'):
+                hosts.append(private_addr)
+            else:
+                log('Peer %s has not authorized *this* host yet, skipping.' %
+                    (unit), level=INFO)
+    return hosts
+
+
+def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
+                      fatal=False):
+    """Sync path to a specific peer host
+
+    Propagates exception if operation fails and fatal=True.
+    """
+    cmd = cmd or copy(BASE_CMD)
+    if not verbose:
+        cmd.append('-silent')
+
+    # Remove trailing slash from directory paths; unison
+    # doesn't like these.
+    if path.endswith('/'):
+        path = path[:(len(path) - 1)]
+
+    cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
+
+    try:
+        log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
+        run_as_user(user, cmd, gid)
+    except Exception:
+        log('Error syncing remote files')
+        if fatal:
+            raise
+
+
+def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
+                 fatal=False):
+    """Sync paths to a specific peer host
+
+    Propagates exception if any operation fails and fatal=True.
+    """
+    if paths:
+        for p in paths:
+            sync_path_to_host(p, host, user, verbose, cmd, gid, fatal)
+
+
+def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
+                  gid=None, fatal=False):
+    """Sync paths to all authorized peer hosts
+
+    gid is an integer group id; it allows the synced files to be handled
+    under a group id that differs from the syncing user's primary group.
+
+    Propagates exception if any operation fails and fatal=True.
+    """
+    if paths:
+        for host in collect_authed_hosts(peer_interface):
+            sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/coordinator.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/coordinator.py
new file mode 100644
index 00000000..59bee3e5
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/coordinator.py
@@ -0,0 +1,606 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''
+The coordinator module allows you to use Juju's leadership feature to
+coordinate operations between units of a service.
+
+Behavior is defined in subclasses of coordinator.BaseCoordinator.
+One implementation is provided (coordinator.Serial), which allows an
+operation to be run on a single unit at a time, on a first come, first
+served basis. You can trivially define more complex behavior by
+subclassing BaseCoordinator or Serial.
+
+:author: Stuart Bishop <stuart.bishop@canonical.com>
+
+
+Services Framework Usage
+========================
+
+Ensure a peers relation is defined in metadata.yaml. Instantiate a
+BaseCoordinator subclass before invoking ServiceManager.manage().
+Ensure that ServiceManager.manage() is wired up to the leader-elected,
+leader-settings-changed, peers relation-changed and peers
+relation-departed hooks in addition to any other hooks you need, or your
+service will deadlock.
+
+Ensure calls to acquire() are guarded, so that locks are only requested
+when they are really needed (and thus hooks only triggered when necessary).
+Failing to do this and calling acquire() unconditionally will put your unit
+into a hook loop. Calls to granted() do not need to be guarded.
+
+For example::
+
+    from charmhelpers.core import hookenv, services
+    from charmhelpers import coordinator
+
+    def maybe_restart(servicename):
+        serial = coordinator.Serial()
+        if needs_restart():
+            serial.acquire('restart')
+        if serial.granted('restart'):
+            hookenv.service_restart(servicename)
+
+    services = [dict(service='servicename',
+                     data_ready=[maybe_restart])]
+
+    if __name__ == '__main__':
+        _ = coordinator.Serial()  # Must instantiate before manager.manage()
+        manager = services.ServiceManager(services)
+        manager.manage()
+
+
+You can implement a similar pattern using a decorator. If the lock has
+not been granted, an attempt to acquire() it will be made if the guard
+function returns True. If the lock has been granted, the decorated function
+is run as normal::
+
+    from charmhelpers.core import hookenv, services
+    from charmhelpers import coordinator
+
+    serial = coordinator.Serial()  # Global, instantiated on module import.
+
+    def needs_restart():
+        [ ... Introspect state. Return True if restart is needed ... ]
+
+    @serial.require('restart', needs_restart)
+    def maybe_restart(servicename):
+        hookenv.service_restart(servicename)
+
+    services = [dict(service='servicename',
+                     data_ready=[maybe_restart])]
+
+    if __name__ == '__main__':
+        manager = services.ServiceManager(services)
+        manager.manage()
+
+
+Traditional Usage
+=================
+
+Ensure a peers relation is defined in metadata.yaml.
+
+If you are using charmhelpers.core.hookenv.Hooks, ensure that a
+BaseCoordinator subclass is instantiated before calling Hooks.execute.
+
+If you are not using charmhelpers.core.hookenv.Hooks, ensure
+that a BaseCoordinator subclass is instantiated and its handle()
+method called at the start of all your hooks.
+
+For example::
+
+    import sys
+    from charmhelpers.core import hookenv
+    from charmhelpers import coordinator
+
+    hooks = hookenv.Hooks()
+
+    def maybe_restart():
+        serial = coordinator.Serial()
+        if serial.granted('restart'):
+            hookenv.service_restart('myservice')
+
+    @hooks.hook
+    def config_changed():
+        update_config()
+        serial = coordinator.Serial()
+        if needs_restart():
+            serial.acquire('restart')
+        maybe_restart()
+
+    # Cluster hooks must be wired up.
+    @hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
+    def cluster_relation_changed():
+        maybe_restart()
+
+    # Leader hooks must be wired up.
+    @hooks.hook('leader-elected', 'leader-settings-changed')
+    def leader_settings_changed():
+        maybe_restart()
+
+    [ ... repeat for *all* other hooks you are using ... ]
+
+    if __name__ == '__main__':
+        _ = coordinator.Serial()  # Must instantiate before execute()
+        hooks.execute(sys.argv)
+
+
+You can also use the require decorator. If the lock has not been granted,
+an attempt to acquire() it will be made if the guard function returns True.
+If the lock has been granted, the decorated function is run as normal::
+
+    from charmhelpers.core import hookenv
+
+    hooks = hookenv.Hooks()
+    serial = coordinator.Serial()  # Must instantiate before execute()
+
+    @require('restart', needs_restart)
+    def maybe_restart():
+        hookenv.service_restart('myservice')
+
+    @hooks.hook('install', 'config-changed', 'upgrade-charm',
+                # Peers and leader hooks must be wired up.
+                'cluster-relation-changed', 'cluster-relation-departed',
+                'leader-elected', 'leader-settings-changed')
+    def default_hook():
+        [...]
+        maybe_restart()
+
+    if __name__ == '__main__':
+        hooks.execute()
+
+
+Details
+=======
+
+A simple API is provided similar to traditional locking APIs. A lock
+may be requested using the acquire() method, and the granted() method
+may be used to check if a lock previously requested by acquire() has
+been granted. It doesn't matter how many times acquire() is called in a
+hook.
+
+Locks are released at the end of the hook they are acquired in. This may
+be the current hook if the unit is leader and the lock is free. It is
+more likely a future hook (probably leader-settings-changed, possibly
+the peers relation-changed or departed hook, potentially any hook).
+
+Whenever a charm needs to perform a coordinated action it will acquire()
+the lock and perform the action immediately if acquisition is
+successful. It will also need to perform the same action in every other
+hook if the lock has been granted.
+
+
+Grubby Details
+--------------
+
+Why do you need to be able to perform the same action in every hook?
+If the unit is the leader, then it may be able to grant its own lock
+and perform the action immediately in the source hook. If the unit is
+the leader and cannot immediately grant the lock, then its only
+guaranteed chance of acquiring the lock is in the peers relation-joined,
+relation-changed or peers relation-departed hooks when another unit has
+released it (the only channel to communicate to the leader is the peers
+relation). If the unit is not the leader, then it is unlikely the lock
+is granted in the source hook (a previous hook must have also made the
+request for this to happen). A non-leader is notified about the lock via
+leader settings. These changes may be visible in any hook, even before
+the leader-settings-changed hook has been invoked. Or the requesting
+unit may be promoted to leader after making a request, in which case the
+lock may be granted in leader-elected or in a future peers
+relation-changed or relation-departed hook.
+
+This could be simpler if leader-settings-changed was invoked on the
+leader. We could then never grant locks except in
+leader-settings-changed hooks giving one place for the operation to be
+performed. Unfortunately this is not the case with Juju 1.23 leadership.
+
+But of course, this doesn't really matter to most people as most people
+seem to prefer the Services Framework or similar reset-the-world
+approaches, rather than the twisty maze of attempting to deduce what
+should be done based on what hook happens to be running (which always
+seems to evolve into reset-the-world anyway when the charm grows beyond
+the trivial).
+
+I chose not to implement a callback model, where a callback was passed
+to acquire to be executed when the lock is granted, because the callback
+may become invalid between making the request and the lock being granted
+due to an upgrade-charm being run in the interim. And it would create
+restrictions, such as no lambdas, callbacks defined at the top level of a
+module, etc. Still, we could implement it on top of what is here, e.g.
+by adding a defer decorator that stores a pickle of itself to disk and
+have BaseCoordinator unpickle and execute them when the locks are granted.
+'''
+from datetime import datetime
+from functools import wraps
+import json
+import os.path
+
+from six import with_metaclass
+
+from charmhelpers.core import hookenv
+
+
+# We make BaseCoordinator and subclasses singletons, so that if we
+# need to spill to local storage then only a single instance does so,
+# rather than having multiple instances stomp over each other.
+class Singleton(type):
+    _instances = {}
+
+    def __call__(cls, *args, **kwargs):
+        if cls not in cls._instances:
+            cls._instances[cls] = super(Singleton, cls).__call__(*args,
+                                                                 **kwargs)
+        return cls._instances[cls]
+
+
+class BaseCoordinator(with_metaclass(Singleton, object)):
+    relid = None  # Peer relation-id, set by __init__
+    relname = None
+
+    grants = None  # self.grants[unit][lock] == timestamp
+    requests = None  # self.requests[unit][lock] == timestamp
+
+    def __init__(self, relation_key='coordinator', peer_relation_name=None):
+        '''Instantiate a Coordinator.
+
+        Data is stored on the peers relation and in leadership storage
+        under the provided relation_key.
+
+        The peers relation is identified by peer_relation_name, and defaults
+        to the first one found in metadata.yaml.
+        '''
+        # Most initialization is deferred, since invoking hook tools from
+        # the constructor makes testing hard.
+        self.key = relation_key
+        self.relname = peer_relation_name
+        hookenv.atstart(self.initialize)
+
+        # Ensure that handle() is called, without placing that burden on
+        # the charm author. They still need to do this manually if they
+        # are not using a hook framework.
+        hookenv.atstart(self.handle)
+
+    def initialize(self):
+        if self.requests is not None:
+            return  # Already initialized.
+
+        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'
+
+        if self.relname is None:
+            self.relname = _implicit_peer_relation_name()
+
+        relids = hookenv.relation_ids(self.relname)
+        if relids:
+            self.relid = sorted(relids)[0]
+
+        # Load our state, from leadership, the peer relationship, and maybe
+        # local state as a fallback. Populates self.requests and self.grants.
+
+        self._load_state()
+        self._emit_state()
+
+        # Save our state if the hook completes successfully.
+        hookenv.atexit(self._save_state)
+
+        # Schedule release of granted locks for the end of the hook.
+        # This needs to be the last of our atexit callbacks to ensure
+        # it will be run first when the hook is complete, because there
+        # is no point mutating our state after it has been saved.
+        hookenv.atexit(self._release_granted)
+
+    def acquire(self, lock):
+        '''Acquire the named lock, non-blocking.
+
+        The lock may be granted immediately, or in a future hook.
+
+        Returns True if the lock has been granted. The lock will be
+        automatically released at the end of the hook in which it is
+        granted.
+
+        Do not mindlessly call this method, as it triggers a cascade of
+        hooks. For example, if you call acquire() every time in your
+        peers relation-changed hook you will end up with an infinite loop
+        of hooks. It should almost always be guarded by some condition.
+        '''
+        unit = hookenv.local_unit()
+        ts = self.requests[unit].get(lock)
+        if not ts:
+            # If there is no outstanding request on the peers relation,
+            # create one.
+            self.requests.setdefault(unit, {})
+            self.requests[unit][lock] = _timestamp()
+            self.msg('Requested {}'.format(lock))
+
+        # If the leader has granted the lock, yay.
+        if self.granted(lock):
+            self.msg('Acquired {}'.format(lock))
+            return True
+
+        # If the unit making the request also happens to be the
+        # leader, it must handle the request now. Even though the
+        # request has been stored on the peers relation, the peers
+        # relation-changed hook will not be triggered.
+        if hookenv.is_leader():
+            return self.grant(lock, unit)
+
+        return False  # Can't acquire lock, yet. Maybe next hook.
+
+    def granted(self, lock):
+        '''Return True if a previously requested lock has been granted'''
+        unit = hookenv.local_unit()
+        ts = self.requests[unit].get(lock)
+        if ts and self.grants.get(unit, {}).get(lock) == ts:
+            return True
+        return False
+
+    def requested(self, lock):
+        '''Return True if we are in the queue for the lock'''
+        return lock in self.requests[hookenv.local_unit()]
+
+    def request_timestamp(self, lock):
+        '''Return the timestamp of our outstanding request for lock, or None.
+
+        Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
+        '''
+        ts = self.requests[hookenv.local_unit()].get(lock, None)
+        if ts is not None:
+            return datetime.strptime(ts, _timestamp_format)
+
+    def handle(self):
+        if not hookenv.is_leader():
+            return  # Only the leader can grant requests.
+
+        self.msg('Leader handling coordinator requests')
+
+        # Clear our grants that have been released.
+        for unit in self.grants.keys():
+            for lock, grant_ts in list(self.grants[unit].items()):
+                req_ts = self.requests.get(unit, {}).get(lock)
+                if req_ts != grant_ts:
+                    # The request timestamp does not match the granted
+                    # timestamp. Several hooks on 'unit' may have run
+                    # before the leader got a chance to make a decision,
+                    # and 'unit' may have released its lock and attempted
+                    # to reacquire it. This will change the timestamp,
+                    # and we correctly revoke the old grant putting it
+                    # to the end of the queue.
+                    ts = datetime.strptime(self.grants[unit][lock],
+                                           _timestamp_format)
+                    del self.grants[unit][lock]
+                    self.released(unit, lock, ts)
+
+        # Grant locks
+        for unit in self.requests.keys():
+            for lock in self.requests[unit]:
+                self.grant(lock, unit)
+
+    def grant(self, lock, unit):
+        '''Maybe grant the lock to a unit.
+
+        The decision to grant the lock or not is made for $lock
+        by a corresponding method grant_$lock, which you may define
+        in a subclass. If no such method is defined, the default_grant
+        method is used. See Serial.default_grant() for details.
+        '''
+        if not hookenv.is_leader():
+            return False  # Not the leader, so we cannot grant.
+
+        # Set of units already granted the lock.
+        granted = set()
+        for u in self.grants:
+            if lock in self.grants[u]:
+                granted.add(u)
+        if unit in granted:
+            return True  # Already granted.
+
+        # Ordered list of units waiting for the lock.
+        reqs = set()
+        for u in self.requests:
+            if u in granted:
+                continue  # In the granted set. Not wanted in the req list.
+            for _lock, ts in self.requests[u].items():
+                if _lock == lock:
+                    reqs.add((ts, u))
+        queue = [t[1] for t in sorted(reqs)]
+        if unit not in queue:
+            return False  # Unit has not requested the lock.
+
+        # Locate custom logic, or fall back to the default.
+        grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)
+
+        if grant_func(lock, unit, granted, queue):
+            # Grant the lock.
+            self.msg('Leader grants {} to {}'.format(lock, unit))
+            self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
+            return True
+
+        return False
+
+    def released(self, unit, lock, timestamp):
+        '''Called on the leader when it has released a lock.
+
+        By default, does nothing but log messages. Override if you
+        need to perform additional housekeeping when a lock is released,
+        for example recording timestamps.
+        '''
+        interval = _utcnow() - timestamp
+        self.msg('Leader released {} from {}, held {}'.format(lock, unit,
+                                                              interval))
+
+    def require(self, lock, guard_func, *guard_args, **guard_kw):
+        """Decorate a function to be run only when a lock is acquired.
+
+        The lock is requested if the guard function returns True.
+
+        The decorated function is called if the lock has been granted.
+        """
+        def decorator(f):
+            @wraps(f)
+            def wrapper(*args, **kw):
+                if self.granted(lock):
+                    self.msg('Granted {}'.format(lock))
+                    return f(*args, **kw)
+                if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
+                    return f(*args, **kw)
+                return None
+            return wrapper
+        return decorator
+
+    def msg(self, msg):
+        '''Emit a message. Override to customize log spam.'''
+        hookenv.log('coordinator.{} {}'.format(self._name(), msg),
+                    level=hookenv.INFO)
+
+    def _name(self):
+        return self.__class__.__name__
+
+    def _load_state(self):
+        self.msg('Loading state')
+
+        # All responses must be stored in the leadership settings.
+        # The leader cannot use local state, as a different unit may
+        # be leader next time. Which is fine, as the leadership
+        # settings are always available.
+        self.grants = json.loads(hookenv.leader_get(self.key) or '{}')
+
+        local_unit = hookenv.local_unit()
+
+        # All requests must be stored on the peers relation. This is
+        # the only channel units have to communicate with the leader.
+        # Even the leader needs to store its requests here, as a
+        # different unit may be leader by the time the request can be
+        # granted.
+        if self.relid is None:
+            # The peers relation is not available. Maybe we are early in
+            # the unit's lifecycle. Maybe this unit is standalone.
+            # Fallback to using local state.
+            self.msg('No peer relation. Loading local state')
+            self.requests = {local_unit: self._load_local_state()}
+        else:
+            self.requests = self._load_peer_state()
+            if local_unit not in self.requests:
+                # The peers relation has just been joined. Update any state
+                # loaded from our peers with our local state.
+                self.msg('New peer relation. Merging local state')
+                self.requests[local_unit] = self._load_local_state()
+
+    def _emit_state(self):
+        # Emit this unit's lock status.
+        for lock in sorted(self.requests[hookenv.local_unit()].keys()):
+            if self.granted(lock):
+                self.msg('Granted {}'.format(lock))
+            else:
+                self.msg('Waiting on {}'.format(lock))
+
+    def _save_state(self):
+        self.msg('Publishing state')
+        if hookenv.is_leader():
+            # sort_keys to ensure stability.
+            raw = json.dumps(self.grants, sort_keys=True)
+            hookenv.leader_set({self.key: raw})
+
+        local_unit = hookenv.local_unit()
+
+        if self.relid is None:
+            # No peers relation yet. Fallback to local state.
+            self.msg('No peer relation. Saving local state')
+            self._save_local_state(self.requests[local_unit])
+        else:
+            # sort_keys to ensure stability.
+            raw = json.dumps(self.requests[local_unit], sort_keys=True)
+            hookenv.relation_set(self.relid, relation_settings={self.key: raw})
+
+    def _load_peer_state(self):
+        requests = {}
+        units = set(hookenv.related_units(self.relid))
+        units.add(hookenv.local_unit())
+        for unit in units:
+            raw = hookenv.relation_get(self.key, unit, self.relid)
+            if raw:
+                requests[unit] = json.loads(raw)
+        return requests
+
+    def _local_state_filename(self):
+        # Include the class name. We allow multiple BaseCoordinator
+        # subclasses to be instantiated, and they are singletons, so
+        # this avoids conflicts (unless someone creates and uses two
+        # BaseCoordinator subclasses with the same class name, so don't
+        # do that).
+        return '.charmhelpers.coordinator.{}'.format(self._name())
+
+    def _load_local_state(self):
+        fn = self._local_state_filename()
+        if os.path.exists(fn):
+            with open(fn, 'r') as f:
+                return json.load(f)
+        return {}
+
+    def _save_local_state(self, state):
+        fn = self._local_state_filename()
+        with open(fn, 'w') as f:
+            json.dump(state, f)
+
+    def _release_granted(self):
+        # At the end of every hook, release all locks granted to
+        # this unit. If a hook neglects to make use of what it
+        # requested, it will just have to make the request again.
+        # Implicit release is the only way this will work, as
+        # if the unit is standalone there may be no future triggers
+        # called to do a manual release.
+        unit = hookenv.local_unit()
+        for lock in list(self.requests[unit].keys()):
+            if self.granted(lock):
+                self.msg('Released local {} lock'.format(lock))
+                del self.requests[unit][lock]
+
+
+class Serial(BaseCoordinator):
+    def default_grant(self, lock, unit, granted, queue):
+        '''Default logic to grant a lock to a unit. Unless overridden,
+        only one unit may hold the lock and it will be granted to the
+        earliest queued request.
+
+        To define custom logic for $lock, create a subclass and
+        define a grant_$lock method.
+
+        `unit` is the unit name making the request.
+
+        `granted` is the set of units already granted the lock. It will
+        never include `unit`. It may be empty.
+
+        `queue` is the list of units waiting for the lock, ordered by time
+        of request. It will always include `unit`, but `unit` is not
+        necessarily first.
+
+        Returns True if the lock should be granted to `unit`.
+        '''
+        return unit == queue[0] and not granted
+
+
+def _implicit_peer_relation_name():
+    md = hookenv.metadata()
+    assert 'peers' in md, 'No peer relations in metadata.yaml'
+    return sorted(md['peers'].keys())[0]
+
+
+# A human readable, sortable UTC timestamp format.
+_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
+
+
+def _utcnow():  # pragma: no cover
+    # This wrapper exists as mocking datetime methods is problematic.
+    return datetime.utcnow()
+
+
+def _timestamp():
+    return _utcnow().strftime(_timestamp_format)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/__init__.py
new file mode 100644
index 00000000..d7567b86
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/decorators.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/decorators.py
new file mode 100644
index 00000000..6ad41ee4
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/decorators.py
@@ -0,0 +1,55 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# Copyright 2014 Canonical Ltd.
+#
+# Authors:
+#  Edward Hope-Morley <opentastic@gmail.com>
+#
+
+import time
+
+from charmhelpers.core.hookenv import (
+    log,
+    INFO,
+)
+
+
+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
+    """If the decorated function raises exception exc_type, allow num_retries
+    retry attempts before raising the exception.
+    """
+    def _retry_on_exception_inner_1(f):
+        def _retry_on_exception_inner_2(*args, **kwargs):
+            retries = num_retries
+            multiplier = 1
+            while True:
+                try:
+                    return f(*args, **kwargs)
+                except exc_type:
+                    if not retries:
+                        raise
+
+                delay = base_delay * multiplier
+                multiplier += 1
+                log("Retrying '%s' %d more times (delay=%s)" %
+                    (f.__name__, retries, delay), level=INFO)
+                retries -= 1
+                if delay:
+                    time.sleep(delay)
+
+        return _retry_on_exception_inner_2
+
+    return _retry_on_exception_inner_1
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/files.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/files.py
new file mode 100644
index 00000000..fdd82b75
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/files.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
+
+import os
+import subprocess
+
+
+def sed(filename, before, after, flags='g'):
+    """
+    Search for and replace the given pattern in filename.
+
+    :param filename: relative or absolute file path.
+    :param before: expression to be replaced (see 'man sed')
+    :param after: expression to replace with (see 'man sed')
+    :param flags: sed-compatible regex flags; for example, to make the
+        search and replace case insensitive, specify ``flags="i"``.
+        Note that overriding this parameter replaces the default ``g``
+        flag, so include it explicitly (e.g. ``flags="gi"``) if you
+        still want a global replacement.
+    :returns: zero if the sed command succeeds; otherwise raises
+        CalledProcessError.
+    """
+    expression = r's/{0}/{1}/{2}'.format(before,
+                                         after, flags)
+
+    return subprocess.check_call(["sed", "-i", "-r", "-e",
+                                  expression,
+                                  os.path.expanduser(filename)])
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/fstab.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/fstab.py
new file mode 100644
index 00000000..d9fa9152
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/fstab.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import io
+import os
+
+__author__ = 'Jorge Niedbalski R.
<jorge.niedbalski@canonical.com>' + + +class Fstab(io.FileIO): + """This class extends file in order to implement a file reader/writer + for file `/etc/fstab` + """ + + class Entry(object): + """Entry class represents a non-comment line on the `/etc/fstab` file + """ + def __init__(self, device, mountpoint, filesystem, + options, d=0, p=0): + self.device = device + self.mountpoint = mountpoint + self.filesystem = filesystem + + if not options: + options = "defaults" + + self.options = options + self.d = int(d) + self.p = int(p) + + def __eq__(self, o): + return str(self) == str(o) + + def __str__(self): + return "{} {} {} {} {} {}".format(self.device, + self.mountpoint, + self.filesystem, + self.options, + self.d, + self.p) + + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') + + def __init__(self, path=None): + if path: + self._path = path + else: + self._path = self.DEFAULT_PATH + super(Fstab, self).__init__(self._path, 'rb+') + + def _hydrate_entry(self, line): + # NOTE: use split with no arguments to split on any + # whitespace including tabs + return Fstab.Entry(*filter( + lambda x: x not in ('', None), + line.strip("\n").split())) + + @property + def entries(self): + self.seek(0) + for line in self.readlines(): + line = line.decode('us-ascii') + try: + if line.strip() and not line.strip().startswith("#"): + yield self._hydrate_entry(line) + except ValueError: + pass + + def get_entry_by_attr(self, attr, value): + for entry in self.entries: + e_attr = getattr(entry, attr) + if e_attr == value: + return entry + return None + + def add_entry(self, entry): + if self.get_entry_by_attr('device', entry.device): + return False + + self.write((str(entry) + '\n').encode('us-ascii')) + self.truncate() + return entry + + def remove_entry(self, entry): + self.seek(0) + + lines = [l.decode('us-ascii') for l in self.readlines()] + + found = False + for index, line in enumerate(lines): + if line.strip() and not line.strip().startswith("#"): + if self._hydrate_entry(line) == entry: + found = True + break + + if not found: + return False + + lines.remove(line) + + self.seek(0) + self.write(''.join(lines).encode('us-ascii')) + self.truncate() + return True + + @classmethod + def remove_by_mountpoint(cls, mountpoint, path=None): + fstab = cls(path=path) + entry = fstab.get_entry_by_attr('mountpoint', mountpoint) + if entry: + return fstab.remove_entry(entry) + return False + + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, path=None): + return cls(path=path).add_entry(Fstab.Entry(device, + mountpoint, filesystem, + options=options)) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hookenv.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hookenv.py new file mode 100644 index 00000000..db7ce728 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hookenv.py @@ -0,0 +1,1613 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
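+
+# A hypothetical hook-side sketch of the helpers defined below; the
+# config option name 'my-option' is illustrative only:
+#
+#     from charmhelpers.core import hookenv
+#     cfg = hookenv.config()
+#     hookenv.log('my-option=%s' % cfg.get('my-option'),
+#                 level=hookenv.INFO)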
+ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers <juju@lists.ubuntu.com> + +from __future__ import print_function +import copy +from distutils.version import LooseVersion +from enum import Enum +from functools import wraps +from collections import namedtuple +import glob +import os +import json +import yaml +import re +import subprocess +import sys +import errno +import tempfile +from subprocess import CalledProcessError + +from charmhelpers import deprecate + +import six +if not six.PY3: + from UserDict import UserDict +else: + from collections import UserDict + + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +TRACE = "TRACE" +MARKER = object() +SH_MAX_ARG = 131071 + + +RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. ' + 'This may not be compatible with software you are ' + 'running in your shell.') + + +class WORKLOAD_STATES(Enum): + ACTIVE = 'active' + BLOCKED = 'blocked' + MAINTENANCE = 'maintenance' + WAITING = 'waiting' + + +cache = {} + + +def cached(func): + """Cache return values for multiple executions of func + args + + For example:: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + """ + @wraps(func) + def wrapper(*args, **kwargs): + global cache + key = json.dumps((func, args, kwargs), sort_keys=True, default=str) + try: + return cache[key] + except KeyError: + pass # Drop out of the exception handler scope. + res = func(*args, **kwargs) + cache[key] = res + return res + wrapper._wrapped = func + return wrapper + + +def flush(key): + """Flushes any entries from function cache where the + key is found in the function+args """ + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + """Write a message to the juju log""" + command = ['juju-log'] + if level: + command += ['-l', level] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing juju-log should not cause failures in unit tests + # Send log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + if level: + message = "{}: {}".format(level, message) + message = "juju-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + +class Serializable(UserDict): + """Wrapper, an object that can be serialized to yaml or json""" + + def __init__(self, obj): + # wrap the object + UserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. 
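+        # e.g. Serializable({'foo': 'bar'}).foo returns 'bar'.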
+        try:
+            return self.data[attr]
+        except KeyError:
+            raise AttributeError(attr)
+
+    def __getstate__(self):
+        # Pickle as a standard dictionary.
+        return self.data
+
+    def __setstate__(self, state):
+        # Unpickle into our wrapper.
+        self.data = state
+
+    def json(self):
+        """Serialize the object to json"""
+        return json.dumps(self.data)
+
+    def yaml(self):
+        """Serialize the object to yaml"""
+        return yaml.dump(self.data)
+
+
+def execution_environment():
+    """A convenient bundling of the current execution context"""
+    context = {}
+    context['conf'] = config()
+    if relation_id():
+        context['reltype'] = relation_type()
+        context['relid'] = relation_id()
+        context['rel'] = relation_get()
+    context['unit'] = local_unit()
+    context['rels'] = relations()
+    context['env'] = os.environ
+    return context
+
+
+def in_relation_hook():
+    """Determine whether we're running in a relation hook"""
+    return 'JUJU_RELATION' in os.environ
+
+
+def relation_type():
+    """The scope for the current relation hook"""
+    return os.environ.get('JUJU_RELATION', None)
+
+
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+    """The relation ID for the current or a specified relation"""
+    if not relation_name and not service_or_unit:
+        return os.environ.get('JUJU_RELATION_ID', None)
+    elif relation_name and service_or_unit:
+        service_name = service_or_unit.split('/')[0]
+        for relid in relation_ids(relation_name):
+            remote_service = remote_service_name(relid)
+            if remote_service == service_name:
+                return relid
+    else:
+        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
+
+
+def local_unit():
+    """Local unit ID"""
+    return os.environ['JUJU_UNIT_NAME']
+
+
+def remote_unit():
+    """The remote unit for the current relation hook"""
+    return os.environ.get('JUJU_REMOTE_UNIT', None)
+
+
+def application_name():
+    """
+    The name of the deployed application this unit belongs to.
+    """
+    return local_unit().split('/')[0]
+
+
+def service_name():
+    """
+    .. deprecated:: 0.19.1
+       Alias for :func:`application_name`.
+    """
+    return application_name()
+
+
+def model_name():
+    """
+    Name of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+    """
+    UUID of the model that this unit is deployed in.
+    """
+    return os.environ['JUJU_MODEL_UUID']
+
+
+def principal_unit():
+    """Returns the principal unit of this unit, otherwise None"""
+    # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
+    principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
+    # If it's empty, then this unit is the principal
+    if principal_unit == '':
+        return os.environ['JUJU_UNIT_NAME']
+    elif principal_unit is not None:
+        return principal_unit
+    # For Juju 2.1 and below, let's try to work out the principal unit by
+    # the various charms' metadata.yaml.
+ for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + if not md: + continue + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + +def hook_name(): + """The name of the currently executing hook""" + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) + + +class Config(dict): + """A dictionary representation of the charm's config.yaml, with some + extra features: + + - See which values in the dictionary have changed since the previous hook. + - For values that have changed, see what the previous value was. + - Store arbitrary data for use in a later hook. + + NOTE: Do not instantiate this object directly - instead call + ``hookenv.config()``, which will return an instance of :class:`Config`. + + Example usage:: + + >>> # inside a hook + >>> from charmhelpers.core import hookenv + >>> config = hookenv.config() + >>> config['foo'] + 'bar' + >>> # store a new key/value for later use + >>> config['mykey'] = 'myval' + + + >>> # user runs `juju set mycharm foo=baz` + >>> # now we're inside subsequent config-changed hook + >>> config = hookenv.config() + >>> config['foo'] + 'baz' + >>> # test to see if this val has changed since last hook + >>> config.changed('foo') + True + >>> # what was the previous value? + >>> config.previous('foo') + 'bar' + >>> # keys/values that we add are preserved across hooks + >>> config['mykey'] + 'myval' + + """ + CONFIG_FILE_NAME = '.juju-persistent-config' + + def __init__(self, *args, **kw): + super(Config, self).__init__(*args, **kw) + self.implicit_save = True + self._prev_dict = None + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) + if os.path.exists(self.path) and os.stat(self.path).st_size: + self.load_previous() + atexit(self._implicit_save) + + def load_previous(self, path=None): + """Load previous copy of config from disk. + + In normal usage you don't need to call this method directly - it + is called automatically at object initialization. + + :param path: + + File path from which to load the previous config. If `None`, + config is loaded from the default location. If `path` is + specified, subsequent `save()` calls will write to the same + path. + + """ + self.path = path or self.path + with open(self.path) as f: + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Found but was unable to parse previous config data, ' + 'ignoring which will report all values as changed - {}' + .format(str(e)), level=ERROR) + return + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v + + def changed(self, key): + """Return True if the current value for this key is different from + the previous value. + + """ + if self._prev_dict is None: + return True + return self.previous(key) != self.get(key) + + def previous(self, key): + """Return previous value for this key, or None if there + is no previous value. + + """ + if self._prev_dict: + return self._prev_dict.get(key) + return None + + def save(self): + """Save this config to disk. 
+
+ If the charm is using the :mod:`Services Framework <services.base>`
+ or :meth:`@hook <Hooks.hook>` decorator, this
+ is called automatically at the end of successful hook execution.
+ Otherwise, it should be called directly by user code.
+
+ To disable automatic saves, set ``implicit_save=False`` on this
+ instance.
+
+ """
+ with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
+ json.dump(self, f)
+
+ def _implicit_save(self):
+ if self.implicit_save:
+ self.save()
+
+
+_cache_config = None
+
+
+def config(scope=None):
+ """
+ Get the juju charm configuration (scope==None) or an individual key
+ (scope=str). The returned value is a Python data structure loaded as
+ JSON from the Juju config command.
+
+ :param scope: If set, return the value for the specified key.
+ :type scope: Optional[str]
+ :returns: Either the whole config as a Config, or a key from it.
+ :rtype: Any
+ """
+ global _cache_config
+ config_cmd_line = ['config-get', '--all', '--format=json']
+ try:
+ # JSON Decode Exception for Python3.5+
+ exc_json = json.decoder.JSONDecodeError
+ except AttributeError:
+ # JSON Decode Exception for Python2.7 through Python3.4
+ exc_json = ValueError
+ try:
+ if _cache_config is None:
+ config_data = json.loads(
+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
+ _cache_config = Config(config_data)
+ if scope is not None:
+ return _cache_config.get(scope)
+ return _cache_config
+ except (exc_json, UnicodeDecodeError) as e:
+ log('Unable to parse output from config-get: config_cmd_line="{}" '
+ 'message="{}"'
+ .format(config_cmd_line, str(e)), level=ERROR)
+ return None
+
+
+@cached
+def relation_get(attribute=None, unit=None, rid=None):
+ """Get relation information"""
+ _args = ['relation-get', '--format=json']
+ if rid:
+ _args.append('-r')
+ _args.append(rid)
+ _args.append(attribute or '-')
+ if unit:
+ _args.append(unit)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+ except CalledProcessError as e:
+ if e.returncode == 2:
+ return None
+ raise
+
+
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
+ """Set relation information for the current unit"""
+ relation_settings = relation_settings if relation_settings else {}
+ relation_cmd_line = ['relation-set']
+ accepts_file = "--file" in subprocess.check_output(
+ relation_cmd_line + ["--help"], universal_newlines=True)
+ if relation_id is not None:
+ relation_cmd_line.extend(('-r', relation_id))
+ settings = relation_settings.copy()
+ settings.update(kwargs)
+ for key, value in settings.items():
+ # Force value to be a string: it always should be, but some call
+ # sites pass in things like dicts or numbers.
+ if value is not None:
+ settings[key] = "{}".format(value)
+ if accepts_file:
+ # --file was introduced in Juju 1.23.2. Use it by default if
+ # available, since otherwise we'll break if the relation data is
+ # too big. Ideally we should tell relation-set to read the data from
+ # stdin, but that feature is broken in 1.23.2: Bug #1454678.
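+ # Illustration (editor's note, hypothetical values): with
+ # settings={'dbname': 'x'} the block below is equivalent to running
+ # relation-set --file <tmpfile>
+ # where <tmpfile> contains the YAML "dbname: x".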
+ with tempfile.NamedTemporaryFile(delete=False) as settings_file:
+ settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
+ subprocess.check_call(
+ relation_cmd_line + ["--file", settings_file.name])
+ os.remove(settings_file.name)
+ else:
+ for key, value in settings.items():
+ if value is None:
+ relation_cmd_line.append('{}='.format(key))
+ else:
+ relation_cmd_line.append('{}={}'.format(key, value))
+ subprocess.check_call(relation_cmd_line)
+ # Flush cache of any relation-gets for local unit
+ flush(local_unit())
+
+
+def relation_clear(r_id=None):
+ ''' Clears any relation data already set on relation r_id '''
+ settings = relation_get(rid=r_id,
+ unit=local_unit())
+ for setting in settings:
+ if setting not in ['public-address', 'private-address']:
+ settings[setting] = None
+ relation_set(relation_id=r_id,
+ **settings)
+
+
+@cached
+def relation_ids(reltype=None):
+ """A list of relation_ids"""
+ reltype = reltype or relation_type()
+ relid_cmd_line = ['relation-ids', '--format=json']
+ if reltype is not None:
+ relid_cmd_line.append(reltype)
+ return json.loads(
+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
+ return []
+
+
+@cached
+def related_units(relid=None):
+ """A list of related units"""
+ relid = relid or relation_id()
+ units_cmd_line = ['relation-list', '--format=json']
+ if relid is not None:
+ units_cmd_line.extend(('-r', relid))
+ return json.loads(
+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
+
+
+def expected_peer_units():
+ """Get a generator for units we expect to join peer relation based on
+ goal-state.
+
+ The local unit is excluded from the result to make it easy to gauge
+ completion of all peers joining the relation with existing hook tools.
+
+ Example usage:
+ log('peer {} of {} joined peer relation'
+ .format(len(related_units()),
+ len(list(expected_peer_units()))))
+
+ This function will raise NotImplementedError if used with juju versions
+ without goal-state support.
+
+ :returns: iterator
+ :rtype: types.GeneratorType
+ :raises: NotImplementedError
+ """
+ if not has_juju_version("2.4.0"):
+ # goal-state first appeared in 2.4.0.
+ raise NotImplementedError("goal-state")
+ _goal_state = goal_state()
+ return (key for key in _goal_state['units']
+ if '/' in key and key != local_unit())
+
+
+def expected_related_units(reltype=None):
+ """Get a generator for units we expect to join relation based on
+ goal-state.
+
+ Note that you cannot use this function for the peer relation; take a look
+ at expected_peer_units() for that.
+
+ This function will raise KeyError if you request information for a
+ relation type for which juju goal-state does not have information. It will
+ raise NotImplementedError if used with juju versions without goal-state
+ support.
+
+ Example usage:
+ log('participant {} of {} joined relation {}'
+ .format(len(related_units()),
+ len(list(expected_related_units())),
+ relation_type()))
+
+ :param reltype: Relation type to list data for, default is to list data for
+ the relation type we are currently executing a hook for.
+ :type reltype: str
+ :returns: iterator
+ :rtype: types.GeneratorType
+ :raises: KeyError, NotImplementedError
+ """
+ if not has_juju_version("2.4.4"):
+ # goal-state existed in 2.4.0, but did not list individual units to
+ # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
+ raise NotImplementedError("goal-state relation unit count")
+ reltype = reltype or relation_type()
+ _goal_state = goal_state()
+ return (key for key in _goal_state['relations'][reltype] if '/' in key)
+
+
+@cached
+def relation_for_unit(unit=None, rid=None):
+ """Get the json representation of a unit's relation"""
+ unit = unit or remote_unit()
+ relation = relation_get(unit=unit, rid=rid)
+ for key in relation:
+ if key.endswith('-list'):
+ relation[key] = relation[key].split()
+ relation['__unit__'] = unit
+ return relation
+
+
+@cached
+def relations_for_id(relid=None):
+ """Get relations of a specific relation ID"""
+ relation_data = []
+ relid = relid or relation_id()
+ for unit in related_units(relid):
+ unit_data = relation_for_unit(unit, relid)
+ unit_data['__relid__'] = relid
+ relation_data.append(unit_data)
+ return relation_data
+
+
+@cached
+def relations_of_type(reltype=None):
+ """Get relations of a specific type"""
+ relation_data = []
+ reltype = reltype or relation_type()
+ for relid in relation_ids(reltype):
+ for relation in relations_for_id(relid):
+ relation['__relid__'] = relid
+ relation_data.append(relation)
+ return relation_data
+
+
+@cached
+def metadata():
+ """Get the current charm metadata.yaml contents as a python object"""
+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
+ return yaml.safe_load(md)
+
+
+def _metadata_unit(unit):
+ """Given the name of a unit (e.g. apache2/0), get the unit charm's
+ metadata.yaml. Very similar to metadata() but allows us to inspect
+ other units. Unit needs to be co-located, such as a subordinate or
+ principal/primary.
+
+ :returns: metadata.yaml as a python object.
+
+ """
+ basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+ unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+ joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+ if not os.path.exists(joineddir):
+ return None
+ with open(joineddir) as md:
+ return yaml.safe_load(md)
+
+
+@cached
+def relation_types():
+ """Get a list of relation types supported by this charm"""
+ rel_types = []
+ md = metadata()
+ for key in ('provides', 'requires', 'peers'):
+ section = md.get(key)
+ if section:
+ rel_types.extend(section.keys())
+ return rel_types
+
+
+@cached
+def peer_relation_id():
+ '''Get the peers relation id if a peers relation has been joined, else None.'''
+ md = metadata()
+ section = md.get('peers')
+ if section:
+ for key in section:
+ relids = relation_ids(key)
+ if relids:
+ return relids[0]
+ return None
+
+
+@cached
+def relation_to_interface(relation_name):
+ """
+ Given the name of a relation, return the interface that relation uses.
+
+ :returns: The interface name, or ``None``.
+ """
+ return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+ """
+ Given the name of a relation, return the role and the name of the interface
+ that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
+
+ :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
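+
+ Example (editor's sketch, hypothetical relation name)::
+
+ >>> relation_to_role_and_interface('database')
+ ('requires', 'mysql')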
+ """ + _metadata = metadata() + for role in ('provides', 'requires', 'peers'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peers``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peers'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + +@cached +def charm_name(): + """Get the name of the current charm as is specified on metadata.yaml""" + return metadata().get('name') + + +@cached +def relations(): + """Get a nested dictionary of relation data for all related units""" + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +@cached +def is_relation_made(relation, keys='private-address'): + ''' + Determine whether a relation is established by checking for + presence of key(s). If a list of keys is provided, they + must all be present for the relation to be identified as made + ''' + if isinstance(keys, str): + keys = [keys] + for r_id in relation_ids(relation): + for unit in related_units(r_id): + context = {} + for k in keys: + context[k] = relation_get(k, rid=r_id, + unit=unit) + if None not in context.values(): + return True + return False + + +def _port_op(op_name, port, protocol="TCP"): + """Open or close a service network port""" + _args = [op_name] + icmp = protocol.upper() == "ICMP" + if icmp: + _args.append(protocol) + else: + _args.append('{}/{}'.format(port, protocol)) + try: + subprocess.check_call(_args) + except subprocess.CalledProcessError: + # Older Juju pre 2.3 doesn't support ICMP + # so treat it as a no-op if it fails. 
+ if not icmp:
+ raise
+
+
+def open_port(port, protocol="TCP"):
+ """Open a service network port"""
+ _port_op('open-port', port, protocol)
+
+
+def close_port(port, protocol="TCP"):
+ """Close a service network port"""
+ _port_op('close-port', port, protocol)
+
+
+def open_ports(start, end, protocol="TCP"):
+ """Opens a range of service network ports"""
+ _args = ['open-port']
+ _args.append('{}-{}/{}'.format(start, end, protocol))
+ subprocess.check_call(_args)
+
+
+def close_ports(start, end, protocol="TCP"):
+ """Close a range of service network ports"""
+ _args = ['close-port']
+ _args.append('{}-{}/{}'.format(start, end, protocol))
+ subprocess.check_call(_args)
+
+
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
+@cached
+def unit_get(attribute):
+ """Get an attribute of the local unit (e.g. 'public-address')"""
+ _args = ['unit-get', '--format=json', attribute]
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
+def unit_public_ip():
+ """Get this unit's public IP address"""
+ return unit_get('public-address')
+
+
+def unit_private_ip():
+ """Get this unit's private IP address"""
+ return unit_get('private-address')
+
+
+@cached
+def storage_get(attribute=None, storage_id=None):
+ """Get storage attributes"""
+ _args = ['storage-get', '--format=json']
+ if storage_id:
+ _args.extend(('-s', storage_id))
+ if attribute:
+ _args.append(attribute)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+
+
+@cached
+def storage_list(storage_name=None):
+ """List the storage IDs for the unit"""
+ _args = ['storage-list', '--format=json']
+ if storage_name:
+ _args.append(storage_name)
+ try:
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+ except ValueError:
+ return None
+ except OSError as e:
+ import errno
+ if e.errno == errno.ENOENT:
+ # storage-list does not exist
+ return []
+ raise
+
+
+class UnregisteredHookError(Exception):
+ """Raised when an undefined hook is called"""
+ pass
+
+
+class Hooks(object):
+ """A convenient handler for hook functions.
+
+ Example::
+
+ hooks = Hooks()
+
+ # register a hook, taking its name from the function name
+ @hooks.hook()
+ def install():
+ pass # your code here
+
+ # register a hook, providing a custom hook name
+ @hooks.hook("config-changed")
+ def config_changed():
+ pass # your code here
+
+ if __name__ == "__main__":
+ # execute a hook based on the name the program is called by
+ hooks.execute(sys.argv)
+ """
+
+ def __init__(self, config_save=None):
+ super(Hooks, self).__init__()
+ self._hooks = {}
+
+ # For unknown reasons, we allow the Hooks constructor to override
+ # config().implicit_save.
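+ # (Editor's note) e.g. Hooks(config_save=False) suppresses the
+ # automatic Config.save() that config() registers via atexit.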
+ if config_save is not None:
+ config().implicit_save = config_save
+
+ def register(self, name, function):
+ """Register a hook"""
+ self._hooks[name] = function
+
+ def execute(self, args):
+ """Execute a registered hook based on args[0]"""
+ _run_atstart()
+ hook_name = os.path.basename(args[0])
+ if hook_name in self._hooks:
+ try:
+ self._hooks[hook_name]()
+ except SystemExit as x:
+ if x.code is None or x.code == 0:
+ _run_atexit()
+ raise
+ _run_atexit()
+ else:
+ raise UnregisteredHookError(hook_name)
+
+ def hook(self, *hook_names):
+ """Decorator that registers the decorated function as one or more hooks"""
+ def wrapper(decorated):
+ for hook_name in hook_names:
+ self.register(hook_name, decorated)
+ else:
+ self.register(decorated.__name__, decorated)
+ if '_' in decorated.__name__:
+ self.register(
+ decorated.__name__.replace('_', '-'), decorated)
+ return decorated
+ return wrapper
+
+
+class NoNetworkBinding(Exception):
+ pass
+
+
+def charm_dir():
+ """Return the root directory of the current charm"""
+ d = os.environ.get('JUJU_CHARM_DIR')
+ if d is not None:
+ return d
+ return os.environ.get('CHARM_DIR')
+
+
+def cmd_exists(cmd):
+ """Return True if the specified cmd exists in the path"""
+ return any(
+ os.access(os.path.join(path, cmd), os.X_OK)
+ for path in os.environ["PATH"].split(os.pathsep)
+ )
+
+
+@cached
+@deprecate("moved to function_get()", log=log)
+def action_get(key=None):
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_get`.
+
+ Gets the value of an action parameter, or all key/value param pairs.
+ """
+ cmd = ['action-get']
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return action_data
+
+
+@cached
+def function_get(key=None):
+ """Gets the value of an action parameter, or all key/value param pairs"""
+ cmd = ['function-get']
+ # Fallback for older charms.
+ if not cmd_exists('function-get'):
+ cmd = ['action-get']
+
+ if key is not None:
+ cmd.append(key)
+ cmd.append('--format=json')
+ function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+ return function_data
+
+
+@deprecate("moved to function_set()", log=log)
+def action_set(values):
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_set`.
+
+ Sets the values to be returned after the action finishes.
+ """
+ cmd = ['action-set']
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+def function_set(values):
+ """Sets the values to be returned after the function finishes"""
+ cmd = ['function-set']
+ # Fallback for older charms: probe for the function-set tool itself.
+ if not cmd_exists('function-set'):
+ cmd = ['action-set']
+
+ for k, v in list(values.items()):
+ cmd.append('{}={}'.format(k, v))
+ subprocess.check_call(cmd)
+
+
+@deprecate("moved to function_fail()", log=log)
+def action_fail(message):
+ """
+ .. deprecated:: 0.20.7
+ Alias for :func:`function_fail`.
+
+ Sets the action status to failed and sets the error message.
+
+ The results set by action_set are preserved.
+ """
+ subprocess.check_call(['action-fail', message])
+
+
+def function_fail(message):
+ """Sets the function status to failed and sets the error message.
+
+ The results set by function_set are preserved."""
+ cmd = ['function-fail']
+ # Fallback for older charms.
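+ # (Editor's note) on agents that predate the function-* hook tools
+ # only action-fail exists, hence the probe below.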
+ if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + +def status_set(workload_state, message, application=False): + """Set the workload state with a message + + Use status-set to set the workload state with a message which is visible + to the user via juju status. If the status-set command is not found then + assume this is juju < 1.23 and juju-log the message instead. + + workload_state -- valid juju workload state. str or WORKLOAD_STATES + message -- status update message + application -- Whether this is an application state set + """ + bad_state_msg = '{!r} is not a valid workload state' + + if isinstance(workload_state, str): + try: + # Convert string to enum. + workload_state = WORKLOAD_STATES[workload_state.upper()] + except KeyError: + raise ValueError(bad_state_msg.format(workload_state)) + + if workload_state not in WORKLOAD_STATES: + raise ValueError(bad_state_msg.format(workload_state)) + + cmd = ['status-set'] + if application: + cmd.append('--application') + cmd.extend([workload_state.value, message]) + try: + ret = subprocess.call(cmd) + if ret == 0: + return + except OSError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'status-set failed: {} {}'.format(workload_state.value, + message) + log(log_message, level='INFO') + + +def status_get(): + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" + + """ + cmd = ['status-get', "--format=json", "--include-data"] + try: + raw_status = subprocess.check_output(cmd) + except OSError as e: + if e.errno == errno.ENOENT: + return ('unknown', "") + else: + raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + @wraps(f) + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +def application_version_set(version): + """Charm authors may trigger this command from any hook to output what + version of the application is running. This could be a package version, + for instance postgres version 9.5. It could also be a build number or + version control revision identifier, for instance git sha 6fb7ba68. 
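+
+ Example (editor's sketch)::
+
+ application_version_set('9.5') # e.g. the installed postgres version
+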
""" + + cmd = ['application-version-set'] + cmd.append(version) + try: + subprocess.check_call(cmd) + except OSError: + log("Application Version: {}".format(version)) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +@cached +def goal_state(): + """Juju goal state values""" + cmd = ['goal-state', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_register(ptype, klass, pid): + """ is used while a hook is running to let Juju know that a + payload has been started.""" + cmd = ['payload-register'] + for x in [ptype, klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_unregister(klass, pid): + """ is used while a hook is running to let Juju know + that a payload has been manually stopped. The <class> and <id> provided + must match a payload that has been previously registered with juju using + payload-register.""" + cmd = ['payload-unregister'] + for x in [klass, pid]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def payload_status_set(klass, pid, status): + """is used to update the current status of a registered payload. + The <class> and <id> provided must match a payload that has been previously + registered with juju using payload-register. The <status> must be one of the + follow: starting, started, stopping, stopped""" + cmd = ['payload-status-set'] + for x in [klass, pid, status]: + cmd.append(x) + subprocess.check_call(cmd) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def resource_get(name): + """used to fetch the resource path of the given name. + + <name> must match a name of defined resource in metadata.yaml + + returns either a path or False if resource not available + """ + if not name: + return False + + cmd = ['resource-get', name] + try: + return subprocess.check_output(cmd).decode('UTF-8') + except subprocess.CalledProcessError: + return False + + +@cached +def juju_version(): + """Full version string (eg. 
'1.23.3.1-trusty-amd64')"""
+ # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
+ jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
+ return subprocess.check_output([jujud, 'version'],
+ universal_newlines=True).strip()
+
+
+def has_juju_version(minimum_version):
+ """Return True if the Juju version is at least the provided version"""
+ return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
+
+
+_atexit = []
+_atstart = []
+
+
+def atstart(callback, *args, **kwargs):
+ '''Schedule a callback to run before the main hook.
+
+ Callbacks are run in the order they were added.
+
+ This is useful for modules and classes to perform initialization
+ and inject behavior. In particular:
+
+ - Run common code before all of your hooks, such as logging
+ the hook name or interesting relation data.
+ - Defer object or module initialization that requires a hook
+ context until we know there actually is a hook context,
+ making testing easier.
+ - Rather than requiring charm authors to include boilerplate to
+ invoke your helper's behavior, have it run automatically if
+ your object is instantiated or module imported.
+
+ This is not at all useful after your hook framework has been launched.
+ '''
+ global _atstart
+ _atstart.append((callback, args, kwargs))
+
+
+def atexit(callback, *args, **kwargs):
+ '''Schedule a callback to run on successful hook completion.
+
+ Callbacks are run in the reverse order that they were added.'''
+ _atexit.append((callback, args, kwargs))
+
+
+def _run_atstart():
+ '''Hook frameworks must invoke this before running the main hook body.'''
+ global _atstart
+ for callback, args, kwargs in _atstart:
+ callback(*args, **kwargs)
+ del _atstart[:]
+
+
+def _run_atexit():
+ '''Hook frameworks must invoke this after the main hook body has
+ successfully completed. Do not invoke it if the hook fails.'''
+ global _atexit
+ for callback, args, kwargs in reversed(_atexit):
+ callback(*args, **kwargs)
+ del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+ '''
+ Deprecated since Juju 2.3; use network_get()
+
+ Retrieve the primary network address for a named binding
+
+ :param binding: string. The name of a relation or extra-binding
+ :return: string. The primary IP address for the named binding
+ :raise: NotImplementedError if run on Juju < 2.0
+ '''
+ cmd = ['network-get', '--primary-address', binding]
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ if 'no network config found for binding' in e.output.decode('UTF-8'):
+ raise NoNetworkBinding("No network binding for {}"
+ .format(binding))
+ else:
+ raise
+ return response
+
+
+def network_get(endpoint, relation_id=None):
+ """
+ Retrieve the network details for a relation endpoint
+
+ :param endpoint: string. The name of a relation endpoint
+ :param relation_id: string. The ID of the relation for the current context.
+ :return: dict. The loaded YAML output of the network-get query.
+ :raise: NotImplementedError if request not supported by the Juju version.
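+
+ Example output (editor's sketch, abridged; exact fields vary by
+ Juju version)::
+
+ {'bind-addresses': [{'interface-name': 'eth0',
+ 'addresses': [{'address': '10.0.0.2',
+ 'cidr': '10.0.0.0/24'}]}],
+ 'ingress-addresses': ['10.0.0.2']}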
+ """ + if not has_juju_version('2.2'): + raise NotImplementedError(juju_version()) # earlier versions require --primary-address + if relation_id and not has_juju_version('2.3'): + raise NotImplementedError # 2.3 added the -r option + + cmd = ['network-get', endpoint, '--format', 'yaml'] + if relation_id: + cmd.append('-r') + cmd.append(relation_id) + response = subprocess.check_output( + cmd, + stderr=subprocess.STDOUT).decode('UTF-8').strip() + return yaml.safe_load(response) + + +def add_metric(*args, **kwargs): + """Add metric values. Values may be expressed with keyword arguments. For + metric names containing dashes, these may be expressed as one or more + 'key=value' positional arguments. May only be called from the collect-metrics + hook.""" + _args = ['add-metric'] + _kvpairs = [] + _kvpairs.extend(args) + _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()]) + _args.extend(sorted(_kvpairs)) + try: + subprocess.check_call(_args) + return + except EnvironmentError as e: + if e.errno != errno.ENOENT: + raise + log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs)) + log(log_message, level='INFO') + + +def meter_status(): + """Get the meter status, if running in the meter-status-changed hook.""" + return os.environ.get('JUJU_METER_STATUS') + + +def meter_info(): + """Get the meter status information, if running in the meter-status-changed + hook.""" + return os.environ.get('JUJU_METER_INFO') + + +def iter_units_for_relation_name(relation_name): + """Iterate through all units in a relation + + Generator that iterates through all the units in a relation and yields + a named tuple with rid and unit field names. + + Usage: + data = [(u.rid, u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param relation_name: string relation name + :yield: Named Tuple with rid and unit field names + """ + RelatedUnit = namedtuple('RelatedUnit', 'rid, unit') + for rid in relation_ids(relation_name): + for unit in related_units(rid): + yield RelatedUnit(rid, unit) + + +def ingress_address(rid=None, unit=None): + """ + Retrieve the ingress-address from a relation when available. + Otherwise, return the private-address. + + When used on the consuming side of the relation (unit is a remote + unit), the ingress-address is the IP address that this unit needs + to use to reach the provided service on the remote unit. + + When used on the providing side of the relation (unit == local_unit()), + the ingress-address is the IP address that is advertised to remote + units on this relation. Remote units need to use this address to + reach the local provided service on this unit. + + Note that charms may document some other method to use in + preference to the ingress_address(), such as an address provided + on a different relation attribute or a service discovery mechanism. + This allows charms to redirect inbound connections to their peers + or different applications such as load balancers. + + Usage: + addresses = [ingress_address(rid=u.rid, unit=u.unit) + for u in iter_units_for_relation_name(relation_name)] + + :param rid: string relation id + :param unit: string unit name + :side effect: calls relation_get + :return: string IP address + """ + settings = relation_get(rid=rid, unit=unit) + return (settings.get('ingress-address') or + settings.get('private-address')) + + +def egress_subnets(rid=None, unit=None): + """ + Retrieve the egress-subnets from a relation. 
+
+ This function is to be used on the providing side of the
+ relation, and provides the ranges of addresses that client
+ connections may come from. The result is uninteresting on
+ the consuming side of a relation (unit == local_unit()).
+
+ Returns a stable list of subnets in CIDR format.
+ eg. ['192.168.1.0/24', '2001::F00F/128']
+
+ If egress-subnets is not available, falls back to using the published
+ ingress-address, or finally private-address.
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+ """
+ def _to_range(addr):
+ if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+ addr += '/32'
+ elif ':' in addr and '/' not in addr: # IPv6
+ addr += '/128'
+ return addr
+
+ settings = relation_get(rid=rid, unit=unit)
+ if 'egress-subnets' in settings:
+ return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+ if 'ingress-address' in settings:
+ return [_to_range(settings['ingress-address'])]
+ if 'private-address' in settings:
+ return [_to_range(settings['private-address'])]
+ return [] # Should never happen
+
+
+def unit_doomed(unit=None):
+ """Determines if the unit is being removed from the model
+
+ Requires Juju 2.4.1.
+
+ :param unit: string unit name, defaults to local_unit
+ :side effect: calls goal_state
+ :side effect: calls local_unit
+ :side effect: calls has_juju_version
+ :return: True if the unit is being removed, already gone, or never existed
+ """
+ if not has_juju_version("2.4.1"):
+ # We cannot risk blindly returning False for 'we don't know',
+ # because that could cause data loss; if call sites don't
+ # need an accurate answer, they likely don't need this helper
+ # at all.
+ # goal-state existed in 2.4.0, but did not handle removals
+ # correctly until 2.4.1.
+ raise NotImplementedError("is_doomed")
+ if unit is None:
+ unit = local_unit()
+ gs = goal_state()
+ units = gs.get('units', {})
+ if unit not in units:
+ return True
+ # I don't think 'dead' units ever show up in the goal-state, but
+ # check anyway in addition to 'dying'.
+ return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+ """Get proxy settings from process environment variables.
+
+ Get charm proxy settings from environment variables that correspond to
+ juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+ see lp:1782236) and juju-ftp-proxy, in a format suitable for passing to an
+ application that reacts to proxy settings passed as environment variables.
+ Some applications support lowercase or uppercase notation (e.g. curl), some
+ support only lowercase (e.g. wget), and there are also rarer cases that
+ support only the uppercase notation. no_proxy CIDR and wildcard support
+ also varies between runtimes and applications, as there is no enforced
+ standard.
+
+ Some applications may connect to multiple destinations and expose config
+ options that would affect only proxy settings for a specific destination;
+ these should be handled in charms in an application-specific manner.
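+
+ Example (editor's sketch; actual values depend on the model's
+ proxy configuration)::
+
+ >>> env_proxy_settings(['http'])
+ {'HTTP_PROXY': 'http://squid.internal:3128',
+ 'http_proxy': 'http://squid.internal:3128'}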
+
+ :param selected_settings: format only a subset of possible settings
+ :type selected_settings: list
+ :rtype: Optional[dict[str, str]]
+ """
+ SUPPORTED_SETTINGS = {
+ 'http': 'HTTP_PROXY',
+ 'https': 'HTTPS_PROXY',
+ 'no_proxy': 'NO_PROXY',
+ 'ftp': 'FTP_PROXY'
+ }
+ if selected_settings is None:
+ selected_settings = SUPPORTED_SETTINGS
+
+ selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+ if k in selected_settings]
+ proxy_settings = {}
+ for var in selected_vars:
+ var_val = os.getenv(var)
+ if var_val:
+ proxy_settings[var] = var_val
+ proxy_settings[var.lower()] = var_val
+ # Now handle juju-prefixed environment variables. The legacy vs new
+ # environment variable usage is mutually exclusive
+ charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+ if charm_var_val:
+ proxy_settings[var] = charm_var_val
+ proxy_settings[var.lower()] = charm_var_val
+ if 'no_proxy' in proxy_settings:
+ if _contains_range(proxy_settings['no_proxy']):
+ log(RANGE_WARNING, level=WARNING)
+ return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+ """Check for cidr or wildcard domain in a string.
+
+ Given a string comprising a comma-separated list of ip addresses
+ and domain names, determine whether the string contains IP ranges
+ or wildcard domains.
+
+ :param addresses: comma-separated list of domains and ip addresses.
+ :type addresses: str
+ """
+ return (
+ # Test for cidr (e.g. 10.20.20.0/24)
+ "/" in addresses or
+ # Test for wildcard domains (*.foo.com or .foo.com)
+ "*" in addresses or
+ addresses.startswith(".") or
+ ",." in addresses or
+ " ." in addresses)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host.py
new file mode 100644
index 00000000..b33ac906
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host.py
@@ -0,0 +1,1104 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tools for working with the host system"""
+# Copyright 2012 Canonical Ltd.
+#
+# Authors:
+# Nick Moffitt <nick.moffitt@canonical.com>
+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
+
+import os
+import re
+import pwd
+import glob
+import grp
+import random
+import string
+import subprocess
+import hashlib
+import functools
+import itertools
+import six
+
+from contextlib import contextmanager
+from collections import OrderedDict
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
+from .fstab import Fstab
+from charmhelpers.osplatform import get_platform
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+ from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401
+ service_available,
+ add_new_group,
+ lsb_release,
+ cmp_pkgrevno,
+ CompareHostReleases,
+ get_distrib_codename,
+ arch
+ ) # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+ from charmhelpers.core.host_factory.centos import ( # NOQA:F401
+ service_available,
+ add_new_group,
+ lsb_release,
+ cmp_pkgrevno,
+ CompareHostReleases,
+ ) # flake8: noqa -- ignore F401 for this import
+
+UPDATEDB_PATH = '/etc/updatedb.conf'
+
+
+def service_start(service_name, **kwargs):
+ """Start a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be started. The
+ following example starts the ceph-osd service for instance id=4:
+
+ service_start('ceph-osd', id=4)
+
+ :param service_name: the name of the service to start
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for systemd enabled systems.
+ """
+ return service('start', service_name, **kwargs)
+
+
+def service_stop(service_name, **kwargs):
+ """Stop a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be stopped. The
+ following example stops the ceph-osd service for instance id=4:
+
+ service_stop('ceph-osd', id=4)
+
+ :param service_name: the name of the service to stop
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for systemd enabled systems.
+ """
+ return service('stop', service_name, **kwargs)
+
+
+def service_restart(service_name, **kwargs):
+ """Restart a system service.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g.
upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be restarted. The
+ following example restarts the ceph-osd service for instance id=4:
+
+ service_restart('ceph-osd', id=4)
+
+ :param service_name: the name of the service to restart
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems not allowing additional
+ parameters via the commandline (systemd).
+ """
+ return service('restart', service_name, **kwargs)
+
+
+def service_reload(service_name, restart_on_failure=False, **kwargs):
+ """Reload a system service, optionally falling back to restart if
+ reload fails.
+
+ The specified service name is managed via the system level init system.
+ Some init systems (e.g. upstart) require that additional arguments be
+ provided in order to directly control service instances whereas other init
+ systems allow for addressing instances of a service directly by name (e.g.
+ systemd).
+
+ The kwargs allow for the additional parameters to be passed to underlying
+ init systems for those systems which require/allow for them. For example,
+ the ceph-osd upstart script requires the id parameter to be passed along
+ in order to identify which running daemon should be reloaded. The
+ following example reloads the ceph-osd service for instance id=4:
+
+ service_reload('ceph-osd', id=4)
+
+ :param service_name: the name of the service to reload
+ :param restart_on_failure: boolean indicating whether to fallback to a
+ restart if the reload fails.
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems not allowing additional
+ parameters via the commandline (systemd).
+ """
+ service_result = service('reload', service_name, **kwargs)
+ if not service_result and restart_on_failure:
+ service_result = service('restart', service_name, **kwargs)
+ return service_result
+
+
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
+ **kwargs):
+ """Pause a system service.
+
+ Stop it, and prevent it from starting again at boot.
+
+ :param service_name: the name of the service to pause
+ :param init_dir: path to the upstart init directory
+ :param initd_dir: path to the sysv init directory
+ :param **kwargs: additional parameters to pass to the init system when
+ managing services. These will be passed as key=value
+ parameters to the init system's commandline. kwargs
+ are ignored for init systems which do not support
+ key=value arguments via the commandline.
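+
+ Example (editor's sketch)::
+
+ service_pause('apache2') # stop now and do not start at boot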
+ """ + stopped = True + if service_running(service_name, **kwargs): + stopped = service_stop(service_name, **kwargs) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('disable', service_name) + service('mask', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + return stopped + + +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d", **kwargs): + """Resume a system service. + + Reenable starting again at boot. Start the service. + + :param service_name: the name of the service to resume + :param init_dir: the path to the init dir + :param initd dir: the path to the initd dir + :param **kwargs: additional parameters to pass to the init system when + managing services. These will be passed as key=value + parameters to the init system's commandline. kwargs + are ignored for systemd enabled systems. + """ + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if init_is_systemd(): + service('unmask', service_name) + service('enable', service_name) + elif os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + raise ValueError( + "Unable to detect {0} as SystemD, Upstart {1} or" + " SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_running(service_name, **kwargs) + + if not started: + started = service_start(service_name, **kwargs) + return started + + +def service(action, service_name, **kwargs): + """Control a system service. + + :param action: the action to take on the service + :param service_name: the name of the service to perform th action on + :param **kwargs: additional params to be passed to the service command in + the form of key=value. + """ + if init_is_systemd(): + cmd = ['systemctl', action, service_name] + else: + cmd = ['service', service_name, action] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + return subprocess.call(cmd) == 0 + + +_UPSTART_CONF = "/etc/init/{}.conf" +_INIT_D_CONF = "/etc/init.d/{}" + + +def service_running(service_name, **kwargs): + """Determine whether a system service is running. + + :param service_name: the name of the service + :param **kwargs: additional args to pass to the service command. This is + used to pass additional key=value arguments to the + service command line for managing specific instance + units (e.g. service ceph-osd status id=2). The kwargs + are ignored in systemd services. 
+ """ + if init_is_systemd(): + return service('is-active', service_name) + else: + if os.path.exists(_UPSTART_CONF.format(service_name)): + try: + cmd = ['status', service_name] + for key, value in six.iteritems(kwargs): + parameter = '%s=%s' % (key, value) + cmd.append(parameter) + output = subprocess.check_output( + cmd, stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError: + return False + else: + # This works for upstart scripts where the 'service' command + # returns a consistent string to represent running + # 'start/running' + if ("start/running" in output or + "is running" in output or + "up and running" in output): + return True + elif os.path.exists(_INIT_D_CONF.format(service_name)): + # Check System V scripts init script return codes + return service('status', service_name) + return False + + +SYSTEMD_SYSTEM = '/run/systemd/system' + + +def init_is_systemd(): + """Return True if the host system uses systemd, False otherwise.""" + if lsb_release()['DISTRIB_CODENAME'] == 'trusty': + return False + return os.path.isdir(SYSTEMD_SYSTEM) + + +def adduser(username, password=None, shell='/bin/bash', + system_user=False, primary_group=None, + secondary_groups=None, uid=None, home_dir=None): + """Add a user to the system. + + Will log but otherwise succeed if the user already exists. + + :param str username: Username to create + :param str password: Password for user; if ``None``, create a system user + :param str shell: The default shell for the user + :param bool system_user: Whether to create a login or system user + :param str primary_group: Primary group for user; defaults to username + :param list secondary_groups: Optional list of additional groups + :param int uid: UID for user being created + :param str home_dir: Home directory for user + + :returns: The password database entry struct, as returned by `pwd.getpwnam` + """ + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + if uid: + user_info = pwd.getpwuid(int(uid)) + log('user with uid {0} already exists!'.format(uid)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if uid: + cmd.extend(['--uid', str(uid)]) + if home_dir: + cmd.extend(['--home', str(home_dir)]) + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + if not primary_group: + try: + grp.getgrnam(username) + primary_group = username # avoid "group exists" error + except KeyError: + pass + if primary_group: + cmd.extend(['-g', primary_group]) + if secondary_groups: + cmd.extend(['-G', ','.join(secondary_groups)]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + +def uid_exists(uid): + """Check if a uid exists""" + try: + pwd.getpwuid(uid) + uid_exists = True + except KeyError: + uid_exists = False + return uid_exists + + +def group_exists(groupname): + """Check if a group exists""" + try: + grp.getgrnam(groupname) + group_exists = True + except KeyError: + group_exists = False + return group_exists + + +def gid_exists(gid): + """Check if a gid exists""" + try: + grp.getgrgid(gid) + gid_exists = True + except KeyError: + gid_exists = False + return gid_exists + + +def add_group(group_name, system_group=False, gid=None): + 
"""Add a group to the system + + Will log but otherwise succeed if the group already exists. + + :param str group_name: group to create + :param bool system_group: Create system group + :param int gid: GID for user being created + + :returns: The password database entry struct, as returned by `grp.getgrnam` + """ + try: + group_info = grp.getgrnam(group_name) + log('group {0} already exists!'.format(group_name)) + if gid: + group_info = grp.getgrgid(gid) + log('group with gid {0} already exists!'.format(gid)) + except KeyError: + log('creating group {0}'.format(group_name)) + add_new_group(group_name, system_group, gid) + group_info = grp.getgrnam(group_name) + return group_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = ['gpasswd', '-a', username, group] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def chage(username, lastday=None, expiredate=None, inactive=None, + mindays=None, maxdays=None, root=None, warndays=None): + """Change user password expiry information + + :param str username: User to update + :param str lastday: Set when password was changed in YYYY-MM-DD format + :param str expiredate: Set when user's account will no longer be + accessible in YYYY-MM-DD format. + -1 will remove an account expiration date. + :param str inactive: Set the number of days of inactivity after a password + has expired before the account is locked. + -1 will remove an account's inactivity. + :param str mindays: Set the minimum number of days between password + changes to MIN_DAYS. + 0 indicates the password can be changed anytime. + :param str maxdays: Set the maximum number of days during which a + password is valid. + -1 as MAX_DAYS will remove checking maxdays + :param str root: Apply changes in the CHROOT_DIR directory + :param str warndays: Set the number of days of warning before a password + change is required + :raises subprocess.CalledProcessError: if call to chage fails + """ + cmd = ['chage'] + if root: + cmd.extend(['--root', root]) + if lastday: + cmd.extend(['--lastday', lastday]) + if expiredate: + cmd.extend(['--expiredate', expiredate]) + if inactive: + cmd.extend(['--inactive', inactive]) + if mindays: + cmd.extend(['--mindays', mindays]) + if maxdays: + cmd.extend(['--maxdays', maxdays]) + if warndays: + cmd.extend(['--warndays', warndays]) + cmd.append(username) + subprocess.check_call(cmd) + + +remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1') + + +def rsync(from_path, to_path, flags='-r', options=None, timeout=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + if timeout: + cmd = ['timeout', str(timeout)] + cmd + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0o555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + path_exists = os.path.exists(realpath) + if path_exists and force: + if not 
os.path.isdir(realpath):
+ log("Removing non-directory file {} prior to mkdir()".format(path))
+ os.unlink(realpath)
+ os.makedirs(realpath, perms)
+ elif not path_exists:
+ os.makedirs(realpath, perms)
+ os.chown(realpath, uid, gid)
+ os.chmod(realpath, perms)
+
+
+def write_file(path, content, owner='root', group='root', perms=0o444):
+ """Create or overwrite a file with the contents of a byte string."""
+ uid = pwd.getpwnam(owner).pw_uid
+ gid = grp.getgrnam(group).gr_gid
+ # let's see if we can read the file and compare the contents, to avoid
+ # doing a write.
+ existing_content = None
+ existing_uid, existing_gid, existing_perms = None, None, None
+ try:
+ with open(path, 'rb') as target:
+ existing_content = target.read()
+ stat = os.stat(path)
+ existing_uid, existing_gid, existing_perms = (
+ stat.st_uid, stat.st_gid, stat.st_mode
+ )
+ except Exception:
+ pass
+ if content != existing_content:
+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+ level=DEBUG)
+ with open(path, 'wb') as target:
+ os.fchown(target.fileno(), uid, gid)
+ os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
+ target.write(content)
+ return
+ # the contents were the same, but we might still need to change the
+ # ownership or permissions.
+ if existing_uid != uid:
+ log("Changing uid on already existing content: {} -> {}"
+ .format(existing_uid, uid), level=DEBUG)
+ os.chown(path, uid, -1)
+ if existing_gid != gid:
+ log("Changing gid on already existing content: {} -> {}"
+ .format(existing_gid, gid), level=DEBUG)
+ os.chown(path, -1, gid)
+ if existing_perms != perms:
+ log("Changing permissions on existing content: {} -> {}"
+ .format(existing_perms, perms), level=DEBUG)
+ os.chmod(path, perms)
+
+
+def fstab_remove(mp):
+ """Remove the given mountpoint entry from /etc/fstab"""
+ return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+ """Adds the given device entry to the /etc/fstab file"""
+ return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
+ """Mount a filesystem at a particular mountpoint"""
+ cmd_args = ['mount']
+ if options is not None:
+ cmd_args.extend(['-o', options])
+ cmd_args.extend([device, mountpoint])
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_add(device, mountpoint, filesystem, options=options)
+ return True
+
+
+def umount(mountpoint, persist=False):
+ """Unmount a filesystem"""
+ cmd_args = ['umount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+ return False
+
+ if persist:
+ return fstab_remove(mountpoint)
+ return True
+
+
+def mounts():
+ """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
+ with open('/proc/mounts') as f:
+ # [['/mount/point','/dev/path'],[...]]
+ system_mounts = [m[1::-1] for m in [l.strip().split()
+ for l in f.readlines()]]
+ return system_mounts
+
+
+def fstab_mount(mountpoint):
+ """Mount filesystem using fstab"""
+ cmd_args = ['mount', mountpoint]
+ try:
+ subprocess.check_output(cmd_args)
+ except subprocess.CalledProcessError as e:
+ log('Error mounting {}\n{}'.format(mountpoint, e.output))
+ return False
+ return True
+
+
+def file_hash(path,
+
+
+def file_hash(path, hash_type='md5'):
+    """Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+                          such as md5, sha1, sha256, sha512, etc.
+    """
+    if os.path.exists(path):
+        h = getattr(hashlib, hash_type)()
+        with open(path, 'rb') as source:
+            h.update(source.read())
+        return h.hexdigest()
+    else:
+        return None
+
+
+def path_hash(path):
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
+
+    :return: dict: A { filename: hash } dictionary for all matched files.
+                   Empty if none found.
+    """
+    return {
+        filename: file_hash(filename)
+        for filename in glob.iglob(path)
+    }
+
+
+def check_hash(path, checksum, hash_type='md5'):
+    """Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    """A ValueError subclass raised when a file fails its checksum."""
+    pass
+
+
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
+    """Restart services based on configuration files changing
+
+    This function is used as a decorator, for example::
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
+            '/etc/apache/sites-enabled/*': [ 'apache2' ]
+            })
+        def config_changed():
+            pass  # your code here
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    decorated config_changed function. The apache2 service would be
+    restarted if any file matching the pattern got changed, created
+    or removed. Standard wildcards are supported, see documentation
+    for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]}
+    @param stopstart: default False; whether to stop and start services
+                      instead of restarting them
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
+    """
+    def wrap(f):
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
+        return wrapped_f
+    return wrap
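+
+# Illustrative decorator sketch (paths and service names hypothetical):
+#
+#     @restart_on_change({'/etc/haproxy/haproxy.cfg': ['haproxy']})
+#     def config_changed():
+#         render_haproxy_config()  # hypothetical helper rewriting the file
+#
+# haproxy is only restarted when the rendered file's hash actually changes.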
+
+
+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function used by the restart_on_change decorator.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
+def pwgen(length=None):
+    """Generate a random password."""
+    if length is None:
+        # A random length is OK to pick with a weak PRNG
+        length = random.choice(range(35, 45))
+    alphanumeric_chars = [
+        l for l in (string.ascii_letters + string.digits)
+        if l not in 'l0QD1vAEIOUaeiou']
+    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
+    # actual password
+    random_generator = random.SystemRandom()
+    random_chars = [
+        random_generator.choice(alphanumeric_chars) for _ in range(length)]
+    return ''.join(random_chars)
+
+
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
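+
+# The helper can also wrap an arbitrary callable directly; a sketch
+# (render_all_configs is hypothetical):
+#
+#     result = restart_on_change_helper(
+#         lambda: render_all_configs(),
+#         {'/etc/myapp/*.conf': ['myapp']},
+#         stopstart=True)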
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
+    if isinstance(nic_type, six.string_types):
+        int_types = [nic_type]
+    else:
+        int_types = nic_type
+
+    interfaces = []
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile(r'^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
+    return interfaces
+
+
+def set_nic_mtu(nic, mtu):
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
+    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
+    subprocess.check_call(cmd)
+
+
+def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
+    cmd = ['ip', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+    mtu = ""
+    for line in ip_output:
+        words = line.split()
+        if 'mtu' in words:
+            mtu = words[words.index("mtu") + 1]
+    return mtu
+
+
+def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
+    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
+    ip_output = subprocess.check_output(cmd).decode('UTF-8')
+    hwaddr = ""
+    words = ip_output.split()
+    if 'link/ether' in words:
+        hwaddr = words[words.index('link/ether') + 1]
+    return hwaddr
+
+
+@contextmanager
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
+    cur = os.getcwd()
+    try:
+        yield os.chdir(directory)
+    finally:
+        os.chdir(cur)
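+
+# Illustrative sketch of the chdir context manager (paths hypothetical):
+#
+#     import subprocess
+#
+#     with chdir('/var/lib/myapp'):
+#         subprocess.check_call(['tar', '-czf', '/tmp/myapp-backup.tgz', '.'])
+#     # the previous working directory is restored on exit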
+
+
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in a given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    :param bool follow_links: Also follow and chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+    if follow_links:
+        chown = os.chown
+    else:
+        chown = os.lchown
+
+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
+    for root, dirs, files in os.walk(path, followlinks=follow_links):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                chown(full, uid, gid)
+
+
+def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
+    chownr(path, owner, group, follow_links=False)
+
+
+def owner(path):
+    """Returns a tuple containing the username & groupname owning the path.
+
+    :param str path: the string path to retrieve the ownership
+    :return tuple(str, str): A (username, groupname) tuple containing the
+                             name of the user and group owning the path.
+    :raises OSError: if the specified path does not exist
+    """
+    stat = os.stat(path)
+    username = pwd.getpwuid(stat.st_uid)[0]
+    groupname = grp.getgrgid(stat.st_gid)[0]
+    return username, groupname
+
+
+def get_total_ram():
+    """The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    """
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
+
+
+UPSTART_CONTAINER_TYPE = '/run/container_type'
+
+
+def is_container():
+    """Determine whether unit is running in a container
+
+    @return: boolean indicating if unit is in a container
+    """
+    if init_is_systemd():
+        # Detect using systemd-detect-virt
+        return subprocess.call(['systemd-detect-virt',
+                                '--container']) == 0
+    else:
+        # Detect using upstart container file marker
+        return os.path.exists(UPSTART_CONTAINER_TYPE)
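+
+# Sketch combining the introspection helpers above (the sizing heuristic
+# is invented for illustration):
+#
+#     cache_bytes = get_total_ram() // 10
+#     if is_container():
+#         # the reported total may be overcommitted inside a container
+#         cache_bytes = min(cache_bytes, 256 * 1024 * 1024)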
+
+
+def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
+
+    This method has no effect if the path specified by updatedb_path does not
+    exist or is not a file.
+
+    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+    @param updatedb_path: the path to the updatedb.conf file
+    """
+    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+        # If the updatedb.conf file doesn't exist then don't attempt to update
+        # the file as the package providing mlocate may not be installed on
+        # the local system
+        return
+
+    with open(updatedb_path, 'r+') as f_id:
+        updatedb_text = f_id.read()
+        output = updatedb(updatedb_text, path)
+        f_id.seek(0)
+        f_id.write(output)
+        f_id.truncate()
+
+
+def updatedb(updatedb_text, new_path):
+    lines = [line for line in updatedb_text.split("\n")]
+    for i, line in enumerate(lines):
+        if line.startswith("PRUNEPATHS="):
+            paths_line = line.split("=")[1].replace('"', '')
+            paths = paths_line.split(" ")
+            if new_path not in paths:
+                paths.append(new_path)
+                lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
+    output = "\n".join(lines)
+    return output
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+    """ Modulo distribution
+
+    This helper uses the unit number, a modulo value and a constant wait time
+    to produce a calculated wait time distribution. This is useful in large
+    scale deployments to distribute load during an expensive operation such as
+    service restarts.
+
+    If you have 1000 nodes that need to restart in batches of 100, one minute
+    apart:
+
+        time.sleep(modulo_distribution(modulo=100, wait=60))
+        restart()
+
+    If you need restarts to happen serially set modulo to the exact number of
+    nodes and set a high constant wait time:
+
+        time.sleep(modulo_distribution(modulo=10, wait=120))
+        restart()
+
+    @param modulo: int The modulo number creates the group distribution
+    @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
+    @return: int Calculated time to wait for unit operation
+    """
+    unit_number = int(local_unit().split('/')[1])
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
+
+
+def install_ca_cert(ca_cert, name=None):
+    """
+    Install the given cert as a trusted CA.
+
+    The ``name`` is the stem of the filename where the cert is written, and if
+    not provided, it will default to ``juju-{charm_name}``.
+
+    If the cert is empty or None, or is unchanged, nothing is done.
+    """
+    if not ca_cert:
+        return
+    if not isinstance(ca_cert, bytes):
+        ca_cert = ca_cert.encode('utf8')
+    if not name:
+        name = 'juju-{}'.format(charm_name())
+    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+    new_hash = hashlib.md5(ca_cert).hexdigest()
+    if file_hash(cert_file) == new_hash:
+        return
+    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
+    write_file(cert_file, ca_cert)
+    subprocess.check_call(['update-ca-certificates', '--fresh'])
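+
+# Usage sketch (path and name are hypothetical):
+#
+#     with open('/tmp/vault-ca.crt', 'rb') as f:
+#         install_ca_cert(f.read(), name='vault-ca')
+#     # writes /usr/local/share/ca-certificates/vault-ca.crt and refreshes
+#     # the system trust store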
+
+
+def get_system_env(key, default=None):
+    """Get data from system environment as represented in ``/etc/environment``.
+
+    :param key: Key to look up
+    :type key: str
+    :param default: Value to return if key is not found
+    :type default: any
+    :returns: Value for key if found or contents of default parameter
+    :rtype: any
+    :raises: subprocess.CalledProcessError
+    """
+    env_file = '/etc/environment'
+    # use the shell and env(1) to parse the global environments file. This is
+    # done to get the correct result even if the user has shell variable
+    # substitutions or other shell logic in that file.
+    output = subprocess.check_output(
+        ['env', '-i', '/bin/bash', '-c',
+         'set -a && source {} && env'.format(env_file)],
+        universal_newlines=True)
+    for k, v in (line.split('=', 1)
+                 for line in output.splitlines() if '=' in line):
+        if k == key:
+            return v
+    else:
+        return default
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/centos.py
new file mode 100644
index 00000000..7781a396
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/centos.py
@@ -0,0 +1,72 @@
+import subprocess
+import yum
+import os
+
+from charmhelpers.core.strutils import BasicStringComparator
+
+
+class CompareHostReleases(BasicStringComparator):
+    """Provide comparisons of Host releases.
+
+    Not implemented for CentOS: instantiating this class raises
+    NotImplementedError.
+    """
+
+    def __init__(self, item):
+        raise NotImplementedError(
+            "CompareHostReleases() is not implemented for CentOS")
+
+
+def service_available(service_name):
+    """Determine whether a system service is available."""
+    if os.path.isdir('/run/systemd/system'):
+        cmd = ['systemctl', 'is-enabled', service_name]
+    else:
+        cmd = ['service', service_name, 'is-enabled']
+    return subprocess.call(cmd) == 0
+
+
+def add_new_group(group_name, system_group=False, gid=None):
+    cmd = ['groupadd']
+    if gid:
+        cmd.extend(['--gid', str(gid)])
+    if system_group:
+        cmd.append('-r')
+    cmd.append(group_name)
+    subprocess.check_call(cmd)
+
+
+def lsb_release():
+    """Return /etc/os-release in a dict."""
+    d = {}
+    with open('/etc/os-release', 'r') as lsb:
+        for l in lsb:
+            s = l.split('=')
+            if len(s) != 2:
+                continue
+            d[s[0].strip()] = s[1].strip()
+    return d
+ """ + if not pkgcache: + y = yum.YumBase() + packages = y.doPackageLists() + pkgcache = {i.Name: i.version for i in packages['installed']} + pkg = pkgcache[package] + if pkg > revno: + return 1 + if pkg < revno: + return -1 + return 0 diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/ubuntu.py new file mode 100644 index 00000000..3edc0687 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/host_factory/ubuntu.py @@ -0,0 +1,116 @@ +import subprocess + +from charmhelpers.core.hookenv import cached +from charmhelpers.core.strutils import BasicStringComparator + + +UBUNTU_RELEASES = ( + 'lucid', + 'maverick', + 'natty', + 'oneiric', + 'precise', + 'quantal', + 'raring', + 'saucy', + 'trusty', + 'utopic', + 'vivid', + 'wily', + 'xenial', + 'yakkety', + 'zesty', + 'artful', + 'bionic', + 'cosmic', + 'disco', + 'eoan', + 'focal' +) + + +class CompareHostReleases(BasicStringComparator): + """Provide comparisons of Ubuntu releases. + + Use in the form of + + if CompareHostReleases(release) > 'trusty': + # do something with mitaka + """ + _list = UBUNTU_RELEASES + + +def service_available(service_name): + """Determine whether a system service is available""" + try: + subprocess.check_output( + ['service', service_name, 'status'], + stderr=subprocess.STDOUT).decode('UTF-8') + except subprocess.CalledProcessError as e: + return b'unrecognized service' not in e.output + else: + return True + + +def add_new_group(group_name, system_group=False, gid=None): + cmd = ['addgroup'] + if gid: + cmd.extend(['--gid', str(gid)]) + if system_group: + cmd.append('--system') + else: + cmd.extend([ + '--group', + ]) + cmd.append(group_name) + subprocess.check_call(cmd) + + +def lsb_release(): + """Return /etc/lsb-release in a dict""" + d = {} + with open('/etc/lsb-release', 'r') as lsb: + for l in lsb: + k, v = l.split('=') + d[k.strip()] = v.strip() + return d + + +def get_distrib_codename(): + """Return the codename of the distribution + :returns: The codename + :rtype: str + """ + return lsb_release()['DISTRIB_CODENAME'].lower() + + +def cmp_pkgrevno(package, revno, pkgcache=None): + """Compare supplied revno with the revno of the installed package. + + * 1 => Installed revno is greater than supplied arg + * 0 => Installed revno is the same as supplied arg + * -1 => Installed revno is less than supplied arg + + This function imports apt_cache function from charmhelpers.fetch if + the pkgcache argument is None. Be sure to add charmhelpers.fetch if + you call this function, or pass an apt_pkg.Cache() instance. + """ + from charmhelpers.fetch import apt_pkg + if not pkgcache: + from charmhelpers.fetch import apt_cache + pkgcache = apt_cache() + pkg = pkgcache[package] + return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) + + +@cached +def arch(): + """Return the package architecture as a string. 
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hugepage.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hugepage.py
new file mode 100644
index 00000000..54b5b5e2
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/hugepage.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+from charmhelpers.core import fstab
+from charmhelpers.core import sysctl
+from charmhelpers.core.host import (
+    add_group,
+    add_user_to_group,
+    fstab_mount,
+    mkdir,
+)
+from charmhelpers.core.strutils import bytes_from_string
+from subprocess import check_output
+
+
+def hugepage_support(user, group='hugetlb', nr_hugepages=256,
+                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
+                     pagesize='2MB', mount=True, set_shmmax=False):
+    """Enable hugepages on system.
+
+    Args:
+    user (str)  -- Username to allow access to hugepages to
+    group (str) -- Group name to own hugepages
+    nr_hugepages (int) -- Number of pages to reserve
+    max_map_count (int) -- Number of Virtual Memory Areas a process can own
+    mnt_point (str) -- Directory to mount hugepages on
+    pagesize (str) -- Size of hugepages
+    mount (bool) -- Whether to mount hugepages
+    set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the
+                         reserved hugepages
+    """
+    group_info = add_group(group)
+    gid = group_info.gr_gid
+    add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
+    sysctl_settings = {
+        'vm.nr_hugepages': nr_hugepages,
+        'vm.max_map_count': max_map_count,
+        'vm.hugetlb_shm_group': gid,
+    }
+    if set_shmmax:
+        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
+        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
+        if shmmax_minsize > shmmax_current:
+            sysctl_settings['kernel.shmmax'] = shmmax_minsize
+    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
+    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
+    lfstab = fstab.Fstab()
+    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
+    if fstab_entry:
+        lfstab.remove_entry(fstab_entry)
+    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
+                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
+    lfstab.add_entry(entry)
+    if mount:
+        fstab_mount(mnt_point)
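+
+# One-line usage sketch (user and page count are hypothetical):
+#
+#     from charmhelpers.core.hugepage import hugepage_support
+#
+#     # Reserve 2048 2MB hugepages for qemu, mounted at /run/hugepages/kvm.
+#     hugepage_support('libvirt-qemu', nr_hugepages=2048)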
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel.py
new file mode 100644
index 00000000..e01f4f8b
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+import subprocess
+
+from charmhelpers.osplatform import get_platform
+from charmhelpers.core.hookenv import (
+    log,
+    INFO
+)
+
+__platform__ = get_platform()
+if __platform__ == "ubuntu":
+    from charmhelpers.core.kernel_factory.ubuntu import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+elif __platform__ == "centos":
+    from charmhelpers.core.kernel_factory.centos import (  # NOQA:F401
+        persistent_modprobe,
+        update_initramfs,
+    )  # flake8: noqa -- ignore F401 for this import
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def modprobe(module, persist=True):
+    """Load a kernel module and configure for auto-load on reboot."""
+    cmd = ['modprobe', module]
+
+    log('Loading kernel module %s' % module, level=INFO)
+
+    subprocess.check_call(cmd)
+    if persist:
+        persistent_modprobe(module)
+
+
+def rmmod(module, force=False):
+    """Remove a module from the Linux kernel"""
+    cmd = ['rmmod']
+    if force:
+        cmd.append('-f')
+    cmd.append(module)
+    log('Removing kernel module %s' % module, level=INFO)
+    return subprocess.check_call(cmd)
+
+
+def lsmod():
+    """Shows what kernel modules are currently loaded"""
+    return subprocess.check_output(['lsmod'],
+                                   universal_newlines=True)
+
+
+def is_module_loaded(module):
+    """Checks if a kernel module is already loaded"""
+    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
+    return len(matches) > 0
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/centos.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/centos.py
new file mode 100644
index 00000000..1c402c11
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/centos.py
@@ -0,0 +1,17 @@
+import subprocess
+import os
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    if not os.path.exists('/etc/rc.modules'):
+        open('/etc/rc.modules', 'a')
+        os.chmod('/etc/rc.modules', 0o111)
+    with open('/etc/rc.modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write('modprobe %s\n' % module)
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["dracut", "-f", version])
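+
+# Illustrative module-management sketch (module name hypothetical):
+#
+#     from charmhelpers.core.kernel import is_module_loaded, modprobe
+#
+#     if not is_module_loaded('8021q'):
+#         modprobe('8021q')  # also persists the module for future boots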
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/ubuntu.py
new file mode 100644
index 00000000..3de372fd
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/kernel_factory/ubuntu.py
@@ -0,0 +1,13 @@
+import subprocess
+
+
+def persistent_modprobe(module):
+    """Load a kernel module and configure for auto-load on reboot."""
+    with open('/etc/modules', 'r+') as modules:
+        if module not in modules.read():
+            modules.write(module + "\n")
+
+
+def update_initramfs(version='all'):
+    """Updates an initramfs image."""
+    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/__init__.py
new file mode 100644
index 00000000..61fd074e
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .base import *  # NOQA
+from .helpers import *  # NOQA
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/base.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/base.py
new file mode 100644
index 00000000..179ad4f0
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/base.py
@@ -0,0 +1,362 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import json
+from inspect import getargspec
+from collections import OrderedDict
+try:
+    from collections.abc import Iterable
+except ImportError:  # Python 2
+    from collections import Iterable
+
+from charmhelpers.core import host
+from charmhelpers.core import hookenv
+
+
+__all__ = ['ServiceManager', 'ManagerCallback',
+           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
+           'service_restart', 'service_stop']
+
+
+class ServiceManager(object):
+    def __init__(self, services=None):
+        """
+        Register a list of services, given their definitions.
+
+        Service definitions are dicts in the following formats (all keys except
+        'service' are optional)::
+
+            {
+                "service": <service name>,
+                "required_data": <list of required data contexts>,
+                "provided_data": <list of provided data contexts>,
+                "data_ready": <one or more callbacks>,
+                "data_lost": <one or more callbacks>,
+                "start": <one or more callbacks>,
+                "stop": <one or more callbacks>,
+                "ports": <list of ports to manage>,
+            }
+
+        The 'required_data' list should contain dicts of required data (or
+        dependency managers that act like dicts and know how to collect the data).
+        Only when all items in the 'required_data' list are populated are the
+        'data_ready' and 'start' callbacks executed. See `is_ready()` for more
+        information.
+
+        The 'provided_data' list should contain relation data providers, most likely
+        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
+        that will indicate a set of data to set on a given relation.
+
+        The 'data_ready' value should be either a single callback, or a list of
+        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
+        Each callback will be called with the service name as the only parameter.
+        After all of the 'data_ready' callbacks are called, the 'start' callbacks
+        are fired.
+
+        The 'data_lost' value should be either a single callback, or a list of
+        callbacks, to be called when a 'required_data' item no longer passes
+        `is_ready()`. Each callback will be called with the service name as the
+        only parameter. After all of the 'data_lost' callbacks are called,
+        the 'stop' callbacks are fired.
+
+        The 'start' value should be either a single callback, or a list of
+        callbacks, to be called when starting the service, after the 'data_ready'
+        callbacks are complete. Each callback will be called with the service
+        name as the only parameter. This defaults to
+        `[host.service_start, services.open_ports]`.
+
+        The 'stop' value should be either a single callback, or a list of
+        callbacks, to be called when stopping the service. If the service is
+        being stopped because it no longer has all of its 'required_data', this
+        will be called after all of the 'data_lost' callbacks are complete.
+        Each callback will be called with the service name as the only parameter.
+        This defaults to `[services.close_ports, host.service_stop]`.
+
+        The 'ports' value should be a list of ports to manage. The default
+        'start' handler will open the ports after the service is started,
+        and the default 'stop' handler will close the ports prior to stopping
+        the service.
+
+
+        Examples:
+
+        The following registers an Upstart service called bingod that depends on
+        a mongodb relation and which runs a custom `db_migrate` function prior to
+        restarting the service, and a Runit service called spadesd::
+
+            manager = services.ServiceManager([
+                {
+                    'service': 'bingod',
+                    'ports': [80, 443],
+                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
+                    'data_ready': [
+                        services.template(source='bingod.conf'),
+                        services.template(source='bingod.ini',
+                                          target='/etc/bingod.ini',
+                                          owner='bingo', perms=0o400),
+                    ],
+                },
+                {
+                    'service': 'spadesd',
+                    'data_ready': services.template(source='spadesd_run.j2',
+                                                    target='/etc/sv/spadesd/run',
+                                                    perms=0o555),
+                    'start': runit_start,
+                    'stop': runit_stop,
+                },
+            ])
+            manager.manage()
+        """
+        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
+        self._ready = None
+        self.services = OrderedDict()
+        for service in services or []:
+            service_name = service['service']
+            self.services[service_name] = service
+
+    def manage(self):
+        """
+        Handle the current hook by doing The Right Thing with the registered services.
+        """
+        hookenv._run_atstart()
+        try:
+            hook_name = hookenv.hook_name()
+            if hook_name == 'stop':
+                self.stop_services()
+            else:
+                self.reconfigure_services()
+                self.provide_data()
+        except SystemExit as x:
+            if x.code is None or x.code == 0:
+                hookenv._run_atexit()
+        hookenv._run_atexit()
+
+    def provide_data(self):
+        """
+        Set the relation data for each provider in the ``provided_data`` list.
+
+        A provider must have a `name` attribute, which indicates which relation
+        to set data on, and a `provide_data()` method, which returns a dict of
+        data to set.
+
+        The `provide_data()` method can optionally accept two parameters:
+
+        * ``remote_service`` The name of the remote service that the data will
+          be provided to. The `provide_data()` method will be called once
+          for each connected service (not unit). This allows the method to
+          tailor its data to the given service.
+        * ``service_ready`` Whether or not the service definition had all of
+          its requirements met, and thus the ``data_ready`` callbacks run.
+
+        Note that the ``provided_data`` methods are now called **after** the
+        ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
+        a chance to generate any data necessary to provide to the remote
+        services.
+        """
+        for service_name, service in self.services.items():
+            service_ready = self.is_ready(service_name)
+            for provider in service.get('provided_data', []):
+                for relid in hookenv.relation_ids(provider.name):
+                    units = hookenv.related_units(relid)
+                    if not units:
+                        continue
+                    remote_service = units[0].split('/')[0]
+                    argspec = getargspec(provider.provide_data)
+                    if len(argspec.args) > 1:
+                        data = provider.provide_data(remote_service, service_ready)
+                    else:
+                        data = provider.provide_data()
+                    if data:
+                        hookenv.relation_set(relid, data)
+
+    def reconfigure_services(self, *service_names):
+        """
+        Update all files for one or more registered services, and,
+        if ready, optionally restart them.
+
+        If no service names are given, reconfigures all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            if self.is_ready(service_name):
+                self.fire_event('data_ready', service_name)
+                self.fire_event('start', service_name, default=[
+                    service_restart,
+                    manage_ports])
+                self.save_ready(service_name)
+            else:
+                if self.was_ready(service_name):
+                    self.fire_event('data_lost', service_name)
+                self.fire_event('stop', service_name, default=[
+                    manage_ports,
+                    service_stop])
+                self.save_lost(service_name)
+
+    def stop_services(self, *service_names):
+        """
+        Stop one or more registered services, by name.
+
+        If no service names are given, stops all registered services.
+        """
+        for service_name in service_names or self.services.keys():
+            self.fire_event('stop', service_name, default=[
+                manage_ports,
+                service_stop])
+
+    def get_service(self, service_name):
+        """
+        Given the name of a registered service, return its service definition.
+        """
+        service = self.services.get(service_name)
+        if not service:
+            raise KeyError('Service not registered: %s' % service_name)
+        return service
+
+    def fire_event(self, event_name, service_name, default=None):
+        """
+        Fire a data_ready, data_lost, start, or stop event on a given service.
+        """
+        service = self.get_service(service_name)
+        callbacks = service.get(event_name, default)
+        if not callbacks:
+            return
+        if not isinstance(callbacks, Iterable):
+            callbacks = [callbacks]
+        for callback in callbacks:
+            if isinstance(callback, ManagerCallback):
+                callback(self, service_name, event_name)
+            else:
+                callback(service_name)
+
+    def is_ready(self, service_name):
+        """
+        Determine if a registered service is ready, by checking its 'required_data'.
+
+        A 'required_data' item can be any mapping type, and is considered ready
+        if `bool(item)` evaluates as True.
+ """ + service = self.get_service(service_name) + reqs = service.get('required_data', []) + return all(bool(req) for req in reqs) + + def _load_ready_file(self): + if self._ready is not None: + return + if os.path.exists(self._ready_file): + with open(self._ready_file) as fp: + self._ready = set(json.load(fp)) + else: + self._ready = set() + + def _save_ready_file(self): + if self._ready is None: + return + with open(self._ready_file, 'w') as fp: + json.dump(list(self._ready), fp) + + def save_ready(self, service_name): + """ + Save an indicator that the given service is now data_ready. + """ + self._load_ready_file() + self._ready.add(service_name) + self._save_ready_file() + + def save_lost(self, service_name): + """ + Save an indicator that the given service is no longer data_ready. + """ + self._load_ready_file() + self._ready.discard(service_name) + self._save_ready_file() + + def was_ready(self, service_name): + """ + Determine if the given service was previously data_ready. + """ + self._load_ready_file() + return service_name in self._ready + + +class ManagerCallback(object): + """ + Special case of a callback that takes the `ServiceManager` instance + in addition to the service name. + + Subclasses should implement `__call__` which should accept three parameters: + + * `manager` The `ServiceManager` instance + * `service_name` The name of the service it's being triggered for + * `event_name` The name of the event that this callback is handling + """ + def __call__(self, manager, service_name, event_name): + raise NotImplementedError() + + +class PortManagerCallback(ManagerCallback): + """ + Callback class that will open or close ports, for use as either + a start or stop action. + """ + def __call__(self, manager, service_name, event_name): + service = manager.get_service(service_name) + # turn this generator into a list, + # as we'll be going over it multiple times + new_ports = list(service.get('ports', [])) + port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) + if os.path.exists(port_file): + with open(port_file) as fp: + old_ports = fp.read().split(',') + for old_port in old_ports: + if bool(old_port) and not self.ports_contains(old_port, new_ports): + hookenv.close_port(old_port) + with open(port_file, 'w') as fp: + fp.write(','.join(str(port) for port in new_ports)) + for port in new_ports: + # A port is either a number or 'ICMP' + protocol = 'TCP' + if str(port).upper() == 'ICMP': + protocol = 'ICMP' + if event_name == 'start': + hookenv.open_port(port, protocol) + elif event_name == 'stop': + hookenv.close_port(port, protocol) + + def ports_contains(self, port, ports): + if not bool(port): + return False + if str(port).upper() != 'ICMP': + port = int(port) + return port in ports + + +def service_stop(service_name): + """ + Wrapper around host.service_stop to prevent spurious "unknown service" + messages in the logs. + """ + if host.service_running(service_name): + host.service_stop(service_name) + + +def service_restart(service_name): + """ + Wrapper around host.service_restart to prevent spurious "unknown service" + messages in the logs. 
+ """ + if host.service_available(service_name): + if host.service_running(service_name): + host.service_restart(service_name) + else: + host.service_start(service_name) + + +# Convenience aliases +open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/helpers.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/helpers.py new file mode 100644 index 00000000..3e6e30d2 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/services/helpers.py @@ -0,0 +1,290 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import yaml + +from charmhelpers.core import hookenv +from charmhelpers.core import host +from charmhelpers.core import templating + +from charmhelpers.core.services.base import ManagerCallback + + +__all__ = ['RelationContext', 'TemplateCallback', + 'render_template', 'template'] + + +class RelationContext(dict): + """ + Base class for a context generator that gets relation data from juju. + + Subclasses must provide the attributes `name`, which is the name of the + interface of interest, `interface`, which is the type of the interface of + interest, and `required_keys`, which is the set of keys required for the + relation to be considered complete. The data for all interfaces matching + the `name` attribute that are complete will used to populate the dictionary + values (see `get_data`, below). + + The generated context will be namespaced under the relation :attr:`name`, + to prevent potential naming conflicts. + + :param str name: Override the relation :attr:`name`, since it can vary from charm to charm + :param list additional_required_keys: Extend the list of :attr:`required_keys` + """ + name = None + interface = None + + def __init__(self, name=None, additional_required_keys=None): + if not hasattr(self, 'required_keys'): + self.required_keys = [] + + if name is not None: + self.name = name + if additional_required_keys: + self.required_keys.extend(additional_required_keys) + self.get_data() + + def __bool__(self): + """ + Returns True if all of the required_keys are available. + """ + return self.is_ready() + + __nonzero__ = __bool__ + + def __repr__(self): + return super(RelationContext, self).__repr__() + + def is_ready(self): + """ + Returns True if all of the `required_keys` are available from any units. + """ + ready = len(self.get(self.name, [])) > 0 + if not ready: + hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) + return ready + + def _is_ready(self, unit_data): + """ + Helper method that tests a set of relation data and returns True if + all of the `required_keys` are present. + """ + return set(unit_data.keys()).issuperset(set(self.required_keys)) + + def get_data(self): + """ + Retrieve the relation data for each unit involved in a relation and, + if complete, store it in a list under `self[self.name]`. 
+        This is automatically called when the RelationContext is instantiated.
+
+        The units are sorted lexicographically first by the service ID, then by
+        the unit ID. Thus, if an interface has two other services, 'db:1'
+        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
+        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
+        set of data, the relation data for the units will be stored in the
+        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
+
+        If you only care about a single unit on the relation, you can just
+        access it as `{{ interface[0]['key'] }}`. However, if you can at all
+        support multiple units on a relation, you should iterate over the list,
+        like::
+
+            {% for unit in interface -%}
+                {{ unit['key'] }}{% if not loop.last %},{% endif %}
+            {%- endfor %}
+
+        Note that since all sets of relation data from all related services and
+        units are in a single list, if you need to know which service or unit a
+        set of data came from, you'll need to extend this class to preserve
+        that information.
+        """
+        if not hookenv.relation_ids(self.name):
+            return
+
+        ns = self.setdefault(self.name, [])
+        for rid in sorted(hookenv.relation_ids(self.name)):
+            for unit in sorted(hookenv.related_units(rid)):
+                reldata = hookenv.relation_get(rid=rid, unit=unit)
+                if self._is_ready(reldata):
+                    ns.append(reldata)
+
+    def provide_data(self):
+        """
+        Return data to be relation_set for this interface.
+        """
+        return {}
+
+
+class MysqlRelation(RelationContext):
+    """
+    Relation context for the `mysql` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'db'
+    interface = 'mysql'
+
+    def __init__(self, *args, **kwargs):
+        self.required_keys = ['host', 'user', 'password', 'database']
+        RelationContext.__init__(self, *args, **kwargs)
+
+
+class HttpRelation(RelationContext):
+    """
+    Relation context for the `http` interface.
+
+    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
+    :param list additional_required_keys: Extend the list of :attr:`required_keys`
+    """
+    name = 'website'
+    interface = 'http'
+
+    def __init__(self, *args, **kwargs):
+        self.required_keys = ['host', 'port']
+        RelationContext.__init__(self, *args, **kwargs)
+
+    def provide_data(self):
+        return {
+            'host': hookenv.unit_get('private-address'),
+            'port': 80,
+        }
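+
+# A project-specific context follows the same pattern as the two built-ins
+# above; a hypothetical sketch:
+#
+#     class AmqpRelation(RelationContext):
+#         """Hypothetical context for an 'amqp' interface."""
+#         name = 'amqp'
+#         interface = 'rabbitmq'
+#
+#         def __init__(self, *args, **kwargs):
+#             self.required_keys = ['hostname', 'password']
+#             RelationContext.__init__(self, *args, **kwargs)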
+
+
+class RequiredConfig(dict):
+    """
+    Data context that loads config options with one or more mandatory options.
+
+    Once the required options have been changed from their default values, all
+    config options will be available, namespaced under `config` to prevent
+    potential naming conflicts (for example, between a config option and a
+    relation property).
+
+    :param list *args: List of options that must be changed from their default values.
+    """
+
+    def __init__(self, *args):
+        self.required_options = args
+        self['config'] = hookenv.config()
+        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
+            self.config = yaml.safe_load(fp).get('options', {})
+
+    def __bool__(self):
+        for option in self.required_options:
+            if option not in self['config']:
+                return False
+            current_value = self['config'][option]
+            default_value = self.config[option].get('default')
+            if current_value == default_value:
+                return False
+            if current_value in (None, '') and default_value in (None, ''):
+                return False
+        return True
+
+    def __nonzero__(self):
+        return self.__bool__()
+
+
+class StoredContext(dict):
+    """
+    A data context that always returns the data that it was first created with.
+
+    This is useful to do a one-time generation of things like passwords, that
+    will thereafter use the same value that was originally generated, instead
+    of generating a new value each time it is run.
+    """
+    def __init__(self, file_name, config_data):
+        """
+        If the file exists, populate `self` with the data from the file.
+        Otherwise, populate with the given data and persist it to the file.
+        """
+        if os.path.exists(file_name):
+            self.update(self.read_context(file_name))
+        else:
+            self.store_context(file_name, config_data)
+            self.update(config_data)
+
+    def store_context(self, file_name, config_data):
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'w') as file_stream:
+            os.fchmod(file_stream.fileno(), 0o600)
+            yaml.dump(config_data, file_stream)
+
+    def read_context(self, file_name):
+        if not os.path.isabs(file_name):
+            file_name = os.path.join(hookenv.charm_dir(), file_name)
+        with open(file_name, 'r') as file_stream:
+            data = yaml.safe_load(file_stream)
+            if not data:
+                raise OSError("%s is empty" % file_name)
+            return data
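+
+# Sketch of StoredContext for one-time secret generation (the file name is
+# arbitrary; pwgen comes from charmhelpers.core.host):
+#
+#     creds = StoredContext('admin-credentials.yaml',
+#                           {'admin_password': pwgen(32)})
+#
+# The password is persisted on the first run; later invocations read the
+# stored value back and ignore the freshly generated one.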
+
+
+class TemplateCallback(ManagerCallback):
+    """
+    Callback class that will render a Jinja2 template, for use as a ready
+    action.
+
+    :param str source: The template source file, relative to
+        `$CHARM_DIR/templates`
+
+    :param str target: The target to write the rendered template to (or None)
+    :param str owner: The owner of the rendered file
+    :param str group: The group of the rendered file
+    :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+        rendered file changes
+    :param template_loader: A jinja2 template loader
+
+    :return str: The rendered template
+    """
+    def __init__(self, source, target,
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
+        self.source = source
+        self.target = target
+        self.owner = owner
+        self.group = group
+        self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader
+
+    def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
+        service = manager.get_service(service_name)
+        context = {'ctx': {}}
+        for ctx in service.get('required_data', []):
+            context.update(ctx)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result
+
+
+# Convenience aliases for templates
+render_template = template = TemplateCallback
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/strutils.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/strutils.py
new file mode 100644
index 00000000..e8df0452
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/strutils.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import six
+import re
+
+
+def bool_from_string(value):
+    """Interpret string value as boolean.
+
+    Returns True if value translates to True otherwise False.
+    """
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        raise ValueError(msg)
+
+    value = value.strip().lower()
+
+    if value in ['y', 'yes', 'true', 't', 'on']:
+        return True
+    elif value in ['n', 'no', 'false', 'f', 'off']:
+        return False
+
+    msg = "Unable to interpret string value '%s' as boolean" % (value)
+    raise ValueError(msg)
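+
+# Quick illustrations (values invented):
+#
+#     bool_from_string('Yes')    # True
+#     bool_from_string(' off ')  # False
+#     bool_from_string('maybe')  # raises ValueError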
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if matches:
+        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    else:
+        # Assume that value passed in is bytes
+        try:
+            size = int(value)
+        except ValueError:
+            msg = "Unable to interpret string value '%s' as bytes" % (value)
+            raise ValueError(msg)
+    return size
+
+
+class BasicStringComparator(object):
+    """Provides a class that will compare strings from an iterator type object.
+    Used to provide > and < comparisons on strings that may not necessarily be
+    alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
+    z-wrap.
+    """
+
+    _list = None
+
+    def __init__(self, item):
+        if self._list is None:
+            raise Exception("Must define the _list in the class definition!")
+        try:
+            self.index = self._list.index(item)
+        except Exception:
+            raise KeyError("Item '{}' is not in list '{}'"
+                           .format(item, self._list))
+
+    def __eq__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index == self._list.index(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index < self._list.index(other)
+
+    def __ge__(self, other):
+        return not self.__lt__(other)
+
+    def __gt__(self, other):
+        assert isinstance(other, str) or isinstance(other, self.__class__)
+        return self.index > self._list.index(other)
+
+    def __le__(self, other):
+        return not self.__gt__(other)
+
+    def __str__(self):
+        """Always give back the item at the index so it can be used in
+        comparisons like:
+
+            s_mitaka = CompareOpenStack('mitaka')
+            s_newton = CompareOpenStack('newton')
+
+            assert s_newton > s_mitaka
+
+        @returns: <string>
+        """
+        return self._list[self.index]
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/sysctl.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/sysctl.py
new file mode 100644
index 00000000..386428d6
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/sysctl.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import yaml
+
+from subprocess import check_call, CalledProcessError
+
+from charmhelpers.core.hookenv import (
+    log,
+    DEBUG,
+    ERROR,
+    WARNING,
+)
+
+from charmhelpers.core.host import is_container
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+
+def create(sysctl_dict, sysctl_file, ignore=False):
+    """Creates a sysctl.conf file from a YAML associative array
+
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options e.g. "{ 'kernel.max_pid': 1337 }"
+    :type sysctl_dict: str or dict
+    :param sysctl_file: path to the sysctl file to be saved
+    :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+    :type ignore: bool
+    :returns: None
+    """
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
+
+    with open(sysctl_file, "w") as fd:
+        for key, value in sysctl_dict_parsed.items():
+            fd.write("{}={}\n".format(key, value))
+
+    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+                                                     sysctl_dict_parsed),
+        level=DEBUG)
+
+    call = ["sysctl", "-p", sysctl_file]
+    if ignore:
+        call.append("-e")
+
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        if is_container():
+            log("Error setting some sysctl keys in this container: {}".format(e.output),
+                level=WARNING)
+        else:
+            raise e
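+
+# Usage sketch with a plain dict (file name and keys illustrative):
+#
+#     from charmhelpers.core import sysctl
+#
+#     sysctl.create({'net.ipv4.ip_forward': 1, 'vm.swappiness': 10},
+#                   '/etc/sysctl.d/50-myapp.conf')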
+ """ + try: + from jinja2 import FileSystemLoader, Environment, exceptions + except ImportError: + try: + from charmhelpers.fetch import apt_install + except ImportError: + hookenv.log('Could not import jinja2, and could not import ' + 'charmhelpers.fetch to install it', + level=hookenv.ERROR) + raise + if sys.version_info.major == 2: + apt_install('python-jinja2', fatal=True) + else: + apt_install('python3-jinja2', fatal=True) + from jinja2 import FileSystemLoader, Environment, exceptions + + if template_loader: + template_env = Environment(loader=template_loader) + else: + if templates_dir is None: + templates_dir = os.path.join(hookenv.charm_dir(), 'templates') + template_env = Environment(loader=FileSystemLoader(templates_dir)) + + # load from a string if provided explicitly + if config_template is not None: + template = template_env.from_string(config_template) + else: + try: + source = source + template = template_env.get_template(source) + except exceptions.TemplateNotFound as e: + hookenv.log('Could not load template %s from %s.' % + (source, templates_dir), + level=hookenv.ERROR) + raise e + content = template.render(context) + if target is not None: + target_dir = os.path.dirname(target) + if not os.path.exists(target_dir): + # This is a terrible default directory permission, as the file + # or its siblings will often contain secrets. + host.mkdir(os.path.dirname(target), owner, group, perms=0o755) + host.write_file(target, content.encode(encoding), owner, group, perms) + return content diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/unitdata.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/unitdata.py new file mode 100644 index 00000000..ab554327 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/core/unitdata.py @@ -0,0 +1,525 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Authors: +# Kapil Thangavelu <kapil.foss@gmail.com> +# +""" +Intro +----- + +A simple way to store state in units. This provides a key value +storage with support for versioned, transactional operation, +and can calculate deltas from previous values to simplify unit logic +when processing changes. + + +Hook Integration +---------------- + +There are several extant frameworks for hook execution, including + + - charmhelpers.core.hookenv.Hooks + - charmhelpers.core.services.ServiceManager + +The storage classes are framework agnostic, one simple integration is +via the HookData contextmanager. It will record the current hook +execution environment (including relation data, config data, etc.), +setup a transaction and allow easy access to the changes from +previously seen values. One consequence of the integration is the +reservation of particular keys ('rels', 'unit', 'env', 'config', +'charm_revisions') for their respective values. 
+
+Here's a fully worked integration example using hookenv.Hooks::
+
+   from charmhelpers.core import hookenv, unitdata
+
+   hook_data = unitdata.HookData()
+   db = unitdata.kv()
+   hooks = hookenv.Hooks()
+
+   @hooks.hook
+   def config_changed():
+       # Print all changes to configuration from previously seen
+       # values.
+       for changed, (prev, cur) in hook_data.conf.items():
+           print('config changed', changed,
+                 'previous value', prev,
+                 'current value', cur)
+
+       # Get some unit-specific bookkeeping
+       if not db.get('pkg_key'):
+           key = urllib.urlopen('https://example.com/pkg_key').read()
+           db.set('pkg_key', key)
+
+       # Directly access all charm config as a mapping.
+       conf = db.getrange('config', True)
+
+       # Directly access all relation data as a mapping
+       rels = db.getrange('rels', True)
+
+   if __name__ == '__main__':
+       with hook_data():
+           hooks.execute()
+
+
+A more basic integration is via the hook_scope context manager which simply
+manages transaction scope (and records hook name, and timestamp)::
+
+   >>> from unitdata import kv
+   >>> db = kv()
+   >>> with db.hook_scope('install'):
+   ...    # do work, in transactional scope.
+   ...    db.set('x', 1)
+   >>> db.get('x')
+   1
+
+
+Usage
+-----
+
+Values are automatically json de/serialized to preserve basic typing
+and complex data structure capabilities (dicts, lists, ints, booleans, etc).
+
+Individual values can be manipulated via get/set::
+
+   >>> kv.set('y', True)
+   >>> kv.get('y')
+   True
+
+   # We can set complex values (dicts, lists) as a single key.
+   >>> kv.set('config', {'a': 1, 'b': True})
+
+   # Also supports returning dictionaries as a record which
+   # provides attribute access.
+   >>> config = kv.get('config', record=True)
+   >>> config.b
+   True
+
+
+Groups of keys can be manipulated with update/getrange::
+
+   >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
+   >>> kv.getrange('gui.', strip=True)
+   {'z': 1, 'y': 2}
+
+When updating values, it is very helpful to understand which values
+have actually changed and how they have changed. The storage
+provides a delta method for this::
+
+   >>> data = {'debug': True, 'option': 2}
+   >>> delta = kv.delta(data, 'config.')
+   >>> delta.debug.previous
+   None
+   >>> delta.debug.current
+   True
+   >>> delta
+   {'debug': (None, True), 'option': (None, 2)}
+
+Note the delta method does not persist the actual change; it needs to
+be explicitly saved via the 'update' method::
+
+   >>> kv.update(data, 'config.')
+
+Values modified in the context of a hook scope retain historical values
+associated with the hook name::
+
+   >>> with db.hook_scope('config-changed'):
+   ...    db.set('x', 42)
+   >>> db.gethistory('x')
+   [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
+    (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
+
+"""
+
+import collections
+import contextlib
+import datetime
+import itertools
+import json
+import os
+import pprint
+import sqlite3
+import sys
+
+__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
+
+
+class Storage(object):
+    """Simple key value database for local unit state within charms.
+
+    Modifications are not persisted unless :meth:`flush` is called.
+
+    To support dicts, lists, integers, floats, and booleans, values
+    are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
+ """ + def __init__(self, path=None): + self.db_path = path + if path is None: + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) + self.conn = sqlite3.connect('%s' % self.db_path) + self.cursor = self.conn.cursor() + self.revision = None + self._closed = False + self._init() + + def close(self): + if self._closed: + return + self.flush(False) + self.cursor.close() + self.conn.close() + self._closed = True + + def get(self, key, default=None, record=False): + self.cursor.execute('select data from kv where key=?', [key]) + result = self.cursor.fetchone() + if not result: + return default + if record: + return Record(json.loads(result[0])) + return json.loads(result[0]) + + def getrange(self, key_prefix, strip=False): + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) + result = self.cursor.fetchall() + + if not result: + return {} + if not strip: + key_prefix = '' + return dict([ + (k[len(key_prefix):], json.loads(v)) for k, v in result]) + + def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ + for k, v in mapping.items(): + self.set("%s%s" % (prefix, k), v) + + def unset(self, key): + """ + Remove a key from the database entirely. + """ + self.cursor.execute('delete from kv where key=?', [key]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + [key, self.revision, json.dumps('DELETED')]) + + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + + def set(self, key, value): + """ + Set a value in the database. 
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ + serialized = json.dumps(value) + + self.cursor.execute('select data from kv where key=?', [key]) + exists = self.cursor.fetchone() + + # Skip mutations to the same value + if exists: + if exists[0] == serialized: + return value + + if not exists: + self.cursor.execute( + 'insert into kv (key, data) values (?, ?)', + (key, serialized)) + else: + self.cursor.execute(''' + update kv + set data = ? + where key = ?''', [serialized, key]) + + # Save + if not self.revision: + return value + + self.cursor.execute( + 'select 1 from kv_revisions where key=? and revision=?', + [key, self.revision]) + exists = self.cursor.fetchone() + + if not exists: + self.cursor.execute( + '''insert into kv_revisions ( + revision, key, data) values (?, ?, ?)''', + (self.revision, key, serialized)) + else: + self.cursor.execute( + ''' + update kv_revisions + set data = ? + where key = ? + and revision = ?''', + [serialized, key, self.revision]) + + return value + + def delta(self, mapping, prefix): + """ + return a delta containing values that have changed. + """ + previous = self.getrange(prefix, strip=True) + if not previous: + pk = set() + else: + pk = set(previous.keys()) + ck = set(mapping.keys()) + delta = DeltaSet() + + # added + for k in ck.difference(pk): + delta[k] = Delta(None, mapping[k]) + + # removed + for k in pk.difference(ck): + delta[k] = Delta(previous[k], None) + + # changed + for k in pk.intersection(ck): + c = mapping[k] + p = previous[k] + if c != p: + delta[k] = Delta(p, c) + + return delta + + @contextlib.contextmanager + def hook_scope(self, name=""): + """Scope all future interactions to the current hook execution + revision.""" + assert not self.revision + self.cursor.execute( + 'insert into hooks (hook, date) values (?, ?)', + (name or sys.argv[0], + datetime.datetime.utcnow().isoformat())) + self.revision = self.cursor.lastrowid + try: + yield self.revision + self.revision = None + except Exception: + self.flush(False) + self.revision = None + raise + else: + self.flush() + + def flush(self, save=True): + if save: + self.conn.commit() + elif self._closed: + return + else: + self.conn.rollback() + + def _init(self): + self.cursor.execute(''' + create table if not exists kv ( + key text, + data text, + primary key (key) + )''') + self.cursor.execute(''' + create table if not exists kv_revisions ( + key text, + revision integer, + data text, + primary key (key, revision) + )''') + self.cursor.execute(''' + create table if not exists hooks ( + version integer primary key autoincrement, + hook text, + date text + )''') + self.conn.commit() + + def gethistory(self, key, deserialize=False): + self.cursor.execute( + ''' + select kv.revision, kv.key, kv.data, h.hook, h.date + from kv_revisions kv, + hooks h + where kv.key=? + and kv.revision = h.version + ''', [key]) + if deserialize is False: + return self.cursor.fetchall() + return map(_parse_history, self.cursor.fetchall()) + + def debug(self, fh=sys.stderr): + self.cursor.execute('select * from kv') + pprint.pprint(self.cursor.fetchall(), stream=fh) + self.cursor.execute('select * from kv_revisions') + pprint.pprint(self.cursor.fetchall(), stream=fh) + + +def _parse_history(d): + return (d[0], d[1], json.loads(d[2]), d[3], + datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) + + +class HookData(object): + """Simple integration for existing hook exec frameworks. 
+
+    Records all unit information, and stores deltas for processing
+    by the hook.
+
+    Sample::
+
+       from charmhelpers.core import hookenv, unitdata
+
+       changes = unitdata.HookData()
+       db = unitdata.kv()
+       hooks = hookenv.Hooks()
+
+       @hooks.hook
+       def config_changed():
+           # View all changes to configuration
+           for changed, (prev, cur) in changes.conf.items():
+               print('config changed', changed,
+                     'previous value', prev,
+                     'current value', cur)
+
+           # Get some unit-specific bookkeeping
+           if not db.get('pkg_key'):
+               key = urllib.urlopen('https://example.com/pkg_key').read()
+               db.set('pkg_key', key)
+
+       if __name__ == '__main__':
+           with changes():
+               hooks.execute()
+
+    """
+    def __init__(self):
+        self.kv = kv()
+        self.conf = None
+        self.rels = None
+
+    @contextlib.contextmanager
+    def __call__(self):
+        from charmhelpers.core import hookenv
+        hook_name = hookenv.hook_name()
+
+        with self.kv.hook_scope(hook_name):
+            self._record_charm_version(hookenv.charm_dir())
+            delta_config, delta_relation = self._record_hook(hookenv)
+            yield self.kv, delta_config, delta_relation
+
+    def _record_charm_version(self, charm_dir):
+        # Record revisions. Charm revisions are meaningless to charm
+        # authors as they don't control the revision, so logic dependent
+        # on revision is not particularly useful; however, it is useful
+        # for debugging analysis.
+        charm_rev = open(
+            os.path.join(charm_dir, 'revision')).read().strip()
+        charm_rev = charm_rev or '0'
+        revs = self.kv.get('charm_revisions', [])
+        if charm_rev not in revs:
+            revs.append(charm_rev.strip() or '0')
+            self.kv.set('charm_revisions', revs)
+
+    def _record_hook(self, hookenv):
+        data = hookenv.execution_environment()
+        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
+        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
+        self.kv.set('env', dict(data['env']))
+        self.kv.set('unit', data['unit'])
+        self.kv.set('relid', data.get('relid'))
+        return conf_delta, rels_delta
+
+
+class Record(dict):
+
+    __slots__ = ()
+
+    def __getattr__(self, k):
+        if k in self:
+            return self[k]
+        raise AttributeError(k)
+
+
+class DeltaSet(Record):
+
+    __slots__ = ()
+
+
+Delta = collections.namedtuple('Delta', ['previous', 'current'])
+
+
+_KV = None
+
+
+def kv():
+    global _KV
+    if _KV is None:
+        _KV = Storage()
+    return _KV
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/__init__.py
new file mode 100644
index 00000000..0cc7fc85
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/__init__.py
@@ -0,0 +1,209 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
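A minimal sketch of exercising the Storage class above outside of a live hook;
it assumes only this module and uses the documented ':memory:' path so nothing
touches disk (the 'seen' key is illustrative only)::

    from charmhelpers.core.unitdata import Storage

    db = Storage(':memory:')          # in-memory sqlite, for experimentation
    with db.hook_scope('install'):    # records hook name/timestamp, wraps a txn
        db.set('seen', {'count': 1})  # values are json-serialized transparently
    assert db.get('seen') == {'count': 1}
    db.close()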
+ +import importlib +from charmhelpers.osplatform import get_platform +from yaml import safe_load +from charmhelpers.core.hookenv import ( + config, + log, +) + +import six +if six.PY3: + from urllib.parse import urlparse, urlunparse +else: + from urlparse import urlparse, urlunparse + + +# The order of this list is very important. Handlers should be listed in from +# least- to most-specific URL matching. +FETCH_HANDLERS = ( + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', + 'charmhelpers.fetch.giturl.GitUrlFetchHandler', +) + + +class SourceConfigError(Exception): + pass + + +class UnhandledSource(Exception): + pass + + +class AptLockError(Exception): + pass + + +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + +class BaseFetchHandler(object): + + """Base class for FetchHandler implementations in fetch plugins""" + + def can_handle(self, source): + """Returns True if the source can be handled. Otherwise returns + a string explaining why it cannot""" + return "Wrong source type" + + def install(self, source): + """Try to download and unpack the source. Return the path to the + unpacked files or raise UnhandledSource.""" + raise UnhandledSource("Wrong source type {}".format(source)) + + def parse_url(self, url): + return urlparse(url) + + def base_url(self, url): + """Return url without querystring or fragment""" + parts = list(self.parse_url(url)) + parts[4:] = ['' for i in parts[4:]] + return urlunparse(parts) + + +__platform__ = get_platform() +module = "charmhelpers.fetch.%s" % __platform__ +fetch = importlib.import_module(module) + +filter_installed_packages = fetch.filter_installed_packages +filter_missing_packages = fetch.filter_missing_packages +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge +add_source = fetch.add_source + +if __platform__ == "ubuntu": + apt_cache = fetch.apt_cache + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove + apt_mark = fetch.apt_mark + apt_hold = fetch.apt_hold + apt_unhold = fetch.apt_unhold + import_key = fetch.import_key + get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env +elif __platform__ == "centos": + yum_search = fetch.yum_search + + +def configure_sources(update=False, + sources_var='install_sources', + keys_var='install_keys'): + """Configure multiple sources from charm configuration. + + The lists are encoded as yaml fragments in the configuration. + The fragment needs to be included as a string. Sources and their + corresponding keys are of the types supported by add_source(). + + Example config: + install_sources: | + - "ppa:foo" + - "http://example.com/repo precise main" + install_keys: | + - null + - "a1b2c3d4" + + Note that 'null' (a.k.a. None) should not be quoted. 
+ """ + sources = safe_load((config(sources_var) or '').strip()) or [] + keys = safe_load((config(keys_var) or '').strip()) or None + + if isinstance(sources, six.string_types): + sources = [sources] + + if keys is None: + for source in sources: + add_source(source, None) + else: + if isinstance(keys, six.string_types): + keys = [keys] + + if len(sources) != len(keys): + raise SourceConfigError( + 'Install sources and keys lists are different lengths') + for source, key in zip(sources, keys): + add_source(source, key) + if update: + _fetch_update(fatal=True) + + +def install_remote(source, *args, **kwargs): + """Install a file tree from a remote source. + + The specified source should be a url of the form: + scheme://[host]/path[#[option=value][&...]] + + Schemes supported are based on this modules submodules. + Options supported are submodule-specific. + Additional arguments are passed through to the submodule. + + For example:: + + dest = install_remote('http://example.com/archive.tgz', + checksum='deadbeef', + hash_type='sha1') + + This will download `archive.tgz`, validate it using SHA1 and, if + the file is ok, extract it and return the directory in which it + was extracted. If the checksum fails, it will raise + :class:`charmhelpers.core.host.ChecksumError`. + """ + # We ONLY check for True here because can_handle may return a string + # explaining why it can't handle a given source. + handlers = [h for h in plugins() if h.can_handle(source) is True] + for handler in handlers: + try: + return handler.install(source, *args, **kwargs) + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') + raise UnhandledSource("No handler found for source {}".format(source)) + + +def install_from_config(config_var_name): + """Install a file from config.""" + charm_config = config() + source = charm_config[config_var_name] + return install_remote(source) + + +def plugins(fetch_handlers=None): + if not fetch_handlers: + fetch_handlers = FETCH_HANDLERS + plugin_list = [] + for handler_name in fetch_handlers: + package, classname = handler_name.rsplit('.', 1) + try: + handler_class = getattr( + importlib.import_module(package), + classname) + plugin_list.append(handler_class()) + except NotImplementedError: + # Skip missing plugins so that they can be ommitted from + # installation if desired + log("FetchHandler {} not found, skipping plugin".format( + handler_name)) + return plugin_list diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/archiveurl.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/archiveurl.py new file mode 100644 index 00000000..d25587ad --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/archiveurl.py @@ -0,0 +1,165 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import hashlib +import re + +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource +) +from charmhelpers.payload.archive import ( + get_archive_handler, + extract, +) +from charmhelpers.core.host import mkdir, check_hash + +import six +if six.PY3: + from urllib.request import ( + build_opener, install_opener, urlopen, urlretrieve, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + ) + from urllib.parse import urlparse, urlunparse, parse_qs + from urllib.error import URLError +else: + from urllib import urlretrieve + from urllib2 import ( + build_opener, install_opener, urlopen, + HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, + URLError + ) + from urlparse import urlparse, urlunparse, parse_qs + + +def splituser(host): + '''urllib.splituser(), but six's support of this seems broken''' + _userprog = re.compile('^(.*)@(.*)$') + match = _userprog.match(host) + if match: + return match.group(1, 2) + return None, host + + +def splitpasswd(user): + '''urllib.splitpasswd(), but six's support of this is missing''' + _passwdprog = re.compile('^([^:]*):(.*)$', re.S) + match = _passwdprog.match(user) + if match: + return match.group(1, 2) + return user, None + + +class ArchiveUrlFetchHandler(BaseFetchHandler): + """ + Handler to download archive files from arbitrary URLs. + + Can fetch from http, https, ftp, and file URLs. + + Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. + + Installs the contents of the archive in $CHARM_DIR/fetched/. + """ + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. + return "Wrong source type" + if get_archive_handler(self.base_url(source)): + return True + return False + + def download(self, source, dest): + """ + Download an archive file. + + :param str source: URL pointing to an archive file. + :param str dest: Local path location to download archive file to. + """ + # propagate all exceptions + # URLError, OSError, etc + proto, netloc, path, params, query, fragment = urlparse(source) + if proto in ('http', 'https'): + auth, barehost = splituser(netloc) + if auth is not None: + source = urlunparse((proto, barehost, path, params, query, fragment)) + username, password = splitpasswd(auth) + passman = HTTPPasswordMgrWithDefaultRealm() + # Realm is set to None in add_password to force the username and password + # to be used whatever the realm + passman.add_password(None, source, username, password) + authhandler = HTTPBasicAuthHandler(passman) + opener = build_opener(authhandler) + install_opener(opener) + response = urlopen(source) + try: + with open(dest, 'wb') as dest_file: + dest_file.write(response.read()) + except Exception as e: + if os.path.isfile(dest): + os.unlink(dest) + raise e + + # Mandatory file validation via Sha1 or MD5 hashing. + def download_and_validate(self, url, hashsum, validate="sha1"): + tempfile, headers = urlretrieve(url) + check_hash(tempfile, hashsum, validate) + return tempfile + + def install(self, source, dest=None, checksum=None, hash_type='sha1'): + """ + Download and install an archive file, with optional checksum validation. + + The checksum can also be given on the `source` URL's fragment. + For example:: + + handler.install('http://example.com/file.tgz#sha1=deadbeef') + + :param str source: URL pointing to an archive file. 
+        :param str dest: Local destination path to install to. If not given,
+            installs to `$CHARM_DIR/fetched/archive_file_name`.
+        :param str checksum: If given, validate the archive file after download.
+        :param str hash_type: Algorithm used to generate `checksum`.
+            Can be any hash algorithm supported by :mod:`hashlib`,
+            such as md5, sha1, sha256, sha512, etc.
+
+        """
+        url_parts = self.parse_url(source)
+        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
+        if not os.path.exists(dest_dir):
+            mkdir(dest_dir, perms=0o755)
+        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
+        try:
+            self.download(source, dld_file)
+        except URLError as e:
+            raise UnhandledSource(e.reason)
+        except OSError as e:
+            raise UnhandledSource(e.strerror)
+        options = parse_qs(url_parts.fragment)
+        for key, value in options.items():
+            if not six.PY3:
+                algorithms = hashlib.algorithms
+            else:
+                algorithms = hashlib.algorithms_available
+            if key in algorithms:
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
+        if checksum:
+            check_hash(dld_file, checksum, hash_type)
+        return extract(dld_file, dest)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/bzrurl.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/bzrurl.py
new file mode 100644
index 00000000..c4ab3ff1
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/bzrurl.py
@@ -0,0 +1,76 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
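A hedged sketch of the fragment-based checksum form accepted by
ArchiveUrlFetchHandler.install() above; the URL and digest are placeholders,
and any algorithm name known to hashlib may appear in the fragment::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    # download, verify the sha256 digest from the fragment, then extract
    path = handler.install('http://example.com/app.tar.gz#sha256=<hex digest>')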
+ +import os +from subprocess import STDOUT, check_output +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource, + filter_installed_packages, + install, +) +from charmhelpers.core.host import mkdir + + +if filter_installed_packages(['bzr']) != []: + install(['bzr']) + if filter_installed_packages(['bzr']) != []: + raise NotImplementedError('Unable to install bzr') + + +class BzrUrlFetchHandler(BaseFetchHandler): + """Handler for bazaar branches via generic and lp URLs.""" + + def can_handle(self, source): + url_parts = self.parse_url(source) + if url_parts.scheme not in ('bzr+ssh', 'lp', ''): + return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.bzr')) + else: + return True + + def branch(self, source, dest, revno=None): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + cmd_opts = [] + if revno: + cmd_opts += ['-r', str(revno)] + if os.path.exists(dest): + cmd = ['bzr', 'pull'] + cmd += cmd_opts + cmd += ['--overwrite', '-d', dest, source] + else: + cmd = ['bzr', 'branch'] + cmd += cmd_opts + cmd += [source, dest] + check_output(cmd, stderr=STDOUT) + + def install(self, source, dest=None, revno=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + + if dest and not os.path.exists(dest): + mkdir(dest, perms=0o755) + + try: + self.branch(source, dest_dir, revno) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/centos.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/centos.py new file mode 100644 index 00000000..a91dcff0 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/centos.py @@ -0,0 +1,171 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import os +import time +import six +import yum + +from tempfile import NamedTemporaryFile +from charmhelpers.core.hookenv import log + +YUM_NO_LOCK = 1 # The return code for "couldn't acquire lock" in YUM. +YUM_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. +YUM_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. 
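The bzr handler above follows the same can_handle()/install() pattern; a brief
sketch with an illustrative launchpad branch name::

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:charms/foo') is True:
        # branches (or pulls) into $CHARM_DIR/fetched/foo at revision 42
        dest = handler.install('lp:charms/foo', revno=42)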
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    yb = yum.YumBase()
+    package_list = yb.doPackageLists()
+    temp_cache = {p.base_package_name: 1 for p in package_list['installed']}
+
+    _pkgs = [p for p in packages if not temp_cache.get(p, False)]
+    return _pkgs
+
+
+def install(packages, options=None, fatal=False):
+    """Install one or more packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_yum_command(cmd, fatal)
+
+
+def upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages."""
+    cmd = ['yum', '--assumeyes']
+    if options is not None:
+        cmd.extend(options)
+    cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_yum_command(cmd, fatal)
+
+
+def update(fatal=False):
+    """Update local yum cache."""
+    cmd = ['yum', '--assumeyes', 'update']
+    log("Update with fatal: {}".format(fatal))
+    _run_yum_command(cmd, fatal)
+
+
+def purge(packages, fatal=False):
+    """Purge one or more packages."""
+    cmd = ['yum', '--assumeyes', 'remove']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_yum_command(cmd, fatal)
+
+
+def yum_search(packages):
+    """Search for a package."""
+    output = {}
+    cmd = ['yum', 'search']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Searching for {}".format(packages))
+    result = subprocess.check_output(cmd)
+    for package in list(packages):
+        output[package] = package in result
+    return output
+
+
+def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL with an rpm package
+
+    @param key: A key to be added to the system's keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk.
+    """
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
+    if source.startswith('http'):
+        directory = '/etc/yum.repos.d/'
+        for filename in os.listdir(directory):
+            with open(directory + filename, 'r') as rpm_file:
+                if source in rpm_file.read():
+                    break
+        else:
+            log("Add source: {!r}".format(source))
+            # write in the charms.repo
+            with open(directory + 'Charms.repo', 'a') as rpm_file:
+                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
+                rpm_file.write('name=%s\n' % source[7:])
+                rpm_file.write('baseurl=%s\n\n' % source)
+    else:
+        log("Unknown source: {!r}".format(source))
+
+    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile('w+') as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['rpm', '--import', key_file.name])
+        else:
+            subprocess.check_call(['rpm', '--import', key])
+
+
+def _run_yum_command(cmd, fatal=False):
+    """Run a YUM command.
+
+    Checks the output and retries if the fatal flag is set to True.
+
+    :param cmd: str: The yum command to run.
+    :param fatal: bool: Whether the command's output should be checked and
+            retried.
+ """ + env = os.environ.copy() + + if fatal: + retry_count = 0 + result = None + + # If the command is considered "fatal", we need to retry if the yum + # lock was not acquired. + + while result is None or result == YUM_NO_LOCK: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > YUM_NO_LOCK_RETRY_COUNT: + raise + result = e.returncode + log("Couldn't acquire YUM lock. Will retry in {} seconds." + "".format(YUM_NO_LOCK_RETRY_DELAY)) + time.sleep(YUM_NO_LOCK_RETRY_DELAY) + + else: + subprocess.call(cmd, env=env) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/giturl.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/giturl.py new file mode 100644 index 00000000..070ca9bb --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/giturl.py @@ -0,0 +1,69 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from subprocess import check_output, CalledProcessError, STDOUT +from charmhelpers.fetch import ( + BaseFetchHandler, + UnhandledSource, + filter_installed_packages, + install, +) + +if filter_installed_packages(['git']) != []: + install(['git']) + if filter_installed_packages(['git']) != []: + raise NotImplementedError('Unable to install git') + + +class GitUrlFetchHandler(BaseFetchHandler): + """Handler for git branches via generic and github URLs.""" + + def can_handle(self, source): + url_parts = self.parse_url(source) + # TODO (mattyw) no support for ssh git@ yet + if url_parts.scheme not in ('http', 'https', 'git', ''): + return False + elif not url_parts.scheme: + return os.path.exists(os.path.join(source, '.git')) + else: + return True + + def clone(self, source, dest, branch="master", depth=None): + if not self.can_handle(source): + raise UnhandledSource("Cannot handle {}".format(source)) + + if os.path.exists(dest): + cmd = ['git', '-C', dest, 'pull', source, branch] + else: + cmd = ['git', 'clone', source, dest, '--branch', branch] + if depth: + cmd.extend(['--depth', depth]) + check_output(cmd, stderr=STDOUT) + + def install(self, source, branch="master", dest=None, depth=None): + url_parts = self.parse_url(source) + branch_name = url_parts.path.strip("/").split("/")[-1] + if dest: + dest_dir = os.path.join(dest, branch_name) + else: + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", + branch_name) + try: + self.clone(source, dest_dir, branch, depth) + except CalledProcessError as e: + raise UnhandledSource(e) + except OSError as e: + raise UnhandledSource(e.strerror) + return dest_dir diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/__init__.py new file mode 100644 index 00000000..bff99dc9 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/__init__.py @@ -0,0 +1,13 
@@ +# Copyright 2014-2019 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/debug.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/debug.py new file mode 100644 index 00000000..757135ee --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/packages.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/packages.py new file mode 100644 index 00000000..6e95028b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/packages.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
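The set_trace() helper above is meant to be dropped into a misbehaving hook; a
sketch of the typical flow, with the connection step happening outside Python
(address and port are the module defaults)::

    from charmhelpers.fetch.python.debug import set_trace

    def some_failing_hook():   # hypothetical hook function
        set_trace()            # opens 0.0.0.0:4444 and waits for a client
        # ...then, from another machine:  nc <unit-address> 4444

Note that set_trace() also registers close_port(4444) via atexit, so the
Juju-opened port is released when the hook process exits.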
+
+import os
+import six
+import subprocess
+import sys
+
+from charmhelpers.fetch import apt_install, apt_update
+from charmhelpers.core.hookenv import charm_dir, log
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to add wheels
+    from /usr/share/python-wheels (installed by various tools) to
+    sys.path. This function ensures that sys.path remains the same
+    after the call is executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            if six.PY2:
+                apt_install('python-pip')
+            else:
+                apt_install('python3-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
+def parse_options(given, available):
+    """Given a set of options, check which are available."""
+    for key, value in sorted(given.items()):
+        if not value:
+            continue
+        if key in available:
+            yield "--{0}={1}".format(key, value)
+
+
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
+    command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    command.append("-r {0}".format(requirements))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
+    pip_execute(command)
+
+
+def pip_install(package, fatal=False, upgrade=False, venv=None,
+                constraints=None, **options):
+    """Install a python package"""
+    if venv:
+        venv_python = os.path.join(venv, 'bin/pip')
+        command = [venv_python, "install"]
+    else:
+        command = ["install"]
+
+    available_options = ('proxy', 'src', 'log', 'index-url', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if upgrade:
+        command.append('--upgrade')
+
+    if constraints:
+        command.extend(['-c', constraints])
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Installing {} package with options: {}".format(package,
+                                                        command))
+    if venv:
+        subprocess.check_call(command)
+    else:
+        pip_execute(command)
+
+
+def pip_uninstall(package, **options):
+    """Uninstall a python package"""
+    command = ["uninstall", "-q", "-y"]
+
+    available_options = ('proxy', 'log', )
+    for option in parse_options(options, available_options):
+        command.append(option)
+
+    if isinstance(package, list):
+        command.extend(package)
+    else:
+        command.append(package)
+
+    log("Uninstalling {} package with options: {}".format(package,
+                                                          command))
+    pip_execute(command)
+
+
+def pip_list():
+    """Return the list of currently installed python packages."""
+    return pip_execute(["list"])
+
+
+def pip_create_virtualenv(path=None):
+    """Create an isolated Python environment."""
+    if six.PY2:
+        apt_install('python-virtualenv')
+    else:
+        apt_install('python3-virtualenv')
+
+    if path:
+        venv_path = path
+    else:
+        venv_path = os.path.join(charm_dir(), 'venv')
+
+    if not os.path.exists(venv_path):
+        subprocess.check_call(['virtualenv', venv_path])
diff --git 
a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/rpdb.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 00000000..9b31610c --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin <b@janin.com>" +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/version.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/version.py new file mode 100644 index 00000000..3eb42103 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
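Rpdb can also be used directly when Juju port management is not wanted; a
minimal sketch using the class defaults::

    from charmhelpers.fetch.python.rpdb import Rpdb

    # Construction blocks on accept() until a client attaches,
    # e.g. with:  nc 127.0.0.1 4444
    Rpdb(addr='127.0.0.1', port=4444).set_trace()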
+
+import sys
+
+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
+
+
+def current_version():
+    """Current system python version"""
+    return sys.version_info
+
+
+def current_version_string():
+    """Current system python version as string major.minor.micro"""
+    return "{0}.{1}.{2}".format(sys.version_info.major,
+                                sys.version_info.minor,
+                                sys.version_info.micro)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/snap.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/snap.py
new file mode 100644
index 00000000..fc70aa94
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/snap.py
@@ -0,0 +1,150 @@
+# Copyright 2014-2017 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Charm helpers snap for classic charms.
+
+If writing reactive charms, use the snap layer:
+https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html
+"""
+import subprocess
+import os
+from time import sleep
+from charmhelpers.core.hookenv import log
+
+__author__ = 'Joseph Borg <joseph.borg@canonical.com>'
+
+# The return code for "couldn't acquire lock" in Snap
+# (hopefully this will be improved).
+SNAP_NO_LOCK = 1
+SNAP_NO_LOCK_RETRY_DELAY = 10  # Wait X seconds between Snap lock checks.
+SNAP_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.
+SNAP_CHANNELS = [
+    'edge',
+    'beta',
+    'candidate',
+    'stable',
+]
+
+
+class CouldNotAcquireLockException(Exception):
+    pass
+
+
+class InvalidSnapChannel(Exception):
+    pass
+
+
+def _snap_exec(commands):
+    """
+    Execute snap commands.
+
+    :param commands: List commands
+    :return: Integer exit code
+    """
+    assert type(commands) == list
+
+    retry_count = 0
+    return_code = None
+
+    while return_code is None or return_code == SNAP_NO_LOCK:
+        try:
+            return_code = subprocess.check_call(['snap'] + commands,
+                                                env=os.environ)
+        except subprocess.CalledProcessError as e:
+            retry_count += 1
+            if retry_count > SNAP_NO_LOCK_RETRY_COUNT:
+                raise CouldNotAcquireLockException(
+                    'Could not acquire lock after {} attempts'
+                    .format(SNAP_NO_LOCK_RETRY_COUNT))
+            return_code = e.returncode
+            log('Snap failed to acquire lock, trying again in {} seconds.'
+                .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN')
+            sleep(SNAP_NO_LOCK_RETRY_DELAY)
+
+    return return_code
+
+
+def snap_install(packages, *flags):
+    """
+    Install a snap package.
+
+    :param packages: String or List String package name
+    :param flags: List String flags to pass to install command
+    :return: Integer return code from snap
+    """
+    if type(packages) is not list:
+        packages = [packages]
+
+    flags = list(flags)
+
+    message = 'Installing snap(s) "%s"' % ', '.join(packages)
+    if flags:
+        message += ' with option(s) "%s"' % ', '.join(flags)
+
+    log(message, level='INFO')
+    return _snap_exec(['install'] + flags + packages)
+
+
+def snap_remove(packages, *flags):
+    """
+    Remove a snap package.
+ + :param packages: String or List String package name + :param flags: List String flags to pass to remove command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Removing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['remove'] + flags + packages) + + +def snap_refresh(packages, *flags): + """ + Refresh / Update snap package. + + :param packages: String or List String package name + :param flags: List String flags to pass to refresh command + :return: Integer return code from snap + """ + if type(packages) is not list: + packages = [packages] + + flags = list(flags) + + message = 'Refreshing snap(s) "%s"' % ', '.join(packages) + if flags: + message += ' with options "%s"' % ', '.join(flags) + + log(message, level='INFO') + return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel)) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu.py new file mode 100644 index 00000000..3ddaf0dd --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu.py @@ -0,0 +1,805 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import OrderedDict +import platform +import re +import six +import subprocess +import sys +import time + +from charmhelpers.core.host import get_distrib_codename, get_system_env + +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, + env_proxy_settings, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. 
+ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main +""" +CLOUD_ARCHIVE_POCKETS = { + # Folsom + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'precise-folsom': 'precise-updates/folsom', + 'precise-folsom/updates': 'precise-updates/folsom', + 'precise-updates/folsom': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'precise-folsom/proposed': 'precise-proposed/folsom', + 'precise-proposed/folsom': 'precise-proposed/folsom', + # Grizzly + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'precise-grizzly': 'precise-updates/grizzly', + 'precise-grizzly/updates': 'precise-updates/grizzly', + 'precise-updates/grizzly': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'precise-grizzly/proposed': 'precise-proposed/grizzly', + 'precise-proposed/grizzly': 'precise-proposed/grizzly', + # Havana + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'precise-havana': 'precise-updates/havana', + 'precise-havana/updates': 'precise-updates/havana', + 'precise-updates/havana': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + 'precise-havana/proposed': 'precise-proposed/havana', + 'precise-proposed/havana': 'precise-proposed/havana', + # Icehouse + 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', + 'precise-icehouse': 'precise-updates/icehouse', + 'precise-icehouse/updates': 'precise-updates/icehouse', + 'precise-updates/icehouse': 'precise-updates/icehouse', + 'icehouse/proposed': 'precise-proposed/icehouse', + 'precise-icehouse/proposed': 'precise-proposed/icehouse', + 'precise-proposed/icehouse': 'precise-proposed/icehouse', + # Juno + 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', + 'trusty-juno': 'trusty-updates/juno', + 'trusty-juno/updates': 'trusty-updates/juno', + 'trusty-updates/juno': 'trusty-updates/juno', + 'juno/proposed': 'trusty-proposed/juno', + 'trusty-juno/proposed': 'trusty-proposed/juno', + 'trusty-proposed/juno': 'trusty-proposed/juno', + # Kilo + 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', + 'trusty-kilo': 'trusty-updates/kilo', + 'trusty-kilo/updates': 'trusty-updates/kilo', + 'trusty-updates/kilo': 'trusty-updates/kilo', + 'kilo/proposed': 'trusty-proposed/kilo', + 'trusty-kilo/proposed': 'trusty-proposed/kilo', + 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', + # Mitaka + 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', + 'trusty-mitaka': 'trusty-updates/mitaka', + 'trusty-mitaka/updates': 'trusty-updates/mitaka', + 'trusty-updates/mitaka': 'trusty-updates/mitaka', + 'mitaka/proposed': 'trusty-proposed/mitaka', + 
'trusty-mitaka/proposed': 'trusty-proposed/mitaka', + 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', + # Newton + 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', + 'xenial-newton': 'xenial-updates/newton', + 'xenial-newton/updates': 'xenial-updates/newton', + 'xenial-updates/newton': 'xenial-updates/newton', + 'newton/proposed': 'xenial-proposed/newton', + 'xenial-newton/proposed': 'xenial-proposed/newton', + 'xenial-proposed/newton': 'xenial-proposed/newton', + # Ocata + 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', + 'xenial-ocata': 'xenial-updates/ocata', + 'xenial-ocata/updates': 'xenial-updates/ocata', + 'xenial-updates/ocata': 'xenial-updates/ocata', + 'ocata/proposed': 'xenial-proposed/ocata', + 'xenial-ocata/proposed': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', + # Pike + 'pike': 'xenial-updates/pike', + 'xenial-pike': 'xenial-updates/pike', + 'xenial-pike/updates': 'xenial-updates/pike', + 'xenial-updates/pike': 'xenial-updates/pike', + 'pike/proposed': 'xenial-proposed/pike', + 'xenial-pike/proposed': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', + # Queens + 'queens': 'xenial-updates/queens', + 'xenial-queens': 'xenial-updates/queens', + 'xenial-queens/updates': 'xenial-updates/queens', + 'xenial-updates/queens': 'xenial-updates/queens', + 'queens/proposed': 'xenial-proposed/queens', + 'xenial-queens/proposed': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', +} + + +APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. +CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. +CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. 
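+
+
+# Example (illustrative sketch): a Cloud Archive spec is resolved through
+# CLOUD_ARCHIVE_POCKETS and substituted into CLOUD_ARCHIVE, e.g.:
+#
+#     CLOUD_ARCHIVE_POCKETS['train']   # -> 'bionic-updates/train'
+#     CLOUD_ARCHIVE.format('bionic-updates/train')
+#     # -> '# Ubuntu Cloud Archive\n'
+#     #    'deb http://ubuntu-cloud.archive.canonical.com/ubuntu bionic-updates/train main\n'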
+
+
+def filter_installed_packages(packages):
+    """Return a list of packages that require installation."""
+    cache = apt_cache()
+    _pkgs = []
+    for package in packages:
+        try:
+            p = cache[package]
+            if not p.current_ver:
+                _pkgs.append(package)
+        except KeyError:
+            log('Package {} has no installation candidate.'.format(package),
+                level='WARNING')
+            _pkgs.append(package)
+    return _pkgs
+
+
+def filter_missing_packages(packages):
+    """Return a list of packages that are installed.
+
+    :param packages: list of packages to evaluate.
+    :returns list: Packages that are installed.
+    """
+    return list(
+        set(packages) -
+        set(filter_installed_packages(packages))
+    )
+
+
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to avoid the consumer Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of upstream ``apt_pkg`` module in conjunction '
+            'with charm-helpers is deprecated since 2019-06-25', level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
+
+
+def apt_install(packages, options=None, fatal=False):
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Union[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    if dist:
+        cmd.append('dist-upgrade')
+    else:
+        cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_update(fatal=False):
+    """Update local apt cache."""
+    cmd = ['apt-get', 'update']
+    _run_apt_command(cmd, fatal)
+
+
+def apt_purge(packages, fatal=False):
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Union[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'purge']
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Purging {}".format(packages))
+    _run_apt_command(cmd, fatal)
+
+
+def apt_autoremove(purge=True, fatal=False):
+    """Remove packages that are no longer required.
+
+    :param purge: Whether the ``--purge`` option should be passed on or not.
+    :type purge: bool
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
+    cmd = ['apt-get', '--assume-yes', 'autoremove']
+    if purge:
+        cmd.append('--purge')
+    _run_apt_command(cmd, fatal)
+
+
+def apt_mark(packages, mark, fatal=False):
+    """Flag one or more packages using apt-mark."""
+    log("Marking {} as {}".format(packages, mark))
+    cmd = ['apt-mark', mark]
+    if isinstance(packages, six.string_types):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+
+    if fatal:
+        subprocess.check_call(cmd, universal_newlines=True)
+    else:
+        subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+    return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+    return apt_mark(packages, 'unhold', fatal=fatal)
+
+
+def import_key(key):
+    """Import an ASCII Armor key.
+
+    A Radix64 format keyid is also supported for backwards
+    compatibility. In this case Ubuntu keyserver will be
+    queried for a key via HTTPS by its keyid. This method
+    is less preferable because https proxy servers may
+    require traffic decryption which is equivalent to a
+    man-in-the-middle attack (a proxy server impersonates
+    keyserver TLS certificates and has to be explicitly
+    trusted by the system).
+
+    :param key: A GPG key in ASCII armor format,
+                including BEGIN and END markers or a keyid.
+    :type key: (bytes, str)
+    :raises: GPGKeyError if the key could not be imported
+    """
+    key = key.strip()
+    if '-' in key or '\n' in key:
+        # Send everything not obviously a keyid to GPG to import, as
+        # we trust its validation better than our own. e.g. handling
+        # comments before the key.
+        log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
+                '-----END PGP PUBLIC KEY BLOCK-----' in key):
+            log("Writing provided PGP key in the binary format", level=DEBUG)
+            if six.PY3:
+                key_bytes = key.encode('utf-8')
+            else:
+                key_bytes = key
+            key_name = _get_keyid_by_gpg_key(key_bytes)
+            key_gpg = _dearmor_gpg_key(key_bytes)
+            _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
+        else:
+            raise GPGKeyError("ASCII armor markers missing from GPG key")
+    else:
+        log("PGP key found (looks like Radix64 format)", level=WARNING)
+        log("SECURELY importing PGP key from keyserver; "
+            "full key not provided.", level=WARNING)
+        # as of bionic add-apt-repository uses curl with an HTTPS keyserver
+        # URL to retrieve GPG keys. The `apt-key adv` command is deprecated,
+        # as is apt-key in general as noted in its manpage. See lp:1433761
+        # for more history. Instead, /etc/apt/trusted.gpg.d is used directly
+        # to drop gpg keys.
+        key_asc = _get_key_by_keyid(key)
+        # write the key in GPG format so that apt-key list shows it
+        key_gpg = _dearmor_gpg_key(key_asc)
+        _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+    """Get a GPG key fingerprint by GPG key material.
+ Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. + + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. 
Check your network setup'
+                          ' (MTU, routing, DNS) and/or proxy server settings'
+                          ' as well as destination keyserver status.')
+    else:
+        return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+    """Writes GPG key material into a file at a provided path.
+
+    :param key_name: A key name to use for a key file (could be a fingerprint)
+    :type key_name: str
+    :param key_material: A GPG key material (binary)
+    :type key_material: (str, bytes)
+    """
+    with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+              'wb') as keyf:
+        keyf.write(key_material)
+
+
+def add_source(source, key=None, fail_invalid=False):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples::
+
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+        'distro' may be used as a noop
+
+    Full list of source specifications supported by the function are:
+
+    'distro': A NOP; i.e. it has no effect.
+    'proposed': the proposed deb spec [2] is written to
+      /etc/apt/sources.list.d/proposed.list
+    'distro-proposed': adds <version>-proposed to the debs [2]
+    'ppa:<ppa-name>': add-apt-repository --yes <ppa_name>
+    'deb <deb-spec>': add-apt-repository --yes deb <deb-spec>
+    'http://....': add-apt-repository --yes http://...
+    'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec>
+    'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with
+      optional staging version. If staging is used then the staging PPA [2]
+      will be used. If staging is NOT used then the cloud archive [3] will be
+      added, and the 'ubuntu-cloud-keyring' package will be added for the
+      current distro.
+
+    Otherwise the source is not recognised and this is logged to the juju log.
+    However, no error is raised, unless fail_invalid is True.
+
+    [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
+        where {} is replaced with the derived pocket name.
+    [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \
+        main universe multiverse restricted
+        where {} is replaced with the lsb_release codename (e.g. xenial)
+    [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket>
+        to /etc/apt/sources.list.d/cloud-archive.list
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver
+    placing your Juju environment at risk. ppa and cloud archive keys
+    are securely added automatically, so should not be provided.
+
+    @param fail_invalid: (boolean) if True, then the function raises a
+    SourceConfigError if there is no matching installation source.
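+
+    Example (an illustrative sketch; the cloud archive spec assumes a
+    bionic host)::
+
+        add_source('ppa:charmers/example')
+        add_source('cloud:bionic-train')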
+
+    @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
+    valid pocket in CLOUD_ARCHIVE_POCKETS
+    """
+    _mapping = OrderedDict([
+        (r"^distro$", lambda: None),  # This is a NOP
+        (r"^(?:proposed|distro-proposed)$", _add_proposed),
+        (r"^cloud-archive:(.*)$", _add_apt_repository),
+        (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
+        (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
+        (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
+        (r"^cloud:(.*)$", _add_cloud_pocket),
+        (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
+    ])
+    if source is None:
+        source = ''
+    for r, fn in six.iteritems(_mapping):
+        m = re.match(r, source)
+        if m:
+            if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
+                try:
+                    import_key(key)
+                except GPGKeyError as e:
+                    raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
+            break
+    else:
+        # nothing matched. log an error and maybe sys.exit
+        err = "Unknown source: {!r}".format(source)
+        log(err)
+        if fail_invalid:
+            raise SourceConfigError(err)
+
+
+def _add_proposed():
+    """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list
+
+    Uses get_distrib_codename to determine the correct stanza for
+    the deb line.
+
+    For Intel architectures PROPOSED_POCKET is used for the release, but for
+    other architectures PROPOSED_PORTS_POCKET is used for the release.
+    """
+    release = get_distrib_codename()
+    arch = platform.machine()
+    if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
+        raise SourceConfigError("Arch {} not supported for (distro-)proposed"
+                                .format(arch))
+    with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
+        apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release))
+
+
+def _add_apt_repository(spec):
+    """Add the spec using add_apt_repository
+
+    :param spec: the parameter to pass to add_apt_repository
+    :type spec: str
+    """
+    if '{series}' in spec:
+        series = get_distrib_codename()
+        spec = spec.replace('{series}', series)
+    # software-properties package for bionic properly reacts to proxy settings
+    # passed as environment variables (See lp:1433761). This is not the case
+    # for LTS and non-LTS releases below bionic.
+    _run_with_retries(['add-apt-repository', '--yes', spec],
+                      cmd_env=env_proxy_settings(['https']))
+
+
+def _add_cloud_pocket(pocket):
+    """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list
+
+    Note that this overwrites the existing file if there is one.
+
+    This function also converts the simple pocket into the actual pocket using
+    the CLOUD_ARCHIVE_POCKETS mapping.
+
+    :param pocket: string representing the pocket to add a deb spec for.
+    :raises: SourceConfigError if the cloud pocket doesn't exist or the
+        requested release doesn't match the current distro version.
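+
+    Example (illustrative)::
+
+        _add_cloud_pocket('train')         # resolved via CLOUD_ARCHIVE_POCKETS
+        _add_cloud_pocket('bionic-train')  # equivalent, more explicit form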
+ """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/<openstack_release>-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release:String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is in the same as the current ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. + """ + ubuntu_rel = get_distrib_codename() + if release != ubuntu_rel: + raise SourceConfigError( + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu' + 'version ({})'.format(release, os_release, ubuntu_rel)) + + +def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), + retry_message="", cmd_env=None): + """Run a command and retry until success or max_retries is reached. + + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry. + Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param: cmd_env: Environment variables to add to the command run. + :type cmd_env: Option[None, Dict[str, str]] + """ + env = get_apt_dpkg_env() + if cmd_env: + env.update(cmd_env) + + if not retry_message: + retry_message = "Failed executing '{}'".format(" ".join(cmd)) + retry_message += ". 
Will retry in {} seconds".format(CMD_RETRY_DELAY) + + retry_count = 0 + result = None + + retry_results = (None,) + retry_exitcodes + while result in retry_results: + try: + result = subprocess.check_call(cmd, env=env) + except subprocess.CalledProcessError as e: + retry_count = retry_count + 1 + if retry_count > max_retries: + raise + result = e.returncode + log(retry_message) + time.sleep(CMD_RETRY_DELAY) + + +def _run_apt_command(cmd, fatal=False): + """Run an apt command with optional retries. + + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + """ + if fatal: + _run_with_retries( + cmd, retry_exitcodes=(1, APT_NO_LOCK,), + retry_message="Couldn't acquire DPKG lock") + else: + subprocess.call(cmd, env=get_apt_dpkg_env()) + + +def get_upstream_version(package): + """Determine upstream version based on installed package + + @returns None (if not installed) or the upstream version + """ + cache = apt_cache() + try: + pkg = cache[package] + except Exception: + # the package is unknown to the current apt cache. + return None + + if not pkg.current_ver: + # package is known, but no version is currently installed. + return None + + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # avalid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu_apt_pkg.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 00000000..929a75d7 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,267 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module. + +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. 
+ +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess +import sys + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. 
Workaround for Trusty where the
+            # environment appears to not be set up correctly.
+            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
+        try:
+            output = subprocess.check_output(cmd,
+                                             stderr=subprocess.STDOUT,
+                                             universal_newlines=True)
+            previous = None
+            pkg = {}
+            for line in output.splitlines():
+                if not line:
+                    if 'package' in pkg:
+                        pkgs.update({pkg['package']: pkg})
+                        pkg = {}
+                    continue
+                if line.startswith(' '):
+                    if previous and previous in pkg:
+                        pkg[previous] += os.linesep + line.lstrip()
+                    continue
+                if ':' in line:
+                    kv = line.split(':', 1)
+                    key = kv[0].lower()
+                    if key == 'n':
+                        continue
+                    previous = key
+                    pkg.update({key: kv[1].lstrip()})
+        except subprocess.CalledProcessError as cp:
+            # ``apt-cache`` returns 100 if none of the packages asked for
+            # exist in the apt cache.
+            if cp.returncode != 100:
+                raise
+        return pkgs
+
+
+class Config(_container):
+    def __init__(self):
+        super(Config, self).__init__(self._populate())
+
+    def _populate(self):
+        cfgs = {}
+        cmd = ['apt-config', 'dump']
+        output = subprocess.check_output(cmd,
+                                         stderr=subprocess.STDOUT,
+                                         universal_newlines=True)
+        for line in output.splitlines():
+            if not line.startswith("CommandLine"):
+                k, v = line.split(" ", 1)
+                cfgs[k] = v.strip(";").strip("\"")
+
+        return cfgs
+
+
+# Backwards compatibility with old apt_pkg module
+sys.modules[__name__].config = Config()
+
+
+def init():
+    """Compatibility shim that does nothing."""
+    pass
+
+
+def upstream_version(version):
+    """Extracts upstream version from a version string.
+
+    Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
+                        apt-pkg/deb/debversion.cc#L259
+
+    :param version: Version string
+    :type version: str
+    :returns: Upstream version
+    :rtype: str
+    """
+    if version:
+        version = version.split(':')[-1]
+        version = version.split('-')[0]
+    return version
+
+
+def version_compare(a, b):
+    """Compare the given versions.
+
+    Call out to ``dpkg`` to make sure the code doing the comparison is
+    compatible with what the ``apt`` library would do. Mimic the return
+    values.
+
+    Upstream reference:
+    https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
+    ?highlight=version_compare#apt_pkg.version_compare
+
+    :param a: version string
+    :type a: str
+    :param b: version string
+    :type b: str
+    :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
+              <0 if ``a`` is smaller than ``b``
+    :rtype: int
+    :raises: subprocess.CalledProcessError, RuntimeError
+    """
+    for op in ('gt', 1), ('eq', 0), ('lt', -1):
+        try:
+            subprocess.check_call(['dpkg', '--compare-versions',
+                                   a, op[0], b],
+                                  stderr=subprocess.STDOUT,
+                                  universal_newlines=True)
+            return op[1]
+        except subprocess.CalledProcessError as cp:
+            if cp.returncode == 1:
+                continue
+            raise
+    else:
+        raise RuntimeError('Unable to compare "{}" and "{}", according to '
+                           'our logic they are neither greater, equal nor '
+                           'less than each other.'.format(a, b))
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/osplatform.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/osplatform.py
new file mode 100644
index 00000000..78c81af5
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/osplatform.py
@@ -0,0 +1,46 @@
+import platform
+import os
+
+
+def get_platform():
+    """Return the current OS platform.
+
+    For example: if the current OS platform is Ubuntu then a string "ubuntu"
+    will be returned (which is the name of the module).
+    This string is used to decide which platform module should be imported.
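+
+    Example (illustrative)::
+
+        >>> get_platform()
+        'ubuntu'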
+ """ + # linux_distribution is deprecated and will be removed in Python 3.7 + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + + if "Ubuntu" in current_platform: + return "ubuntu" + elif "CentOS" in current_platform: + return "centos" + elif "debian" in current_platform: + # Stock Python does not detect Ubuntu and instead returns debian. + # Or at least it does in some build environments like Travis CI + return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" + else: + raise RuntimeError("This module is not supported on {}." + .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"] diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/__init__.py new file mode 100644 index 00000000..ee55cb3d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"Tools for working with files injected into a charm just before deployment." diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/archive.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/archive.py new file mode 100644 index 00000000..7fc453f5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/archive.py @@ -0,0 +1,71 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import tarfile +import zipfile +from charmhelpers.core import ( + host, + hookenv, +) + + +class ArchiveError(Exception): + pass + + +def get_archive_handler(archive_name): + if os.path.isfile(archive_name): + if tarfile.is_tarfile(archive_name): + return extract_tarfile + elif zipfile.is_zipfile(archive_name): + return extract_zipfile + else: + # look at the file name + for ext in ('.tar', '.tar.gz', '.tgz', 'tar.bz2', '.tbz2', '.tbz'): + if archive_name.endswith(ext): + return extract_tarfile + for ext in ('.zip', '.jar'): + if archive_name.endswith(ext): + return extract_zipfile + + +def archive_dest_default(archive_name): + archive_file = os.path.basename(archive_name) + return os.path.join(hookenv.charm_dir(), "archives", archive_file) + + +def extract(archive_name, destpath=None): + handler = get_archive_handler(archive_name) + if handler: + if not destpath: + destpath = archive_dest_default(archive_name) + if not os.path.isdir(destpath): + host.mkdir(destpath) + handler(archive_name, destpath) + return destpath + else: + raise ArchiveError("No handler for archive") + + +def extract_tarfile(archive_name, destpath): + "Unpack a tar archive, optionally compressed" + archive = tarfile.open(archive_name) + archive.extractall(destpath) + + +def extract_zipfile(archive_name, destpath): + "Unpack a zip file" + archive = zipfile.ZipFile(archive_name) + archive.extractall(destpath) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/execd.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/execd.py new file mode 100644 index 00000000..1502aa0b --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charmhelpers/payload/execd.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import subprocess +from charmhelpers.core import hookenv + + +def default_execd_dir(): + return os.path.join(os.environ['CHARM_DIR'], 'exec.d') + + +def execd_module_paths(execd_dir=None): + """Generate a list of full paths to modules within execd_dir.""" + if not execd_dir: + execd_dir = default_execd_dir() + + if not os.path.exists(execd_dir): + return + + for subpath in os.listdir(execd_dir): + module = os.path.join(execd_dir, subpath) + if os.path.isdir(module): + yield module + + +def execd_submodule_paths(command, execd_dir=None): + """Generate a list of full paths to the specified command within exec_dir. 
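+
+    Example (an illustrative sketch)::
+
+        for path in execd_submodule_paths('charm-pre-install'):
+            subprocess.check_call([path])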
+ """ + for module_path in execd_module_paths(execd_dir): + path = os.path.join(module_path, command) + if os.access(path, os.X_OK) and os.path.isfile(path): + yield path + + +def execd_run(command, execd_dir=None, die_on_error=True, stderr=subprocess.STDOUT): + """Run command for each module within execd_dir which defines it.""" + for submodule_path in execd_submodule_paths(command, execd_dir): + try: + subprocess.check_output(submodule_path, stderr=stderr, + universal_newlines=True) + except subprocess.CalledProcessError as e: + hookenv.log("Error ({}) running {}. Output: {}".format( + e.returncode, e.cmd, e.output)) + if die_on_error: + sys.exit(e.returncode) + + +def execd_preinstall(execd_dir=None): + """Run charm-pre-install for each module within execd_dir.""" + execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/libansible.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/libansible.py new file mode 100644 index 00000000..811ec444 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/libansible.py @@ -0,0 +1,108 @@ +## +# Copyright ETSI OSM +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +## + +import fnmatch +import os +import subprocess +import sys +import yaml + +import charmhelpers.fetch + + +sys.path.append("lib") + + +ansible_hosts_path = "/etc/ansible/hosts" + + +def install_ansible_support(from_ppa=True, ppa_location="ppa:ansible/ansible"): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config.. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install("ansible") + with open(ansible_hosts_path, "w+") as hosts_file: + hosts_file.write("localhost ansible_connection=local") + + +def create_hosts(hostname, username, password, hosts): + inventory_path = "/etc/ansible/hosts" + with open(inventory_path, 'w') as f: + f.write('[{}]\n'.format(hosts)) + h1 = '{0} ansible_connection=network_cli ansible_network_os=vyos ' \ + 'ansible_ssh_user={1} ansible_ssh_pass={2} ' \ + 'ansible_python_interpreter=/usr/bin/python\n'.format(hostname, username, password) + f.write(h1) + + +def create_ansible_cfg(): + ansible_config_path = "/etc/ansible/ansible.cfg" + with open(ansible_config_path, 'w') as f: + f.write('[defaults]\n') + f.write('host_key_checking = False\n') + # logs playbook execution attempts to the specified path + f.write('log_path = /var/log/ansible.log\n') + f.write('[ssh_connection]\n') + f.write('control_path=%(directory)s/%%h-%%r\n') + f.write('control_path_dir=~/.ansible/cp\n') + + +# Function to find the playbook path +def find(pattern, path): + result = "" + for root, dirs, files in os.walk(path): + for name in files: + if fnmatch.fnmatch(name, pattern): + result = os.path.join(root, name) + return result + + +def execute_playbook(playbook_file, hostname, user, password, vars_dict=None): + playbook_path = find(playbook_file, '/var/lib/juju/agents/') + print("Playbook " + playbook_file + " is " + playbook_path) + with open(playbook_path, 'r') as f: + playbook_data = yaml.load(f, Loader=yaml.SafeLoader) + hosts = 'all' + if 'hosts' in playbook_data[0].keys() and playbook_data[0]['hosts']: + hosts = playbook_data[0]['hosts'] + create_ansible_cfg() + create_hosts(hostname, user, password, hosts) + call = 'ansible-playbook -vvvv %s ' % playbook_path + if vars_dict and isinstance(vars_dict, dict) and len(vars_dict) > 0: + call += '--extra-vars ' + string_var = '' + for v in vars_dict.items(): + string_var += '%s=%s ' % v + string_var = string_var.strip() + call += '"%s"' % string_var + call = call.strip() + print(call) + result = subprocess.check_output(call, shell=True) + lastline = result.decode('utf-8').splitlines()[-2] + print("Result is " + lastline) + return lastline diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/ns.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/ns.py new file mode 100644 index 00000000..25be4056 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/ns.py @@ -0,0 +1,301 @@ +# A prototype of a library to aid in the development and operation of +# OSM Network Service charms + +import asyncio +import logging +import os +import os.path +import re +import subprocess +import sys +import time +import yaml + +try: + import juju +except ImportError: + # Not all cloud images are created equal + if not os.path.exists("/usr/bin/python3") or not os.path.exists("/usr/bin/pip3"): + # Update the apt cache + subprocess.check_call(["apt-get", "update"]) + + # Install the Python3 package + subprocess.check_call(["apt-get", "install", "-y", "python3", "python3-pip"],) + + + # Install the libjuju build dependencies + subprocess.check_call(["apt-get", "install", "-y", "libffi-dev", "libssl-dev"],) + + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "juju"], + ) + +from juju.controller import Controller + +# Quiet the debug logging +logging.getLogger('websockets.protocol').setLevel(logging.INFO) 
+logging.getLogger('juju.client.connection').setLevel(logging.WARN)
+logging.getLogger('juju.model').setLevel(logging.WARN)
+logging.getLogger('juju.machine').setLevel(logging.WARN)
+
+
+class NetworkService:
+    """A lightweight interface to the Juju controller.
+
+    This NetworkService client is specifically designed to allow a higher-level
+    "NS" charm to interoperate with "VNF" charms, allowing for the execution of
+    Primitives across other charms within the same model.
+    """
+    endpoint = None
+    user = 'admin'
+    secret = None
+    port = 17070
+    loop = None
+    client = None
+    model = None
+    cacert = None
+
+    def __init__(self, user, secret, endpoint=None):
+        self.user = user
+        self.secret = secret
+        if endpoint is None:
+            addresses = os.environ['JUJU_API_ADDRESSES']
+            for address in addresses.split(' '):
+                self.endpoint = address
+        else:
+            self.endpoint = endpoint
+
+        # Stash the name of the model
+        self.model = os.environ['JUJU_MODEL_NAME']
+
+        # Load the ca-cert from agent.conf
+        AGENT_PATH = os.path.dirname(os.environ['JUJU_CHARM_DIR'])
+        with open("{}/agent.conf".format(AGENT_PATH), "r") as f:
+            try:
+                y = yaml.safe_load(f)
+                self.cacert = y['cacert']
+            except yaml.YAMLError as exc:
+                print("Unable to find Juju ca-cert.")
+                raise exc
+
+        # Create our event loop
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)
+
+    async def connect(self):
+        """Connect to the Juju controller."""
+        controller = Controller()
+
+        print(
+            "Connecting to controller... ws://{}:{} as {}/{}".format(
+                self.endpoint,
+                self.port,
+                self.user,
+                self.secret[-4:].rjust(len(self.secret), "*"),
+            )
+        )
+        await controller.connect(
+            endpoint=self.endpoint,
+            username=self.user,
+            password=self.secret,
+            cacert=self.cacert,
+        )
+
+        return controller
+
+    def __del__(self):
+        self.logout()
+
+    async def disconnect(self):
+        """Disconnect from the Juju controller."""
+        if self.client:
+            print("Disconnecting Juju controller")
+            await self.client.disconnect()
+
+    def login(self):
+        """Login to the Juju controller."""
+        if not self.client:
+            # Connect to the Juju API server
+            self.client = self.loop.run_until_complete(self.connect())
+        return self.client
+
+    def logout(self):
+        """Logout of the Juju controller."""
+        if self.loop:
+            print("Disconnecting from API")
+            self.loop.run_until_complete(self.disconnect())
+
+    def FormatApplicationName(self, *args):
+        """
+        Generate a Juju-compatible Application name
+
+        :param args tuple: Positional arguments to be used to construct the
+        application name.
+
+        Limitations::
+        - Only accepts characters a-z and non-consecutive dashes (-)
+        - Application name should not exceed 50 characters
+
+        Examples::
+
+            FormatApplicationName("ping_pong_ns", "ping_vnf", "a")
+        """
+        appname = ""
+        for c in "-".join(list(args)):
+            if c.isdigit():
+                c = chr(97 + int(c))
+            elif not c.isalpha():
+                c = "-"
+            appname += c
+
+        return re.sub('-+', '-', appname.lower())
+
+    def GetApplicationName(self, nsr_name, vnf_name, vnf_member_index):
+        """Get the runtime application name of a VNF/VDU.
+
+        This will generate an application name matching the name of the
+        deployed charm, given the right parameters.
+
+        :param nsr_name str: The name of the running Network Service, as
+        specified at instantiation.
+        :param vnf_name str: The name of the VNF or VDU
+        :param vnf_member_index: The vnf-member-index as specified in the
+        descriptor
+        """
+
+        application_name = self.FormatApplicationName(nsr_name,
+                                                      vnf_member_index,
+                                                      vnf_name)
+
+        # This matches the logic used by the LCM
+        application_name = application_name[0:48]
+        vca_index = int(vnf_member_index) - 1
+        application_name += '-' + chr(97 + vca_index // 26) \
+            + chr(97 + vca_index % 26)
+
+        return application_name
+
+    def ExecutePrimitiveGetOutput(self, application, primitive, params={},
+                                  timeout=600):
+        """Execute a single primitive and return its output.
+
+        This is a blocking method that will execute a single primitive and
+        wait for its completion before returning its output.
+
+        :param application str: The application name provided by
+        `GetApplicationName`.
+        :param primitive str: The name of the primitive to execute.
+        :param params list: A list of parameters.
+        :param timeout int: A timeout, in seconds, to wait for the primitive
+        to finish. Defaults to 600 seconds.
+        """
+        uuid = self.ExecutePrimitive(application, primitive, params)
+
+        status = None
+        output = None
+
+        starttime = time.time()
+        while time.time() < starttime + timeout:
+            status = self.GetPrimitiveStatus(uuid)
+            if status in ['completed', 'failed']:
+                break
+            time.sleep(10)
+
+        # When the primitive is done, get the output
+        if status in ['completed', 'failed']:
+            output = self.GetPrimitiveOutput(uuid)
+
+        return output
+
+    def ExecutePrimitive(self, application, primitive, params={}):
+        """Execute a primitive.
+
+        This is a non-blocking method to execute a primitive. It will return
+        the UUID of the queued primitive execution, which you can use
+        for subsequent calls to `GetPrimitiveStatus` and `GetPrimitiveOutput`.
+
+        :param application string: The name of the application
+        :param primitive string: The name of the Primitive.
+        :param params list: A list of parameters.
+
+        :returns uuid string: The UUID of the executed Primitive
+        """
+        uuid = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        # Get the application
+        if application in model.applications:
+            app = model.applications[application]
+
+            # Execute the primitive
+            unit = app.units[0]
+            if unit:
+                action = self.loop.run_until_complete(
+                    unit.run_action(primitive, **params)
+                )
+                uuid = action.id
+                print("Executing action: {}".format(uuid))
+            self.loop.run_until_complete(
+                model.disconnect()
+            )
+        else:
+            # Invalid mapping: application not found. Raise exception
+            raise Exception("Application not found: {}".format(application))
+
+        return uuid
+
+    def GetPrimitiveStatus(self, uuid):
+        """Get the status of a Primitive execution.
+
+        This will return one of the following strings:
+        - pending
+        - running
+        - completed
+        - failed
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The status of the executed Primitive
+        """
+        status = None
+
+        if not self.client:
+            self.login()
+
+        model = self.loop.run_until_complete(
+            self.client.get_model(self.model)
+        )
+
+        status = self.loop.run_until_complete(
+            model.get_action_status(uuid)
+        )
+
+        self.loop.run_until_complete(
+            model.disconnect()
+        )
+
+        return status[uuid]
+
+    def GetPrimitiveOutput(self, uuid):
+        """Get the output of a completed Primitive execution.
+
+        :param uuid string: The UUID of the executed Primitive.
+        :returns: The output of the execution, or None if it's still running.
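+
+        Example (an illustrative sketch; the primitive name and its
+        parameters are hypothetical)::
+
+            uuid = ns.ExecutePrimitive('my-app', 'touch',
+                                       {'filename': '/home/ubuntu/flag'})
+            # ... wait until GetPrimitiveStatus(uuid) reports 'completed' ...
+            output = ns.GetPrimitiveOutput(uuid)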
+ """ + result = None + if not self.client: + self.login() + + model = self.loop.run_until_complete( + self.client.get_model(self.model) + ) + + result = self.loop.run_until_complete( + model.get_action_output(uuid) + ) + + self.loop.run_until_complete( + model.disconnect() + ) + + return result diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/proxy_cluster.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/proxy_cluster.py new file mode 100644 index 00000000..f323a3af --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/proxy_cluster.py @@ -0,0 +1,59 @@ +import socket + +from ops.framework import Object, StoredState + + +class ProxyCluster(Object): + + state = StoredState() + + def __init__(self, charm, relation_name): + super().__init__(charm, relation_name) + self._relation_name = relation_name + self._relation = self.framework.model.get_relation(self._relation_name) + + self.framework.observe(charm.on.ssh_keys_initialized, self.on_ssh_keys_initialized) + + self.state.set_default(ssh_public_key=None) + self.state.set_default(ssh_private_key=None) + + def on_ssh_keys_initialized(self, event): + if not self.framework.model.unit.is_leader(): + raise RuntimeError("The initial unit of a cluster must also be a leader.") + + self.state.ssh_public_key = event.ssh_public_key + self.state.ssh_private_key = event.ssh_private_key + if not self.is_joined: + event.defer() + return + + self._relation.data[self.model.app][ + "ssh_public_key" + ] = self.state.ssh_public_key + self._relation.data[self.model.app][ + "ssh_private_key" + ] = self.state.ssh_private_key + + @property + def is_joined(self): + return self._relation is not None + + @property + def ssh_public_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_public_key") + + @property + def ssh_private_key(self): + if self.is_joined: + return self._relation.data[self.model.app].get("ssh_private_key") + + @property + def is_cluster_initialized(self): + return ( + True + if self.is_joined + and self._relation.data[self.model.app].get("ssh_public_key") + and self._relation.data[self.model.app].get("ssh_private_key") + else False + ) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/sshproxy.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/sshproxy.py new file mode 100644 index 00000000..e2c311e5 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/charms/osm/sshproxy.py @@ -0,0 +1,375 @@ +"""Module to help with executing commands over SSH.""" +## +# Copyright 2016 Canonical Ltd. +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+## + +# from charmhelpers.core import unitdata +# from charmhelpers.core.hookenv import log + +import io +import ipaddress +import subprocess +import os +import socket +import shlex +import traceback +import sys + +from subprocess import ( + check_call, + Popen, + CalledProcessError, + PIPE, +) + +from ops.charm import CharmBase, CharmEvents +from ops.framework import StoredState, EventBase, EventSource +from ops.main import main +from ops.model import ( + ActiveStatus, + BlockedStatus, + MaintenanceStatus, + WaitingStatus, + ModelError, +) +import os +import subprocess +from .proxy_cluster import ProxyCluster + +import logging + + +logger = logging.getLogger(__name__) + +class SSHKeysInitialized(EventBase): + def __init__(self, handle, ssh_public_key, ssh_private_key): + super().__init__(handle) + self.ssh_public_key = ssh_public_key + self.ssh_private_key = ssh_private_key + + def snapshot(self): + return { + "ssh_public_key": self.ssh_public_key, + "ssh_private_key": self.ssh_private_key, + } + + def restore(self, snapshot): + self.ssh_public_key = snapshot["ssh_public_key"] + self.ssh_private_key = snapshot["ssh_private_key"] + + +class ProxyClusterEvents(CharmEvents): + ssh_keys_initialized = EventSource(SSHKeysInitialized) + + +class SSHProxyCharm(CharmBase): + + state = StoredState() + on = ProxyClusterEvents() + + def __init__(self, framework, key): + super().__init__(framework, key) + + self.peers = ProxyCluster(self, "proxypeer") + + # SSH Proxy actions (primitives) + self.framework.observe(self.on.generate_ssh_key_action, self.on_generate_ssh_key_action) + self.framework.observe(self.on.get_ssh_public_key_action, self.on_get_ssh_public_key_action) + self.framework.observe(self.on.run_action, self.on_run_action) + self.framework.observe(self.on.verify_ssh_credentials_action, self.on_verify_ssh_credentials_action) + + self.framework.observe(self.on.proxypeer_relation_changed, self.on_proxypeer_relation_changed) + + def get_ssh_proxy(self): + """Get the SSHProxy instance""" + proxy = SSHProxy( + hostname=self.model.config["ssh-hostname"], + username=self.model.config["ssh-username"], + password=self.model.config["ssh-password"], + ) + return proxy + + def on_proxypeer_relation_changed(self, event): + if self.peers.is_cluster_initialized and not SSHProxy.has_ssh_key(): + pubkey = self.peers.ssh_public_key + privkey = self.peers.ssh_private_key + SSHProxy.write_ssh_keys(public=pubkey, private=privkey) + self.verify_credentials() + else: + event.defer() + + def on_config_changed(self, event): + """Handle changes in configuration""" + self.verify_credentials() + + def on_install(self, event): + SSHProxy.install() + + def on_start(self, event): + """Called when the charm is being installed""" + if not self.peers.is_joined: + event.defer() + return + + unit = self.model.unit + + if not SSHProxy.has_ssh_key(): + unit.status = MaintenanceStatus("Generating SSH keys...") + pubkey = None + privkey = None + if self.model.unit.is_leader(): + if self.peers.is_cluster_initialized: + SSHProxy.write_ssh_keys( + public=self.peers.ssh_public_key, + private=self.peers.ssh_private_key, + ) + else: + SSHProxy.generate_ssh_key() + self.on.ssh_keys_initialized.emit( + SSHProxy.get_ssh_public_key(), SSHProxy.get_ssh_private_key() + ) + self.verify_credentials() + + def verify_credentials(self): + unit = self.model.unit + + # Unit should go into a waiting state until verify_ssh_credentials is successful + unit.status = WaitingStatus("Waiting for SSH credentials") + proxy = self.get_ssh_proxy() + verified, 
+    def verify_credentials(self):
+        unit = self.model.unit
+
+        # Unit should go into a waiting state until verify_ssh_credentials is successful
+        unit.status = WaitingStatus("Waiting for SSH credentials")
+        proxy = self.get_ssh_proxy()
+        verified, _ = proxy.verify_credentials()
+        if verified:
+            unit.status = ActiveStatus()
+        else:
+            unit.status = BlockedStatus("Invalid SSH credentials.")
+        return verified
+
+    #####################
+    # SSH Proxy methods #
+    #####################
+    def on_generate_ssh_key_action(self, event):
+        """Generate a new SSH keypair for this unit."""
+        if self.model.unit.is_leader():
+            if not SSHProxy.generate_ssh_key():
+                event.fail("Unable to generate ssh key")
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_get_ssh_public_key_action(self, event):
+        """Get the SSH public key for this unit."""
+        if self.model.unit.is_leader():
+            pubkey = SSHProxy.get_ssh_public_key()
+            event.set_results({"pubkey": pubkey})
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_run_action(self, event):
+        """Run an arbitrary command on the remote host."""
+        if self.model.unit.is_leader():
+            cmd = event.params["command"]
+            proxy = self.get_ssh_proxy()
+            stdout, stderr = proxy.run(cmd)
+            event.set_results({"output": stdout})
+            if stderr:
+                event.fail(stderr)
+        else:
+            event.fail("Unit is not leader")
+            return
+
+    def on_verify_ssh_credentials_action(self, event):
+        """Verify the SSH credentials for this unit."""
+        unit = self.model.unit
+        if unit.is_leader():
+            proxy = self.get_ssh_proxy()
+            verified, stderr = proxy.verify_credentials()
+            if verified:
+                event.set_results({"verified": True})
+                unit.status = ActiveStatus()
+            else:
+                event.set_results({"verified": False, "stderr": stderr})
+                event.fail("Not verified")
+                unit.status = BlockedStatus("Invalid SSH credentials.")
+        else:
+            event.fail("Unit is not leader")
+            return
+
+
+class LeadershipError(ModelError):
+    def __init__(self):
+        super().__init__("not leader")
+
+
+class SSHProxy:
+    private_key_path = "/root/.ssh/id_sshproxy"
+    public_key_path = "/root/.ssh/id_sshproxy.pub"
+    key_type = "rsa"
+    key_bits = 4096
+
+    def __init__(self, hostname: str, username: str, password: str = ""):
+        self.hostname = hostname
+        self.username = username
+        self.password = password
+
+    @staticmethod
+    def install():
+        check_call("apt update && apt install -y openssh-client sshpass", shell=True)
+
+    @staticmethod
+    def generate_ssh_key():
+        """Generate a 4096-bit rsa keypair."""
+        if not os.path.exists(SSHProxy.private_key_path):
+            cmd = "ssh-keygen -t {} -b {} -N '' -f {}".format(
+                SSHProxy.key_type, SSHProxy.key_bits, SSHProxy.private_key_path,
+            )
+
+            try:
+                check_call(cmd, shell=True)
+            except CalledProcessError:
+                return False
+
+        return True
+
+    @staticmethod
+    def write_ssh_keys(public, private):
+        """Write a 4096-bit rsa keypair."""
+        with open(SSHProxy.public_key_path, "w") as f:
+            f.write(public)
+        with open(SSHProxy.private_key_path, "w") as f:
+            f.write(private)
+
+    @staticmethod
+    def get_ssh_public_key():
+        publickey = ""
+        if os.path.exists(SSHProxy.public_key_path):
+            with open(SSHProxy.public_key_path, "r") as f:
+                publickey = f.read()
+        return publickey
+
+    @staticmethod
+    def get_ssh_private_key():
+        privatekey = ""
+        if os.path.exists(SSHProxy.private_key_path):
+            with open(SSHProxy.private_key_path, "r") as f:
+                privatekey = f.read()
+        return privatekey
+
+    @staticmethod
+    def has_ssh_key():
+        return os.path.exists(SSHProxy.private_key_path)
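+    # A minimal usage sketch (the host and credentials below are hypothetical)
+    # showing how the charm drives this class; `run` and `verify_credentials`
+    # are defined next:
+    #
+    #   proxy = SSHProxy(hostname="203.0.113.10", username="ubuntu", password="secret")
+    #   verified, stderr = proxy.verify_credentials()
+    #   if verified:
+    #       stdout, stderr = proxy.run("hostname")
+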
+    def run(self, cmd: str) -> (str, str):
+        """Run a command remotely via SSH.
+
+        Note: The previous behavior was to run the command locally if SSH wasn't
+        configured, but that can lead to cases where execution succeeds when you'd
+        expect it not to.
+        """
+        if isinstance(cmd, str):
+            cmd = shlex.split(cmd)
+
+        host = self._get_hostname()
+        user = self.username
+
+        # Make sure we have everything we need to connect
+        if host and user:
+            return self.ssh(cmd)
+
+        raise Exception("Invalid SSH credentials.")
+
+    def scp(self, source_file, destination_file):
+        """Execute an scp command. Requires a fully qualified source and
+        destination.
+
+        :param str source_file: Path to the source file
+        :param str destination_file: Path to the destination file
+        :raises: :class:`CalledProcessError` if the command fails
+        """
+        cmd = [
+            "sshpass",
+            "-p",
+            self.password,
+            "scp",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            "-B",
+        ]
+        destination = "{}@{}:{}".format(self.username, self.hostname, destination_file)
+        cmd.extend([source_file, destination])
+        subprocess.run(cmd, check=True)
+
+    def ssh(self, command):
+        """Run a command remotely via SSH.
+
+        :param list(str) command: The command to execute
+        :return: tuple: The stdout and stderr of the command execution
+        :raises: :class:`CalledProcessError` if the command fails
+        """
+        destination = "{}@{}".format(self.username, self.hostname)
+        cmd = [
+            "sshpass",
+            "-p",
+            self.password,
+            "ssh",
+            "-i",
+            os.path.expanduser(self.private_key_path),
+            "-o",
+            "StrictHostKeyChecking=no",
+            "-q",
+            destination,
+        ]
+        cmd.extend(command)
+        output = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        return (
+            output.stdout.decode("utf-8").strip(),
+            output.stderr.decode("utf-8").strip(),
+        )
+
+    def verify_credentials(self):
+        """Verify the SSH credentials.
+
+        :return (bool, str): Verified, Stderr
+        """
+        verified = False
+        stderr = ""
+        try:
+            (stdout, stderr) = self.run("hostname")
+            verified = True
+        except CalledProcessError as e:
+            stderr = "Command failed: {} ({})".format(" ".join(e.cmd), str(e.output))
+        except (TimeoutError, socket.timeout):
+            stderr = "Timeout attempting to reach {}".format(self._get_hostname())
+        except Exception:
+            tb = traceback.format_exc()
+            stderr = "Unhandled exception: {}".format(tb)
+        return verified, stderr
+
+    ###################
+    # Private methods #
+    ###################
+    def _get_hostname(self):
+        """Get the hostname for the ssh target.
+
+        HACK: This function was added to work around an issue where the
+        ssh-hostname was passed in the format of a.b.c.d;a.b.c.d, where the first
+        is the floating ip, and the second the non-floating ip, for an Openstack
+        instance.
+        """
+        return self.hostname.split(";")[0]
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/__init__.py
new file mode 100644
index 00000000..f17b2969
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""The Operator Framework."""
+
+from .version import version as __version__  # noqa: F401 (imported but unused)
+
+# Import here the bare minimum to break the circular import between modules
+from . import charm  # noqa: F401 (imported but unused)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/charm.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/charm.py
new file mode 100755
index 00000000..d898de85
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/charm.py
@@ -0,0 +1,575 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import enum
+import os
+import pathlib
+import typing
+
+import yaml
+
+from ops.framework import Object, EventSource, EventBase, Framework, ObjectEvents
+from ops import model
+
+
+def _loadYaml(source):
+    if yaml.__with_libyaml__:
+        return yaml.load(source, Loader=yaml.CSafeLoader)
+    return yaml.load(source, Loader=yaml.SafeLoader)
+
+
+class HookEvent(EventBase):
+    """A base class for events that trigger because of a Juju hook firing."""
+
+
+class ActionEvent(EventBase):
+    """A base class for events that trigger when a user asks for an Action to be run.
+
+    To read the parameters for the action, see the instance variable `params`.
+    To respond with the result of the action, call `set_results`. To add progress
+    messages that are visible as the action is progressing use `log`.
+
+    :ivar params: The parameters passed to the action (read by action-get)
+    """
+
+    def defer(self):
+        """Action events are not deferrable like other events.
+
+        This is because an action runs synchronously and the user is waiting for the result.
+        """
+        raise RuntimeError('cannot defer action events')
+
+    def restore(self, snapshot: dict) -> None:
+        """Used by the operator framework to record the action.
+
+        Not meant to be called directly by Charm code.
+        """
+        env_action_name = os.environ.get('JUJU_ACTION_NAME')
+        event_action_name = self.handle.kind[:-len('_action')].replace('_', '-')
+        if event_action_name != env_action_name:
+            # This could only happen if the dev manually emits the action, or from a bug.
+            raise RuntimeError('action event kind does not match current action')
+        # Params are loaded at restore rather than __init__ because
+        # the model is not available in __init__.
+        self.params = self.framework.model._backend.action_get()
+
+    def set_results(self, results: typing.Mapping) -> None:
+        """Report the result of the action.
+
+        Args:
+            results: The result of the action as a Dict
+        """
+        self.framework.model._backend.action_set(results)
+
+    def log(self, message: str) -> None:
+        """Send a message that a user will see while the action is running.
+
+        Args:
+            message: The message for the user.
+        """
+        self.framework.model._backend.action_log(message)
+
+    def fail(self, message: str = '') -> None:
+        """Report that this action has failed.
+
+        Args:
+            message: Optional message to record why it has failed.
+        """
+        self.framework.model._backend.action_fail(message)
+
+
+class InstallEvent(HookEvent):
+    """Represents the `install` hook from Juju."""
+
+
+class StartEvent(HookEvent):
+    """Represents the `start` hook from Juju."""
+
+
+class StopEvent(HookEvent):
+    """Represents the `stop` hook from Juju."""
+
+
+class RemoveEvent(HookEvent):
+    """Represents the `remove` hook from Juju."""
+
+
+class ConfigChangedEvent(HookEvent):
+    """Represents the `config-changed` hook from Juju."""
+
+
+class UpdateStatusEvent(HookEvent):
+    """Represents the `update-status` hook from Juju."""
+
+
+class UpgradeCharmEvent(HookEvent):
+    """Represents the `upgrade-charm` hook from Juju.
+
+    This will be triggered when a user has run `juju upgrade-charm`. It is run after Juju
+    has unpacked the upgraded charm code, and so this event will be handled with new code.
+    """
+
+
+class PreSeriesUpgradeEvent(HookEvent):
+    """Represents the `pre-series-upgrade` hook from Juju.
+
+    This happens when a user has run `juju upgrade-series MACHINE prepare` and
+    will fire for each unit that is running on the machine, telling them that
+    the user is preparing to upgrade the Machine's series (eg trusty->bionic).
+    The charm should take actions to prepare for the upgrade (a database charm
+    would want to write out a version-independent dump of the database, so that
+    when a new version of the database is available in a new series, it can be
+    used.)
+    Once all units on a machine have run `pre-series-upgrade`, the user will
+    initiate the steps to actually upgrade the machine (eg `do-release-upgrade`).
+    When the upgrade has been completed, the :class:`PostSeriesUpgradeEvent` will fire.
+    """
+
+
+class PostSeriesUpgradeEvent(HookEvent):
+    """Represents the `post-series-upgrade` hook from Juju.
+
+    This is run after the user has done a distribution upgrade (or rolled back
+    and kept the same series). It is called in response to
+    `juju upgrade-series MACHINE complete`. Charms are expected to do whatever
+    steps are necessary to reconfigure their applications for the new series.
+    """
+
+
+class LeaderElectedEvent(HookEvent):
+    """Represents the `leader-elected` hook from Juju.
+
+    Juju will trigger this when a new lead unit is chosen for a given application.
+    Leadership here refers to the unit Juju has elected to coordinate the
+    application (not necessarily the primary of a running application). The main
+    utility is that charm authors can know that only one unit will be a leader
+    at any given time, so they can do configuration, etc, that would otherwise
+    require coordination between units. (eg, selecting a password for a new
+    relation)
+    """
+
+
+class LeaderSettingsChangedEvent(HookEvent):
+    """Represents the `leader-settings-changed` hook from Juju.
+
+    Deprecated. This represents when a lead unit would call `leader-set` to inform
+    the other units of an application that they have new information to handle.
+    This has been deprecated in favor of using a Peer relation, and having the
+    leader set a value in the Application data bag for that peer relation.
+    (see :class:`RelationChangedEvent`).
+    """
+
+
+class CollectMetricsEvent(HookEvent):
+    """Represents the `collect-metrics` hook from Juju.
+
+    Note that events firing during a CollectMetricsEvent are currently
+    sandboxed in how they can interact with Juju. To report metrics
+    use :meth:`.add_metrics`.
+ """ + + def add_metrics(self, metrics: typing.Mapping, labels: typing.Mapping = None) -> None: + """Record metrics that have been gathered by the charm for this unit. + + Args: + metrics: A collection of {key: float} pairs that contains the + metrics that have been gathered + labels: {key:value} strings that can be applied to the + metrics that are being gathered + """ + self.framework.model._backend.add_metrics(metrics, labels) + + +class RelationEvent(HookEvent): + """A base class representing the various relation lifecycle events. + + Charmers should not be creating RelationEvents directly. The events will be + generated by the framework from Juju related events. Users can observe them + from the various `CharmBase.on[relation_name].relation_*` events. + + Attributes: + relation: The Relation involved in this event + app: The remote application that has triggered this event + unit: The remote unit that has triggered this event. This may be None + if the relation event was triggered as an Application level event + """ + + def __init__(self, handle, relation, app=None, unit=None): + super().__init__(handle) + + if unit is not None and unit.app != app: + raise RuntimeError( + 'cannot create RelationEvent with application {} and unit {}'.format(app, unit)) + + self.relation = relation + self.app = app + self.unit = unit + + def snapshot(self) -> dict: + """Used by the framework to serialize the event to disk. + + Not meant to be called by Charm code. + """ + snapshot = { + 'relation_name': self.relation.name, + 'relation_id': self.relation.id, + } + if self.app: + snapshot['app_name'] = self.app.name + if self.unit: + snapshot['unit_name'] = self.unit.name + return snapshot + + def restore(self, snapshot: dict) -> None: + """Used by the framework to deserialize the event from disk. + + Not meant to be called by Charm code. + """ + self.relation = self.framework.model.get_relation( + snapshot['relation_name'], snapshot['relation_id']) + + app_name = snapshot.get('app_name') + if app_name: + self.app = self.framework.model.get_app(app_name) + else: + self.app = None + + unit_name = snapshot.get('unit_name') + if unit_name: + self.unit = self.framework.model.get_unit(unit_name) + else: + self.unit = None + + +class RelationCreatedEvent(RelationEvent): + """Represents the `relation-created` hook from Juju. + + This is triggered when a new relation to another app is added in Juju. This + can occur before units for those applications have started. All existing + relations should be established before start. + """ + + +class RelationJoinedEvent(RelationEvent): + """Represents the `relation-joined` hook from Juju. + + This is triggered whenever a new unit of a related application joins the relation. + (eg, a unit was added to an existing related app, or a new relation was established + with an application that already had units.) + """ + + +class RelationChangedEvent(RelationEvent): + """Represents the `relation-changed` hook from Juju. + + This is triggered whenever there is a change to the data bucket for a related + application or unit. Look at `event.relation.data[event.unit/app]` to see the + new information. + """ + + +class RelationDepartedEvent(RelationEvent): + """Represents the `relation-departed` hook from Juju. + + This is the inverse of the RelationJoinedEvent, representing when a unit + is leaving the relation (the unit is being removed, the app is being removed, + the relation is being removed). It is fired once for each unit that is + going away. 
+ """ + + +class RelationBrokenEvent(RelationEvent): + """Represents the `relation-broken` hook from Juju. + + If a relation is being removed (`juju remove-relation` or `juju remove-application`), + once all the units have been removed, RelationBrokenEvent will fire to signal + that the relationship has been fully terminated. + """ + + +class StorageEvent(HookEvent): + """Base class representing Storage related events.""" + + +class StorageAttachedEvent(StorageEvent): + """Represents the `storage-attached` hook from Juju. + + Called when new storage is available for the charm to use. + """ + + +class StorageDetachingEvent(StorageEvent): + """Represents the `storage-detaching` hook from Juju. + + Called when storage a charm has been using is going away. + """ + + +class CharmEvents(ObjectEvents): + """The events that are generated by Juju in response to the lifecycle of an application.""" + + install = EventSource(InstallEvent) + start = EventSource(StartEvent) + stop = EventSource(StopEvent) + remove = EventSource(RemoveEvent) + update_status = EventSource(UpdateStatusEvent) + config_changed = EventSource(ConfigChangedEvent) + upgrade_charm = EventSource(UpgradeCharmEvent) + pre_series_upgrade = EventSource(PreSeriesUpgradeEvent) + post_series_upgrade = EventSource(PostSeriesUpgradeEvent) + leader_elected = EventSource(LeaderElectedEvent) + leader_settings_changed = EventSource(LeaderSettingsChangedEvent) + collect_metrics = EventSource(CollectMetricsEvent) + + +class CharmBase(Object): + """Base class that represents the Charm overall. + + Usually this initialization is done by ops.main.main() rather than Charm authors + directly instantiating a Charm. + + Args: + framework: The framework responsible for managing the Model and events for this + Charm. + key: Ignored; will remove after deprecation period of the signature change. + """ + + on = CharmEvents() + + def __init__(self, framework: Framework, key: typing.Optional = None): + super().__init__(framework, None) + + for relation_name in self.framework.meta.relations: + relation_name = relation_name.replace('-', '_') + self.on.define_event(relation_name + '_relation_created', RelationCreatedEvent) + self.on.define_event(relation_name + '_relation_joined', RelationJoinedEvent) + self.on.define_event(relation_name + '_relation_changed', RelationChangedEvent) + self.on.define_event(relation_name + '_relation_departed', RelationDepartedEvent) + self.on.define_event(relation_name + '_relation_broken', RelationBrokenEvent) + + for storage_name in self.framework.meta.storages: + storage_name = storage_name.replace('-', '_') + self.on.define_event(storage_name + '_storage_attached', StorageAttachedEvent) + self.on.define_event(storage_name + '_storage_detaching', StorageDetachingEvent) + + for action_name in self.framework.meta.actions: + action_name = action_name.replace('-', '_') + self.on.define_event(action_name + '_action', ActionEvent) + + @property + def app(self) -> model.Application: + """Application that this unit is part of.""" + return self.framework.model.app + + @property + def unit(self) -> model.Unit: + """Unit that this execution is responsible for.""" + return self.framework.model.unit + + @property + def meta(self) -> 'CharmMeta': + """CharmMeta of this charm. + """ + return self.framework.meta + + @property + def charm_dir(self) -> pathlib.Path: + """Root directory of the Charm as it is running. + """ + return self.framework.charm_dir + + +class CharmMeta: + """Object containing the metadata for the charm. 
+ + This is read from metadata.yaml and/or actions.yaml. Generally charms will + define this information, rather than reading it at runtime. This class is + mostly for the framework to understand what the charm has defined. + + The maintainers, tags, terms, series, and extra_bindings attributes are all + lists of strings. The requires, provides, peers, relations, storage, + resources, and payloads attributes are all mappings of names to instances + of the respective RelationMeta, StorageMeta, ResourceMeta, or PayloadMeta. + + The relations attribute is a convenience accessor which includes all of the + requires, provides, and peers RelationMeta items. If needed, the role of + the relation definition can be obtained from its role attribute. + + Attributes: + name: The name of this charm + summary: Short description of what this charm does + description: Long description for this charm + maintainers: A list of strings of the email addresses of the maintainers + of this charm. + tags: Charm store tag metadata for categories associated with this charm. + terms: Charm store terms that should be agreed to before this charm can + be deployed. (Used for things like licensing issues.) + series: The list of supported OS series that this charm can support. + The first entry in the list is the default series that will be + used by deploy if no other series is requested by the user. + subordinate: True/False whether this charm is intended to be used as a + subordinate charm. + min_juju_version: If supplied, indicates this charm needs features that + are not available in older versions of Juju. + requires: A dict of {name: :class:`RelationMeta` } for each 'requires' relation. + provides: A dict of {name: :class:`RelationMeta` } for each 'provides' relation. + peers: A dict of {name: :class:`RelationMeta` } for each 'peer' relation. + relations: A dict containing all :class:`RelationMeta` attributes (merged from other + sections) + storages: A dict of {name: :class:`StorageMeta`} for each defined storage. + resources: A dict of {name: :class:`ResourceMeta`} for each defined resource. + payloads: A dict of {name: :class:`PayloadMeta`} for each defined payload. + extra_bindings: A dict of additional named bindings that a charm can use + for network configuration. + actions: A dict of {name: :class:`ActionMeta`} for actions that the charm has defined. 
+ Args: + raw: a mapping containing the contents of metadata.yaml + actions_raw: a mapping containing the contents of actions.yaml + """ + + def __init__(self, raw: dict = {}, actions_raw: dict = {}): + self.name = raw.get('name', '') + self.summary = raw.get('summary', '') + self.description = raw.get('description', '') + self.maintainers = [] + if 'maintainer' in raw: + self.maintainers.append(raw['maintainer']) + if 'maintainers' in raw: + self.maintainers.extend(raw['maintainers']) + self.tags = raw.get('tags', []) + self.terms = raw.get('terms', []) + self.series = raw.get('series', []) + self.subordinate = raw.get('subordinate', False) + self.min_juju_version = raw.get('min-juju-version') + self.requires = {name: RelationMeta(RelationRole.requires, name, rel) + for name, rel in raw.get('requires', {}).items()} + self.provides = {name: RelationMeta(RelationRole.provides, name, rel) + for name, rel in raw.get('provides', {}).items()} + self.peers = {name: RelationMeta(RelationRole.peer, name, rel) + for name, rel in raw.get('peers', {}).items()} + self.relations = {} + self.relations.update(self.requires) + self.relations.update(self.provides) + self.relations.update(self.peers) + self.storages = {name: StorageMeta(name, storage) + for name, storage in raw.get('storage', {}).items()} + self.resources = {name: ResourceMeta(name, res) + for name, res in raw.get('resources', {}).items()} + self.payloads = {name: PayloadMeta(name, payload) + for name, payload in raw.get('payloads', {}).items()} + self.extra_bindings = raw.get('extra-bindings', {}) + self.actions = {name: ActionMeta(name, action) for name, action in actions_raw.items()} + + @classmethod + def from_yaml( + cls, metadata: typing.Union[str, typing.TextIO], + actions: typing.Optional[typing.Union[str, typing.TextIO]] = None): + """Instantiate a CharmMeta from a YAML description of metadata.yaml. + + Args: + metadata: A YAML description of charm metadata (name, relations, etc.) + This can be a simple string, or a file-like object. (passed to `yaml.safe_load`). + actions: YAML description of Actions for this charm (eg actions.yaml) + """ + meta = _loadYaml(metadata) + raw_actions = {} + if actions is not None: + raw_actions = _loadYaml(actions) + return cls(meta, raw_actions) + + +class RelationRole(enum.Enum): + peer = 'peer' + requires = 'requires' + provides = 'provides' + + def is_peer(self) -> bool: + """Return whether the current role is peer. + + A convenience to avoid having to import charm. + """ + return self is RelationRole.peer + + +class RelationMeta: + """Object containing metadata about a relation definition. + + Should not be constructed directly by Charm code. Is gotten from one of + :attr:`CharmMeta.peers`, :attr:`CharmMeta.requires`, :attr:`CharmMeta.provides`, + or :attr:`CharmMeta.relations`. + + Attributes: + role: This is one of peer/requires/provides + relation_name: Name of this relation from metadata.yaml + interface_name: Optional definition of the interface protocol. + scope: "global" or "container" scope based on how the relation should be used. 
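+
+    For example, for a charm whose metadata.yaml declares a requires relation
+    named "db" (a hypothetical name used here only for illustration), the
+    interface can be read as::
+
+        charm.meta.requires['db'].interface_name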
+ """ + + def __init__(self, role: RelationRole, relation_name: str, raw: dict): + if not isinstance(role, RelationRole): + raise TypeError("role should be a Role, not {!r}".format(role)) + self.role = role + self.relation_name = relation_name + self.interface_name = raw['interface'] + self.scope = raw.get('scope') + + +class StorageMeta: + """Object containing metadata about a storage definition.""" + + def __init__(self, name, raw): + self.storage_name = name + self.type = raw['type'] + self.description = raw.get('description', '') + self.shared = raw.get('shared', False) + self.read_only = raw.get('read-only', False) + self.minimum_size = raw.get('minimum-size') + self.location = raw.get('location') + self.multiple_range = None + if 'multiple' in raw: + range = raw['multiple']['range'] + if '-' not in range: + self.multiple_range = (int(range), int(range)) + else: + range = range.split('-') + self.multiple_range = (int(range[0]), int(range[1]) if range[1] else None) + + +class ResourceMeta: + """Object containing metadata about a resource definition.""" + + def __init__(self, name, raw): + self.resource_name = name + self.type = raw['type'] + self.filename = raw.get('filename', None) + self.description = raw.get('description', '') + + +class PayloadMeta: + """Object containing metadata about a payload definition.""" + + def __init__(self, name, raw): + self.payload_name = name + self.type = raw['type'] + + +class ActionMeta: + """Object containing metadata about an action's definition.""" + + def __init__(self, name, raw=None): + raw = raw or {} + self.name = name + self.title = raw.get('title', '') + self.description = raw.get('description', '') + self.parameters = raw.get('params', {}) # {<parameter name>: <JSON Schema definition>} + self.required = raw.get('required', []) # [<parameter name>, ...] diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/framework.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/framework.py new file mode 100755 index 00000000..b7c4749f --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/framework.py @@ -0,0 +1,1067 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import collections.abc +import inspect +import keyword +import logging +import marshal +import os +import pathlib +import pdb +import re +import sys +import types +import weakref + +from ops import charm +from ops.storage import ( + NoSnapshotError, + SQLiteStorage, +) + +logger = logging.getLogger(__name__) + + +class Handle: + """Handle defines a name for an object in the form of a hierarchical path. + + The provided parent is the object (or that object's handle) that this handle + sits under, or None if the object identified by this handle stands by itself + as the root of its own hierarchy. + + The handle kind is a string that defines a namespace so objects with the + same parent and kind will have unique keys. + + The handle key is a string uniquely identifying the object. 
No other objects + under the same parent and kind may have the same key. + """ + + def __init__(self, parent, kind, key): + if parent and not isinstance(parent, Handle): + parent = parent.handle + self._parent = parent + self._kind = kind + self._key = key + if parent: + if key: + self._path = "{}/{}[{}]".format(parent, kind, key) + else: + self._path = "{}/{}".format(parent, kind) + else: + if key: + self._path = "{}[{}]".format(kind, key) + else: + self._path = "{}".format(kind) + + def nest(self, kind, key): + return Handle(self, kind, key) + + def __hash__(self): + return hash((self.parent, self.kind, self.key)) + + def __eq__(self, other): + return (self.parent, self.kind, self.key) == (other.parent, other.kind, other.key) + + def __str__(self): + return self.path + + @property + def parent(self): + return self._parent + + @property + def kind(self): + return self._kind + + @property + def key(self): + return self._key + + @property + def path(self): + return self._path + + @classmethod + def from_path(cls, path): + handle = None + for pair in path.split("/"): + pair = pair.split("[") + good = False + if len(pair) == 1: + kind, key = pair[0], None + good = True + elif len(pair) == 2: + kind, key = pair + if key and key[-1] == ']': + key = key[:-1] + good = True + if not good: + raise RuntimeError("attempted to restore invalid handle path {}".format(path)) + handle = Handle(handle, kind, key) + return handle + + +class EventBase: + + def __init__(self, handle): + self.handle = handle + self.deferred = False + + def defer(self): + self.deferred = True + + def snapshot(self): + """Return the snapshot data that should be persisted. + + Subclasses must override to save any custom state. + """ + return None + + def restore(self, snapshot): + """Restore the value state from the given snapshot. + + Subclasses must override to restore their custom state. + """ + self.deferred = False + + +class EventSource: + """EventSource wraps an event type with a descriptor to facilitate observing and emitting. + + It is generally used as: + + class SomethingHappened(EventBase): + pass + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + With that, instances of that type will offer the someobj.something_happened + attribute which is a BoundEvent and may be used to emit and observe the event. + """ + + def __init__(self, event_type): + if not isinstance(event_type, type) or not issubclass(event_type, EventBase): + raise RuntimeError( + 'Event requires a subclass of EventBase as an argument, got {}'.format(event_type)) + self.event_type = event_type + self.event_kind = None + self.emitter_type = None + + def _set_name(self, emitter_type, event_kind): + if self.event_kind is not None: + raise RuntimeError( + 'EventSource({}) reused as {}.{} and {}.{}'.format( + self.event_type.__name__, + self.emitter_type.__name__, + self.event_kind, + emitter_type.__name__, + event_kind, + )) + self.event_kind = event_kind + self.emitter_type = emitter_type + + def __get__(self, emitter, emitter_type=None): + if emitter is None: + return self + # Framework might not be available if accessed as CharmClass.on.event + # rather than charm_instance.on.event, but in that case it couldn't be + # emitted anyway, so there's no point to registering it. 
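+        # A concrete illustration: `charm.on.start` resolves through this method
+        # to a BoundEvent, so handlers are registered with e.g.
+        #   framework.observe(charm.on.start, charm._on_start)
+        # and the event is raised with `charm.on.start.emit()`.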
+ framework = getattr(emitter, 'framework', None) + if framework is not None: + framework.register_type(self.event_type, emitter, self.event_kind) + return BoundEvent(emitter, self.event_type, self.event_kind) + + +class BoundEvent: + + def __repr__(self): + return '<BoundEvent {} bound to {}.{} at {}>'.format( + self.event_type.__name__, + type(self.emitter).__name__, + self.event_kind, + hex(id(self)), + ) + + def __init__(self, emitter, event_type, event_kind): + self.emitter = emitter + self.event_type = event_type + self.event_kind = event_kind + + def emit(self, *args, **kwargs): + """Emit event to all registered observers. + + The current storage state is committed before and after each observer is notified. + """ + framework = self.emitter.framework + key = framework._next_event_key() + event = self.event_type(Handle(self.emitter, self.event_kind, key), *args, **kwargs) + framework._emit(event) + + +class HandleKind: + """Helper descriptor to define the Object.handle_kind field. + + The handle_kind for an object defaults to its type name, but it may + be explicitly overridden if desired. + """ + + def __get__(self, obj, obj_type): + kind = obj_type.__dict__.get("handle_kind") + if kind: + return kind + return obj_type.__name__ + + +class _Metaclass(type): + """Helper class to ensure proper instantiation of Object-derived classes. + + This class currently has a single purpose: events derived from EventSource + that are class attributes of Object-derived classes need to be told what + their name is in that class. For example, in + + class SomeObject(Object): + something_happened = EventSource(SomethingHappened) + + the instance of EventSource needs to know it's called 'something_happened'. + + Starting from python 3.6 we could use __set_name__ on EventSource for this, + but until then this (meta)class does the equivalent work. + + TODO: when we drop support for 3.5 drop this class, and rename _set_name in + EventSource to __set_name__; everything should continue to work. + + """ + + def __new__(typ, *a, **kw): + k = super().__new__(typ, *a, **kw) + # k is now the Object-derived class; loop over its class attributes + for n, v in vars(k).items(): + # we could do duck typing here if we want to support + # non-EventSource-derived shenanigans. We don't. + if isinstance(v, EventSource): + # this is what 3.6+ does automatically for us: + v._set_name(k, n) + return k + + +class Object(metaclass=_Metaclass): + + handle_kind = HandleKind() + + def __init__(self, parent, key): + kind = self.handle_kind + if isinstance(parent, Framework): + self.framework = parent + # Avoid Framework instances having a circular reference to themselves. + if self.framework is self: + self.framework = weakref.proxy(self.framework) + self.handle = Handle(None, kind, key) + else: + self.framework = parent.framework + self.handle = Handle(parent, kind, key) + self.framework._track(self) + + # TODO Detect conflicting handles here. + + @property + def model(self): + return self.framework.model + + +class ObjectEvents(Object): + """Convenience type to allow defining .on attributes at class level.""" + + handle_kind = "on" + + def __init__(self, parent=None, key=None): + if parent is not None: + super().__init__(parent, key) + else: + self._cache = weakref.WeakKeyDictionary() + + def __get__(self, emitter, emitter_type): + if emitter is None: + return self + instance = self._cache.get(emitter) + if instance is None: + # Same type, different instance, more data. 
Doing this unusual construct + # means people can subclass just this one class to have their own 'on'. + instance = self._cache[emitter] = type(self)(emitter) + return instance + + @classmethod + def define_event(cls, event_kind, event_type): + """Define an event on this type at runtime. + + cls: a type to define an event on. + + event_kind: an attribute name that will be used to access the + event. Must be a valid python identifier, not be a keyword + or an existing attribute. + + event_type: a type of the event to define. + + """ + prefix = 'unable to define an event with event_kind that ' + if not event_kind.isidentifier(): + raise RuntimeError(prefix + 'is not a valid python identifier: ' + event_kind) + elif keyword.iskeyword(event_kind): + raise RuntimeError(prefix + 'is a python keyword: ' + event_kind) + try: + getattr(cls, event_kind) + raise RuntimeError( + prefix + 'overlaps with an existing type {} attribute: {}'.format(cls, event_kind)) + except AttributeError: + pass + + event_descriptor = EventSource(event_type) + event_descriptor._set_name(cls, event_kind) + setattr(cls, event_kind, event_descriptor) + + def events(self): + """Return a mapping of event_kinds to bound_events for all available events. + """ + events_map = {} + # We have to iterate over the class rather than instance to allow for properties which + # might call this method (e.g., event views), leading to infinite recursion. + for attr_name, attr_value in inspect.getmembers(type(self)): + if isinstance(attr_value, EventSource): + # We actually care about the bound_event, however, since it + # provides the most info for users of this method. + event_kind = attr_name + bound_event = getattr(self, event_kind) + events_map[event_kind] = bound_event + return events_map + + def __getitem__(self, key): + return PrefixedEvents(self, key) + + +class PrefixedEvents: + + def __init__(self, emitter, key): + self._emitter = emitter + self._prefix = key.replace("-", "_") + '_' + + def __getattr__(self, name): + return getattr(self._emitter, self._prefix + name) + + +class PreCommitEvent(EventBase): + pass + + +class CommitEvent(EventBase): + pass + + +class FrameworkEvents(ObjectEvents): + pre_commit = EventSource(PreCommitEvent) + commit = EventSource(CommitEvent) + + +class NoTypeError(Exception): + + def __init__(self, handle_path): + self.handle_path = handle_path + + def __str__(self): + return "cannot restore {} since no class was registered for it".format(self.handle_path) + + +# the message to show to the user when a pdb breakpoint goes active +_BREAKPOINT_WELCOME_MESSAGE = """ +Starting pdb to debug charm operator. +Run `h` for help, `c` to continue, or `exit`/CTRL-d to abort. +Future breakpoints may interrupt execution again. +More details at https://discourse.jujucharms.com/t/debugging-charm-hooks + +""" + + +_event_regex = r'^(|.*/)on/[a-zA-Z_]+\[\d+\]$' + + +class Framework(Object): + + on = FrameworkEvents() + + # Override properties from Object so that we can set them in __init__. 
+ model = None + meta = None + charm_dir = None + + def __init__(self, storage, charm_dir, meta, model): + + super().__init__(self, None) + + self.charm_dir = charm_dir + self.meta = meta + self.model = model + self._observers = [] # [(observer_path, method_name, parent_path, event_key)] + self._observer = weakref.WeakValueDictionary() # {observer_path: observer} + self._objects = weakref.WeakValueDictionary() + self._type_registry = {} # {(parent_path, kind): cls} + self._type_known = set() # {cls} + + if isinstance(storage, (str, pathlib.Path)): + logger.warning( + "deprecated: Framework now takes a Storage not a path") + storage = SQLiteStorage(storage) + self._storage = storage + + # We can't use the higher-level StoredState because it relies on events. + self.register_type(StoredStateData, None, StoredStateData.handle_kind) + stored_handle = Handle(None, StoredStateData.handle_kind, '_stored') + try: + self._stored = self.load_snapshot(stored_handle) + except NoSnapshotError: + self._stored = StoredStateData(self, '_stored') + self._stored['event_count'] = 0 + + # Hook into builtin breakpoint, so if Python >= 3.7, devs will be able to just do + # breakpoint(); if Python < 3.7, this doesn't affect anything + sys.breakpointhook = self.breakpoint + + # Flag to indicate that we already presented the welcome message in a debugger breakpoint + self._breakpoint_welcomed = False + + # Parse once the env var, which may be used multiple times later + debug_at = os.environ.get('JUJU_DEBUG_AT') + self._juju_debug_at = debug_at.split(',') if debug_at else () + + def close(self): + self._storage.close() + + def _track(self, obj): + """Track object and ensure it is the only object created using its handle path.""" + if obj is self: + # Framework objects don't track themselves + return + if obj.handle.path in self.framework._objects: + raise RuntimeError( + 'two objects claiming to be {} have been created'.format(obj.handle.path)) + self._objects[obj.handle.path] = obj + + def _forget(self, obj): + """Stop tracking the given object. See also _track.""" + self._objects.pop(obj.handle.path, None) + + def commit(self): + # Give a chance for objects to persist data they want to before a commit is made. + self.on.pre_commit.emit() + # Make sure snapshots are saved by instances of StoredStateData. Any possible state + # modifications in on_commit handlers of instances of other classes will not be persisted. + self.on.commit.emit() + # Save our event count after all events have been emitted. + self.save_snapshot(self._stored) + self._storage.commit() + + def register_type(self, cls, parent, kind=None): + if parent and not isinstance(parent, Handle): + parent = parent.handle + if parent: + parent_path = parent.path + else: + parent_path = None + if not kind: + kind = cls.handle_kind + self._type_registry[(parent_path, kind)] = cls + self._type_known.add(cls) + + def save_snapshot(self, value): + """Save a persistent snapshot of the provided value. + + The provided value must implement the following interface: + + value.handle = Handle(...) + value.snapshot() => {...} # Simple builtin types only. + value.restore(snapshot) # Restore custom state from prior snapshot. 
+ """ + if type(value) not in self._type_known: + raise RuntimeError( + 'cannot save {} values before registering that type'.format(type(value).__name__)) + data = value.snapshot() + + # Use marshal as a validator, enforcing the use of simple types, as we later the + # information is really pickled, which is too error prone for future evolution of the + # stored data (e.g. if the developer stores a custom object and later changes its + # class name; when unpickling the original class will not be there and event + # data loading will fail). + try: + marshal.dumps(data) + except ValueError: + msg = "unable to save the data for {}, it must contain only simple types: {!r}" + raise ValueError(msg.format(value.__class__.__name__, data)) + + self._storage.save_snapshot(value.handle.path, data) + + def load_snapshot(self, handle): + parent_path = None + if handle.parent: + parent_path = handle.parent.path + cls = self._type_registry.get((parent_path, handle.kind)) + if not cls: + raise NoTypeError(handle.path) + data = self._storage.load_snapshot(handle.path) + obj = cls.__new__(cls) + obj.framework = self + obj.handle = handle + obj.restore(data) + self._track(obj) + return obj + + def drop_snapshot(self, handle): + self._storage.drop_snapshot(handle.path) + + def observe(self, bound_event: BoundEvent, observer: types.MethodType): + """Register observer to be called when bound_event is emitted. + + The bound_event is generally provided as an attribute of the object that emits + the event, and is created in this style: + + class SomeObject: + something_happened = Event(SomethingHappened) + + That event may be observed as: + + framework.observe(someobj.something_happened, self._on_something_happened) + + Raises: + RuntimeError: if bound_event or observer are the wrong type. + """ + if not isinstance(bound_event, BoundEvent): + raise RuntimeError( + 'Framework.observe requires a BoundEvent as second parameter, got {}'.format( + bound_event)) + if not isinstance(observer, types.MethodType): + # help users of older versions of the framework + if isinstance(observer, charm.CharmBase): + raise TypeError( + 'observer methods must now be explicitly provided;' + ' please replace observe(self.on.{0}, self)' + ' with e.g. observe(self.on.{0}, self._on_{0})'.format( + bound_event.event_kind)) + raise RuntimeError( + 'Framework.observe requires a method as third parameter, got {}'.format(observer)) + + event_type = bound_event.event_type + event_kind = bound_event.event_kind + emitter = bound_event.emitter + + self.register_type(event_type, emitter, event_kind) + + if hasattr(emitter, "handle"): + emitter_path = emitter.handle.path + else: + raise RuntimeError( + 'event emitter {} must have a "handle" attribute'.format(type(emitter).__name__)) + + # Validate that the method has an acceptable call signature. + sig = inspect.signature(observer) + # Self isn't included in the params list, so the first arg will be the event. + extra_params = list(sig.parameters.values())[1:] + + method_name = observer.__name__ + observer = observer.__self__ + if not sig.parameters: + raise TypeError( + '{}.{} must accept event parameter'.format(type(observer).__name__, method_name)) + elif any(param.default is inspect.Parameter.empty for param in extra_params): + # Allow for additional optional params, since there's no reason to exclude them, but + # required params will break. 
+ raise TypeError( + '{}.{} has extra required parameter'.format(type(observer).__name__, method_name)) + + # TODO Prevent the exact same parameters from being registered more than once. + + self._observer[observer.handle.path] = observer + self._observers.append((observer.handle.path, method_name, emitter_path, event_kind)) + + def _next_event_key(self): + """Return the next event key that should be used, incrementing the internal counter.""" + # Increment the count first; this means the keys will start at 1, and 0 + # means no events have been emitted. + self._stored['event_count'] += 1 + return str(self._stored['event_count']) + + def _emit(self, event): + """See BoundEvent.emit for the public way to call this.""" + + saved = False + event_path = event.handle.path + event_kind = event.handle.kind + parent_path = event.handle.parent.path + # TODO Track observers by (parent_path, event_kind) rather than as a list of + # all observers. Avoiding linear search through all observers for every event + for observer_path, method_name, _parent_path, _event_kind in self._observers: + if _parent_path != parent_path: + continue + if _event_kind and _event_kind != event_kind: + continue + if not saved: + # Save the event for all known observers before the first notification + # takes place, so that either everyone interested sees it, or nobody does. + self.save_snapshot(event) + saved = True + # Again, only commit this after all notices are saved. + self._storage.save_notice(event_path, observer_path, method_name) + if saved: + self._reemit(event_path) + + def reemit(self): + """Reemit previously deferred events to the observers that deferred them. + + Only the specific observers that have previously deferred the event will be + notified again. Observers that asked to be notified about events after it's + been first emitted won't be notified, as that would mean potentially observing + events out of order. + """ + self._reemit() + + def _reemit(self, single_event_path=None): + last_event_path = None + deferred = True + for event_path, observer_path, method_name in self._storage.notices(single_event_path): + event_handle = Handle.from_path(event_path) + + if last_event_path != event_path: + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + last_event_path = event_path + deferred = False + + try: + event = self.load_snapshot(event_handle) + except NoTypeError: + self._storage.drop_notice(event_path, observer_path, method_name) + continue + + event.deferred = False + observer = self._observer.get(observer_path) + if observer: + custom_handler = getattr(observer, method_name, None) + if custom_handler: + event_is_from_juju = isinstance(event, charm.HookEvent) + event_is_action = isinstance(event, charm.ActionEvent) + if (event_is_from_juju or event_is_action) and 'hook' in self._juju_debug_at: + # Present the welcome message and run under PDB. + self._show_debug_code_message() + pdb.runcall(custom_handler, event) + else: + # Regular call to the registered method. + custom_handler(event) + + if event.deferred: + deferred = True + else: + self._storage.drop_notice(event_path, observer_path, method_name) + # We intentionally consider this event to be dead and reload it from + # scratch in the next path. + self.framework._forget(event) + + if not deferred and last_event_path is not None: + self._storage.drop_snapshot(last_event_path) + + def _show_debug_code_message(self): + """Present the welcome message (only once!) 
when using debugger functionality.""" + if not self._breakpoint_welcomed: + self._breakpoint_welcomed = True + print(_BREAKPOINT_WELCOME_MESSAGE, file=sys.stderr, end='') + + def breakpoint(self, name=None): + """Add breakpoint, optionally named, at the place where this method is called. + + For the breakpoint to be activated the JUJU_DEBUG_AT environment variable + must be set to "all" or to the specific name parameter provided, if any. In every + other situation calling this method does nothing. + + The framework also provides a standard breakpoint named "hook", that will + stop execution when a hook event is about to be handled. + + For those reasons, the "all" and "hook" breakpoint names are reserved. + """ + # If given, validate the name comply with all the rules + if name is not None: + if not isinstance(name, str): + raise TypeError('breakpoint names must be strings') + if name in ('hook', 'all'): + raise ValueError('breakpoint names "all" and "hook" are reserved') + if not re.match(r'^[a-z0-9]([a-z0-9\-]*[a-z0-9])?$', name): + raise ValueError('breakpoint names must look like "foo" or "foo-bar"') + + indicated_breakpoints = self._juju_debug_at + if not indicated_breakpoints: + return + + if 'all' in indicated_breakpoints or name in indicated_breakpoints: + self._show_debug_code_message() + + # If we call set_trace() directly it will open the debugger *here*, so indicating + # it to use our caller's frame + code_frame = inspect.currentframe().f_back + pdb.Pdb().set_trace(code_frame) + else: + logger.warning( + "Breakpoint %r skipped (not found in the requested breakpoints: %s)", + name, indicated_breakpoints) + + def remove_unreferenced_events(self): + """Remove events from storage that are not referenced. + + In older versions of the framework, events that had no observers would get recorded but + never deleted. This makes a best effort to find these events and remove them from the + database. + """ + event_regex = re.compile(_event_regex) + to_remove = [] + for handle_path in self._storage.list_snapshots(): + if event_regex.match(handle_path): + notices = self._storage.notices(handle_path) + if next(notices, None) is None: + # There are no notices for this handle_path, it is valid to remove it + to_remove.append(handle_path) + for handle_path in to_remove: + self._storage.drop_snapshot(handle_path) + + +class StoredStateData(Object): + + def __init__(self, parent, attr_name): + super().__init__(parent, attr_name) + self._cache = {} + self.dirty = False + + def __getitem__(self, key): + return self._cache.get(key) + + def __setitem__(self, key, value): + self._cache[key] = value + self.dirty = True + + def __contains__(self, key): + return key in self._cache + + def snapshot(self): + return self._cache + + def restore(self, snapshot): + self._cache = snapshot + self.dirty = False + + def on_commit(self, event): + if self.dirty: + self.framework.save_snapshot(self) + self.dirty = False + + +class BoundStoredState: + + def __init__(self, parent, attr_name): + parent.framework.register_type(StoredStateData, parent) + + handle = Handle(parent, StoredStateData.handle_kind, attr_name) + try: + data = parent.framework.load_snapshot(handle) + except NoSnapshotError: + data = StoredStateData(parent, attr_name) + + # __dict__ is used to avoid infinite recursion. 
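+        # (assigning via normal attribute syntax would call __setattr__ below,
+        # which reads self._data through __getattr__ before it exists)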
+        self.__dict__["_data"] = data
+        self.__dict__["_attr_name"] = attr_name
+
+        parent.framework.observe(parent.framework.on.commit, self._data.on_commit)
+
+    def __getattr__(self, key):
+        # "on" is the only reserved key that can't be used in the data map.
+        if key == "on":
+            return self._data.on
+        if key not in self._data:
+            raise AttributeError("attribute '{}' is not stored".format(key))
+        return _wrap_stored(self._data, self._data[key])
+
+    def __setattr__(self, key, value):
+        if key == "on":
+            raise AttributeError("attribute 'on' is reserved and cannot be set")
+
+        value = _unwrap_stored(self._data, value)
+
+        if not isinstance(value, (type(None), int, float, str, bytes, list, dict, set)):
+            raise AttributeError(
+                'attribute {!r} cannot be a {}: must be int/float/dict/list/etc'.format(
+                    key, type(value).__name__))
+
+        self._data[key] = value
+
+    def set_default(self, **kwargs):
+        """Set the value of any given key if it has not already been set."""
+        for k, v in kwargs.items():
+            if k not in self._data:
+                self._data[k] = v
+
+
+class StoredState:
+    """A class used to store data the charm needs persisted across invocations.
+
+    Example::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+    Instances of `MyClass` can transparently save state between invocations by
+    setting attributes on `_stored`. Initial state should be set with
+    `set_default` on the bound object, that is::
+
+        class MyClass(Object):
+            _stored = StoredState()
+
+            def __init__(self, parent, key):
+                super().__init__(parent, key)
+                self._stored.set_default(seen=set())
+                self.framework.observe(self.on.seen, self._on_seen)
+
+            def _on_seen(self, event):
+                self._stored.seen.add(event.uuid)
+
+    """
+
+    def __init__(self):
+        self.parent_type = None
+        self.attr_name = None
+
+    def __get__(self, parent, parent_type=None):
+        if self.parent_type is not None and self.parent_type not in parent_type.mro():
+            # the StoredState instance is being shared between two unrelated classes
+            # -> unclear what is expected of us -> bail out
+            raise RuntimeError(
+                'StoredState shared by {} and {}'.format(
+                    self.parent_type.__name__, parent_type.__name__))
+
+        if parent is None:
+            # accessing via the class directly (e.g. MyClass.stored)
+            return self
+
+        bound = None
+        if self.attr_name is not None:
+            bound = parent.__dict__.get(self.attr_name)
+            if bound is not None:
+                # we already have the thing from a previous pass, huzzah
+                return bound
+
+        # need to find ourselves amongst the parent's bases
+        for cls in parent_type.mro():
+            for attr_name, attr_value in cls.__dict__.items():
+                if attr_value is not self:
+                    continue
+                # we've found ourselves! is it the first time?
+ if bound is not None: + # the StoredState instance is being stored in two different + # attributes -> unclear what is expected of us -> bail out + raise RuntimeError("StoredState shared by {0}.{1} and {0}.{2}".format( + cls.__name__, self.attr_name, attr_name)) + # we've found ourselves for the first time; save where, and bind the object + self.attr_name = attr_name + self.parent_type = cls + bound = BoundStoredState(parent, attr_name) + + if bound is not None: + # cache the bound object to avoid the expensive lookup the next time + # (don't use setattr, to keep things symmetric with the fast-path lookup above) + parent.__dict__[self.attr_name] = bound + return bound + + raise AttributeError( + 'cannot find {} attribute in type {}'.format( + self.__class__.__name__, parent_type.__name__)) + + +def _wrap_stored(parent_data, value): + t = type(value) + if t is dict: + return StoredDict(parent_data, value) + if t is list: + return StoredList(parent_data, value) + if t is set: + return StoredSet(parent_data, value) + return value + + +def _unwrap_stored(parent_data, value): + t = type(value) + if t is StoredDict or t is StoredList or t is StoredSet: + return value._under + return value + + +class StoredDict(collections.abc.MutableMapping): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, key): + return _wrap_stored(self._stored_data, self._under[key]) + + def __setitem__(self, key, value): + self._under[key] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, key): + del self._under[key] + self._stored_data.dirty = True + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + def __eq__(self, other): + if isinstance(other, StoredDict): + return self._under == other._under + elif isinstance(other, collections.abc.Mapping): + return self._under == other + else: + return NotImplemented + + +class StoredList(collections.abc.MutableSequence): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def __getitem__(self, index): + return _wrap_stored(self._stored_data, self._under[index]) + + def __setitem__(self, index, value): + self._under[index] = _unwrap_stored(self._stored_data, value) + self._stored_data.dirty = True + + def __delitem__(self, index): + del self._under[index] + self._stored_data.dirty = True + + def __len__(self): + return len(self._under) + + def insert(self, index, value): + self._under.insert(index, value) + self._stored_data.dirty = True + + def append(self, value): + self._under.append(value) + self._stored_data.dirty = True + + def __eq__(self, other): + if isinstance(other, StoredList): + return self._under == other._under + elif isinstance(other, collections.abc.Sequence): + return self._under == other + else: + return NotImplemented + + def __lt__(self, other): + if isinstance(other, StoredList): + return self._under < other._under + elif isinstance(other, collections.abc.Sequence): + return self._under < other + else: + return NotImplemented + + def __le__(self, other): + if isinstance(other, StoredList): + return self._under <= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under <= other + else: + return NotImplemented + + def __gt__(self, other): + if isinstance(other, StoredList): + return self._under > other._under + elif isinstance(other, collections.abc.Sequence): + return self._under > other + else: + 
return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredList): + return self._under >= other._under + elif isinstance(other, collections.abc.Sequence): + return self._under >= other + else: + return NotImplemented + + +class StoredSet(collections.abc.MutableSet): + + def __init__(self, stored_data, under): + self._stored_data = stored_data + self._under = under + + def add(self, key): + self._under.add(key) + self._stored_data.dirty = True + + def discard(self, key): + self._under.discard(key) + self._stored_data.dirty = True + + def __contains__(self, key): + return key in self._under + + def __iter__(self): + return self._under.__iter__() + + def __len__(self): + return len(self._under) + + @classmethod + def _from_iterable(cls, it): + """Construct an instance of the class from any iterable input. + + Per https://docs.python.org/3/library/collections.abc.html + if the Set mixin is being used in a class with a different constructor signature, + you will need to override _from_iterable() with a classmethod that can construct + new instances from an iterable argument. + """ + return set(it) + + def __le__(self, other): + if isinstance(other, StoredSet): + return self._under <= other._under + elif isinstance(other, collections.abc.Set): + return self._under <= other + else: + return NotImplemented + + def __ge__(self, other): + if isinstance(other, StoredSet): + return self._under >= other._under + elif isinstance(other, collections.abc.Set): + return self._under >= other + else: + return NotImplemented + + def __eq__(self, other): + if isinstance(other, StoredSet): + return self._under == other._under + elif isinstance(other, collections.abc.Set): + return self._under == other + else: + return NotImplemented diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/jujuversion.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/jujuversion.py new file mode 100755 index 00000000..b2b8177d --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/jujuversion.py @@ -0,0 +1,98 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from functools import total_ordering + + +@total_ordering +class JujuVersion: + + PATTERN = r'''^ + (?P<major>\d{1,9})\.(?P<minor>\d{1,9}) # <major> and <minor> numbers are always there + ((?:\.|-(?P<tag>[a-z]+))(?P<patch>\d{1,9}))? # sometimes with .<patch> or -<tag><patch> + (\.(?P<build>\d{1,9}))?$ # and sometimes with a <build> number. 
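+        # Illustrative examples of strings this pattern accepts (these lines
+        # are re.VERBOSE comments, so they are ignored by the regex engine):
+        #   '2.7.8'     -> major=2, minor=7, patch=8
+        #   '2.8-rc1.1' -> major=2, minor=8, tag='rc', patch=1, build=1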
+ ''' + + def __init__(self, version): + m = re.match(self.PATTERN, version, re.VERBOSE) + if not m: + raise RuntimeError('"{}" is not a valid Juju version string'.format(version)) + + d = m.groupdict() + self.major = int(m.group('major')) + self.minor = int(m.group('minor')) + self.tag = d['tag'] or '' + self.patch = int(d['patch'] or 0) + self.build = int(d['build'] or 0) + + def __repr__(self): + if self.tag: + s = '{}.{}-{}{}'.format(self.major, self.minor, self.tag, self.patch) + else: + s = '{}.{}.{}'.format(self.major, self.minor, self.patch) + if self.build > 0: + s += '.{}'.format(self.build) + return s + + def __eq__(self, other): + if self is other: + return True + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + return ( + self.major == other.major + and self.minor == other.minor + and self.tag == other.tag + and self.build == other.build + and self.patch == other.patch) + + def __lt__(self, other): + if self is other: + return False + if isinstance(other, str): + other = type(self)(other) + elif not isinstance(other, JujuVersion): + raise RuntimeError('cannot compare Juju version "{}" with "{}"'.format(self, other)) + + if self.major != other.major: + return self.major < other.major + elif self.minor != other.minor: + return self.minor < other.minor + elif self.tag != other.tag: + if not self.tag: + return False + elif not other.tag: + return True + return self.tag < other.tag + elif self.patch != other.patch: + return self.patch < other.patch + elif self.build != other.build: + return self.build < other.build + return False + + @classmethod + def from_environ(cls) -> 'JujuVersion': + """Build a JujuVersion from JUJU_VERSION.""" + v = os.environ.get('JUJU_VERSION') + if not v: + raise RuntimeError('environ has no JUJU_VERSION') + return cls(v) + + def has_app_data(self) -> bool: + """Determine whether this juju version knows about app data.""" + return (self.major, self.minor, self.patch) >= (2, 7, 0) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/lib/__init__.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/lib/__init__.py new file mode 100644 index 00000000..edb9fcac --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/lib/__init__.py @@ -0,0 +1,194 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import os +import re + +from ast import literal_eval +from importlib.util import module_from_spec +from importlib.machinery import ModuleSpec +from pkgutil import get_importer +from types import ModuleType + + +_libraries = None + +_libline_re = re.compile(r'''^LIB([A-Z]+)\s*=\s*([0-9]+|['"][a-zA-Z0-9_.\-@]+['"])''') +_libname_re = re.compile(r'''^[a-z][a-z0-9]+$''') + +# Not perfect, but should do for now. 
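+# As a rough sketch of what this accepts: 'alice@example.com' and
+# 'charmers+ops@canonical.com' match, while a bare name with no domain
+# ('alice') does not.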
+_libauthor_re = re.compile(r'''^[A-Za-z0-9_+.-]+@[a-z0-9_-]+(?:\.[a-z0-9_-]+)*\.[a-z]{2,3}$''')
+
+
+def use(name: str, api: int, author: str) -> ModuleType:
+    """Use a library from the ops libraries.
+
+    Args:
+        name: the name of the library requested.
+        api: the API version of the library.
+        author: the author of the library, identified by email address.
+    Raises:
+        ImportError: if the library cannot be found.
+        TypeError: if the name, api, or author are the wrong type.
+        ValueError: if the name, api, or author are invalid.
+    """
+    if not isinstance(name, str):
+        raise TypeError("invalid library name: {!r} (must be a str)".format(name))
+    if not isinstance(author, str):
+        raise TypeError("invalid library author: {!r} (must be a str)".format(author))
+    if not isinstance(api, int):
+        raise TypeError("invalid library API: {!r} (must be an int)".format(api))
+    if api < 0:
+        raise ValueError('invalid library api: {} (must be ≥0)'.format(api))
+    if not _libname_re.match(name):
+        raise ValueError(
+            "invalid library name: {!r} (must be lowercase letters and digits,"
+            " starting with a letter)".format(name))
+    if not _libauthor_re.match(author):
+        raise ValueError("invalid library author email: {!r}".format(author))
+
+    if _libraries is None:
+        autoimport()
+
+    versions = _libraries.get((name, author), ())
+    for lib in versions:
+        if lib.api == api:
+            return lib.import_module()
+
+    others = ', '.join(str(lib.api) for lib in versions)
+    if others:
+        msg = 'cannot find "{}" from "{}" with API version {} (have {})'.format(
+            name, author, api, others)
+    else:
+        msg = 'cannot find library "{}" from "{}"'.format(name, author)
+
+    raise ImportError(msg, name=name)
+
+
+def autoimport():
+    """Find all libs in the path and enable use of them.
+
+    You only need to call this if you've installed a package or
+    otherwise changed sys.path in the current run, and need to see the
+    changes. Otherwise libraries are found on first call of `use`.
+    """
+    global _libraries
+    _libraries = {}
+    for spec in _find_all_specs(sys.path):
+        lib = _parse_lib(spec)
+        if lib is None:
+            continue
+
+        versions = _libraries.setdefault((lib.name, lib.author), [])
+        versions.append(lib)
+        versions.sort(reverse=True)
+
+
+def _find_all_specs(path):
+    for sys_dir in path:
+        if sys_dir == "":
+            sys_dir = "."
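+            # An empty entry on sys.path means the current working directory,
+            # so substitute "." to keep os.listdir and get_importer happy.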
+ try: + top_dirs = os.listdir(sys_dir) + except OSError: + continue + for top_dir in top_dirs: + opslib = os.path.join(sys_dir, top_dir, 'opslib') + try: + lib_dirs = os.listdir(opslib) + except OSError: + continue + finder = get_importer(opslib) + if finder is None or not hasattr(finder, 'find_spec'): + continue + for lib_dir in lib_dirs: + spec = finder.find_spec(lib_dir) + if spec is None: + continue + if spec.loader is None: + # a namespace package; not supported + continue + yield spec + + +# only the first this many lines of a file are looked at for the LIB* constants +_MAX_LIB_LINES = 99 + + +def _parse_lib(spec): + if spec.origin is None: + return None + + _expected = {'NAME': str, 'AUTHOR': str, 'API': int, 'PATCH': int} + + try: + with open(spec.origin, 'rt', encoding='utf-8') as f: + libinfo = {} + for n, line in enumerate(f): + if len(libinfo) == len(_expected): + break + if n > _MAX_LIB_LINES: + return None + m = _libline_re.match(line) + if m is None: + continue + key, value = m.groups() + if key in _expected: + value = literal_eval(value) + if not isinstance(value, _expected[key]): + return None + libinfo[key] = value + else: + if len(libinfo) != len(_expected): + return None + except Exception: + return None + + return _Lib(spec, libinfo['NAME'], libinfo['AUTHOR'], libinfo['API'], libinfo['PATCH']) + + +class _Lib: + + def __init__(self, spec: ModuleSpec, name: str, author: str, api: int, patch: int): + self.spec = spec + self.name = name + self.author = author + self.api = api + self.patch = patch + + self._module = None + + def __repr__(self): + return "<_Lib {0.name} by {0.author}, API {0.api}, patch {0.patch}>".format(self) + + def import_module(self) -> ModuleType: + if self._module is None: + module = module_from_spec(self.spec) + self.spec.loader.exec_module(module) + self._module = module + return self._module + + def __eq__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a == b + + def __lt__(self, other): + if not isinstance(other, _Lib): + return NotImplemented + a = (self.name, self.author, self.api, self.patch) + b = (other.name, other.author, other.api, other.patch) + return a < b diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/log.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/log.py new file mode 100644 index 00000000..4aac5543 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/log.py @@ -0,0 +1,51 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
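+
+# A minimal usage sketch (illustrative; 'backend' stands in for the
+# framework's ops.model._ModelBackend, which provides juju_log):
+#
+#     import logging
+#     from ops.log import setup_root_logging
+#
+#     setup_root_logging(backend)
+#     logging.getLogger().info('hello')  # forwarded to juju-log as INFO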
+
+import sys
+import logging
+
+
+class JujuLogHandler(logging.Handler):
+    """A handler for sending logs to Juju via juju-log."""
+
+    def __init__(self, model_backend, level=logging.DEBUG):
+        super().__init__(level)
+        self.model_backend = model_backend
+
+    def emit(self, record):
+        self.model_backend.juju_log(record.levelname, self.format(record))
+
+
+def setup_root_logging(model_backend, debug=False):
+    """Set up Python logging to forward messages to juju-log.
+
+    By default, logging is set to DEBUG level, and messages will be filtered by Juju.
+    Charmers can also set their own default log level with::
+
+        logging.getLogger().setLevel(logging.INFO)
+
+    model_backend -- a ModelBackend to use for juju-log
+    debug -- if True, write logs to stderr as well as to juju-log.
+    """
+    logger = logging.getLogger()
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(JujuLogHandler(model_backend))
+    if debug:
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
+        handler.setFormatter(formatter)
+        logger.addHandler(handler)
+
+    sys.excepthook = lambda etype, value, tb: logger.error(
+        "Uncaught exception while in charm code:", exc_info=(etype, value, tb))
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/main.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/main.py
new file mode 100755
index 00000000..6dc31c35
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/main.py
@@ -0,0 +1,348 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import logging
+import os
+import subprocess
+import sys
+import warnings
+from pathlib import Path
+
+import yaml
+
+import ops.charm
+import ops.framework
+import ops.model
+import ops.storage
+
+from ops.log import setup_root_logging
+
+CHARM_STATE_FILE = '.unit-state.db'
+
+
+logger = logging.getLogger()
+
+
+def _get_charm_dir():
+    charm_dir = os.environ.get("JUJU_CHARM_DIR")
+    if charm_dir is None:
+        # Assume $JUJU_CHARM_DIR/lib/ops/main.py structure.
+        charm_dir = Path('{}/../../..'.format(__file__)).resolve()
+    else:
+        charm_dir = Path(charm_dir).resolve()
+    return charm_dir
+
+
+def _create_event_link(charm, bound_event):
+    """Create a symlink for a particular event.
+
+    charm -- A charm object.
+    bound_event -- An event for which to create a symlink.
+    """
+    if issubclass(bound_event.event_type, ops.charm.HookEvent):
+        event_dir = charm.framework.charm_dir / 'hooks'
+        event_path = event_dir / bound_event.event_kind.replace('_', '-')
+    elif issubclass(bound_event.event_type, ops.charm.ActionEvent):
+        if not bound_event.event_kind.endswith("_action"):
+            raise RuntimeError(
+                'action event name {} needs _action suffix'.format(bound_event.event_kind))
+        event_dir = charm.framework.charm_dir / 'actions'
+        # The event_kind is suffixed with "_action" while the executable is not.
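+        # For example, a bound event 'do_backup_action' resolves to the
+        # executable 'actions/do-backup'.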
+        event_path = event_dir / bound_event.event_kind[:-len('_action')].replace('_', '-')
+    else:
+        raise RuntimeError(
+            'cannot create a symlink: unsupported event type {}'.format(bound_event.event_type))
+
+    event_dir.mkdir(exist_ok=True)
+    if not event_path.exists():
+        # CPython has different implementations for populating sys.argv[0] for Linux and Windows.
+        # For Windows it is always an absolute path (any symlinks are resolved)
+        # while for Linux it can be a relative path.
+        target_path = os.path.relpath(os.path.realpath(sys.argv[0]), str(event_dir))
+
+        # Ignore the non-symlink files or directories
+        # assuming the charm author knows what they are doing.
+        logger.debug(
+            'Creating a new relative symlink at %s pointing to %s',
+            event_path, target_path)
+        event_path.symlink_to(target_path)
+
+
+def _setup_event_links(charm_dir, charm):
+    """Set up links for supported events that originate from Juju.
+
+    Whether a charm can handle an event or not can be determined by
+    introspecting which events are defined on it.
+
+    Hooks or actions are created as symlinks to the charm code file
+    which is determined by inspecting symlinks provided by the charm
+    author at hooks/install or hooks/start.
+
+    charm_dir -- A root directory of the charm.
+    charm -- An instance of the Charm class.
+
+    """
+    for bound_event in charm.on.events().values():
+        # Only events that originate from Juju need symlinks.
+        if issubclass(bound_event.event_type, (ops.charm.HookEvent, ops.charm.ActionEvent)):
+            _create_event_link(charm, bound_event)
+
+
+def _emit_charm_event(charm, event_name):
+    """Emits a charm event based on a Juju event name.
+
+    charm -- A charm instance to emit an event from.
+    event_name -- A Juju event name to emit on a charm.
+    """
+    event_to_emit = None
+    try:
+        event_to_emit = getattr(charm.on, event_name)
+    except AttributeError:
+        logger.debug("Event %s not defined for %s.", event_name, charm)
+
+    # If the event is not supported by the charm implementation, do
+    # not error out or try to emit it. This is to support rollbacks.
+    if event_to_emit is not None:
+        args, kwargs = _get_event_args(charm, event_to_emit)
+        logger.debug('Emitting Juju event %s.', event_name)
+        event_to_emit.emit(*args, **kwargs)
+
+
+def _get_event_args(charm, bound_event):
+    event_type = bound_event.event_type
+    model = charm.framework.model
+
+    if issubclass(event_type, ops.charm.RelationEvent):
+        relation_name = os.environ['JUJU_RELATION']
+        relation_id = int(os.environ['JUJU_RELATION_ID'].split(':')[-1])
+        relation = model.get_relation(relation_name, relation_id)
+    else:
+        relation = None
+
+    remote_app_name = os.environ.get('JUJU_REMOTE_APP', '')
+    remote_unit_name = os.environ.get('JUJU_REMOTE_UNIT', '')
+    if remote_app_name or remote_unit_name:
+        if not remote_app_name:
+            if '/' not in remote_unit_name:
+                raise RuntimeError('invalid remote unit name: {}'.format(remote_unit_name))
+            remote_app_name = remote_unit_name.split('/')[0]
+        args = [relation, model.get_app(remote_app_name)]
+        if remote_unit_name:
+            args.append(model.get_unit(remote_unit_name))
+        return args, {}
+    elif relation:
+        return [relation], {}
+    return [], {}
+
+
+class _Dispatcher:
+    """Encapsulate how to figure out what event Juju wants us to run.
+
+    Also knows how to run "legacy" hooks when Juju called us via a top-level
+    ``dispatch`` binary.
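+
+    For example, under a dispatch-aware Juju, JUJU_DISPATCH_PATH=hooks/install
+    maps to an event_name of 'install', while a legacy invocation via an
+    'actions/do-backup' symlink maps to 'do_backup_action'.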
+
+    Args:
+        charm_dir: the toplevel directory of the charm
+
+    Attributes:
+        event_name: the name of the event to run
+        is_dispatch_aware: are we running under a Juju that knows about the
+            dispatch binary?
+
+    """
+
+    def __init__(self, charm_dir: Path):
+        self._charm_dir = charm_dir
+        self._exec_path = Path(sys.argv[0])
+
+        if 'JUJU_DISPATCH_PATH' in os.environ and (charm_dir / 'dispatch').exists():
+            self._init_dispatch()
+        else:
+            self._init_legacy()
+
+    def ensure_event_links(self, charm):
+        """Make sure necessary symlinks are present on disk."""
+
+        if self.is_dispatch_aware:
+            # links aren't needed
+            return
+
+        # When a charm is force-upgraded and a unit is in an error state Juju
+        # does not run upgrade-charm and instead runs the failed hook followed
+        # by config-changed. Given the nature of force-upgrading the hook setup
+        # code is not triggered on config-changed.
+        #
+        # 'start' event is included as Juju does not fire the install event for
+        # K8s charms (see LP: #1854635).
+        if (self.event_name in ('install', 'start', 'upgrade_charm')
+                or self.event_name.endswith('_storage_attached')):
+            _setup_event_links(self._charm_dir, charm)
+
+    def run_any_legacy_hook(self):
+        """Run any extant legacy hook.
+
+        If there is both a dispatch file and a legacy hook for the
+        current event, run the wanted legacy hook.
+        """
+
+        if not self.is_dispatch_aware:
+            # we *are* the legacy hook
+            return
+
+        dispatch_path = self._charm_dir / self._dispatch_path
+        if not dispatch_path.exists():
+            logger.debug("Legacy %s does not exist.", self._dispatch_path)
+            return
+
+        # super strange that there isn't an is_executable
+        if not os.access(str(dispatch_path), os.X_OK):
+            logger.warning("Legacy %s exists but is not executable.", self._dispatch_path)
+            return
+
+        if dispatch_path.resolve() == self._exec_path.resolve():
+            logger.debug("Legacy %s is just a link to ourselves.", self._dispatch_path)
+            return
+
+        argv = sys.argv.copy()
+        argv[0] = str(dispatch_path)
+        logger.info("Running legacy %s.", self._dispatch_path)
+        try:
+            subprocess.run(argv, check=True)
+        except subprocess.CalledProcessError as e:
+            logger.warning(
+                "Legacy %s exited with status %d.",
+                self._dispatch_path, e.returncode)
+            sys.exit(e.returncode)
+        else:
+            logger.debug("Legacy %s exited with status 0.", self._dispatch_path)
+
+    def _set_name_from_path(self, path: Path):
+        """Sets the name attribute to that which can be inferred from the given path."""
+        name = path.name.replace('-', '_')
+        if path.parent.name == 'actions':
+            name = '{}_action'.format(name)
+        self.event_name = name
+
+    def _init_legacy(self):
+        """Set up the 'legacy' dispatcher.
+
+        The current Juju doesn't know about 'dispatch' and calls hooks
+        explicitly.
+        """
+        self.is_dispatch_aware = False
+        self._set_name_from_path(self._exec_path)
+
+    def _init_dispatch(self):
+        """Set up the new 'dispatch' dispatcher.
+
+        The current Juju will run 'dispatch' if it exists, and otherwise fall
+        back to the old behaviour.
+
+        JUJU_DISPATCH_PATH will be set to the wanted hook, e.g. hooks/install,
+        in both cases.
+        """
+        self._dispatch_path = Path(os.environ['JUJU_DISPATCH_PATH'])
+
+        if 'OPERATOR_DISPATCH' in os.environ:
+            logger.debug("Charm called itself via %s.", self._dispatch_path)
+            sys.exit(0)
+        os.environ['OPERATOR_DISPATCH'] = '1'
+
+        self.is_dispatch_aware = True
+        self._set_name_from_path(self._dispatch_path)
+
+    def is_restricted_context(self):
+        """Return True if we are running in a restricted Juju context.
+
+        When in a restricted context, most commands (relation-get, config-get,
+        state-get) are not available. As such, we change how we interact with
+        Juju.
+        """
+        return self.event_name in ('collect_metrics',)
+
+
+def main(charm_class, use_juju_for_storage=False):
+    """Set up the charm and dispatch the observed event.
+
+    The event name is based on the way this executable was called (argv[0]).
+    """
+    charm_dir = _get_charm_dir()
+
+    model_backend = ops.model._ModelBackend()
+    debug = ('JUJU_DEBUG' in os.environ)
+    setup_root_logging(model_backend, debug=debug)
+    logger.debug("Operator Framework %s up and running.", ops.__version__)
+
+    dispatcher = _Dispatcher(charm_dir)
+    dispatcher.run_any_legacy_hook()
+
+    metadata = (charm_dir / 'metadata.yaml').read_text()
+    actions_meta = charm_dir / 'actions.yaml'
+    if actions_meta.exists():
+        actions_metadata = actions_meta.read_text()
+    else:
+        actions_metadata = None
+
+    if not yaml.__with_libyaml__:
+        logger.debug('yaml does not have libyaml extensions, using slower pure Python yaml loader')
+    meta = ops.charm.CharmMeta.from_yaml(metadata, actions_metadata)
+    model = ops.model.Model(meta, model_backend)
+
+    # TODO: If Juju unit agent crashes after exit(0) from the charm code
+    # the framework will commit the snapshot but Juju will not commit its
+    # operation.
+    charm_state_path = charm_dir / CHARM_STATE_FILE
+    if use_juju_for_storage:
+        if dispatcher.is_restricted_context():
+            # TODO: jam 2020-06-30 This unconditionally avoids running a collect metrics event
+            # Though we eventually expect that juju will run collect-metrics in a
+            # non-restricted context. Once we can determine that we are running collect-metrics
+            # in a non-restricted context, we should fire the event as normal.
+            logger.debug('"%s" is not supported when using Juju for storage\n'
+                         'see: https://github.com/canonical/operator/issues/348',
+                         dispatcher.event_name)
+            # Note that we don't exit nonzero, because that would cause Juju to rerun the hook
+            return
+        store = ops.storage.JujuStorage()
+    else:
+        store = ops.storage.SQLiteStorage(charm_state_path)
+    framework = ops.framework.Framework(store, charm_dir, meta, model)
+    try:
+        sig = inspect.signature(charm_class)
+        try:
+            sig.bind(framework)
+        except TypeError:
+            msg = (
+                "the second argument, 'key', has been deprecated and will be "
+                "removed after the 0.7 release")
+            warnings.warn(msg, DeprecationWarning)
+            charm = charm_class(framework, None)
+        else:
+            charm = charm_class(framework)
+        dispatcher.ensure_event_links(charm)
+
+        # TODO: Remove the collect_metrics check below as soon as the relevant
+        # Juju changes are made.
+        #
+        # Skip reemission of deferred events for collect-metrics events because
+        # they do not have the full access to all hook tools.
+        if not dispatcher.is_restricted_context():
+            framework.reemit()
+
+        _emit_charm_event(charm, dispatcher.event_name)
+
+        framework.commit()
+    finally:
+        framework.close()
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/model.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/model.py
new file mode 100644
index 00000000..b96e8915
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/model.py
@@ -0,0 +1,1237 @@
+# Copyright 2019 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import decimal +import ipaddress +import json +import os +import re +import shutil +import tempfile +import time +import typing +import weakref + +from abc import ABC, abstractmethod +from collections.abc import Mapping, MutableMapping +from pathlib import Path +from subprocess import run, PIPE, CalledProcessError + +import ops +from ops.jujuversion import JujuVersion + + +class Model: + """Represents the Juju Model as seen from this unit. + + This should not be instantiated directly by Charmers, but can be accessed as `self.model` + from any class that derives from Object. + + Attributes: + unit: A :class:`Unit` that represents the unit that is running this code (eg yourself) + app: A :class:`Application` that represents the application this unit is a part of. + relations: Mapping of endpoint to list of :class:`Relation` answering the question + "what am I currently related to". See also :meth:`.get_relation` + config: A dict of the config for the current application. + resources: Access to resources for this charm. Use ``model.resources.fetch(resource_name)`` + to get the path on disk where the resource can be found. + storages: Mapping of storage_name to :class:`Storage` for the storage points defined in + metadata.yaml + pod: Used to get access to ``model.pod.set_spec`` to set the container specification + for Kubernetes charms. + """ + + def __init__(self, meta: 'ops.charm.CharmMeta', backend: '_ModelBackend'): + self._cache = _ModelCache(backend) + self._backend = backend + self.unit = self.get_unit(self._backend.unit_name) + self.app = self.unit.app + self.relations = RelationMapping(meta.relations, self.unit, self._backend, self._cache) + self.config = ConfigData(self._backend) + self.resources = Resources(list(meta.resources), self._backend) + self.pod = Pod(self._backend) + self.storages = StorageMapping(list(meta.storages), self._backend) + self._bindings = BindingMapping(self._backend) + + @property + def name(self) -> str: + """Return the name of the Model that this unit is running in. + + This is read from the environment variable ``JUJU_MODEL_NAME``. + """ + return self._backend.model_name + + def get_unit(self, unit_name: str) -> 'Unit': + """Get an arbitrary unit by name. + + Internally this uses a cache, so asking for the same unit two times will + return the same object. + """ + return self._cache.get(Unit, unit_name) + + def get_app(self, app_name: str) -> 'Application': + """Get an application by name. + + Internally this uses a cache, so asking for the same application two times will + return the same object. + """ + return self._cache.get(Application, app_name) + + def get_relation( + self, relation_name: str, + relation_id: typing.Optional[int] = None) -> 'Relation': + """Get a specific Relation instance. + + If relation_id is not given, this will return the Relation instance if the + relation is established only once or None if it is not established. If this + same relation is established multiple times the error TooManyRelatedAppsError is raised. 
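+
+        For example (illustrative)::
+
+            db = self.model.get_relation('db')  # the single 'db' Relation, or None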
+ + Args: + relation_name: The name of the endpoint for this charm + relation_id: An identifier for a specific relation. Used to disambiguate when a + given application has more than one relation on a given endpoint. + Raises: + TooManyRelatedAppsError: is raised if there is more than one relation to the + supplied relation_name and no relation_id was supplied + """ + return self.relations._get_unique(relation_name, relation_id) + + def get_binding(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding': + """Get a network space binding. + + Args: + binding_key: The relation name or instance to obtain bindings for. + Returns: + If ``binding_key`` is a relation name, the method returns the default binding + for that relation. If a relation instance is provided, the method first looks + up a more specific binding for that specific relation ID, and if none is found + falls back to the default binding for the relation name. + """ + return self._bindings.get(binding_key) + + +class _ModelCache: + + def __init__(self, backend): + self._backend = backend + self._weakrefs = weakref.WeakValueDictionary() + + def get(self, entity_type, *args): + key = (entity_type,) + args + entity = self._weakrefs.get(key) + if entity is None: + entity = entity_type(*args, backend=self._backend, cache=self) + self._weakrefs[key] = entity + return entity + + +class Application: + """Represents a named application in the model. + + This might be your application, or might be an application that you are related to. + Charmers should not instantiate Application objects directly, but should use + :meth:`Model.get_app` if they need a reference to a given application. + + Attributes: + name: The name of this application (eg, 'mysql'). This name may differ from the name of + the charm, if the user has deployed it to a different name. + """ + + def __init__(self, name, backend, cache): + self.name = name + self._backend = backend + self._cache = cache + self._is_our_app = self.name == self._backend.app_name + self._status = None + + def _invalidate(self): + self._status = None + + @property + def status(self) -> 'StatusBase': + """Used to report or read the status of the overall application. + + Can only be read and set by the lead unit of the application. + + The status of remote units is always Unknown. + + Raises: + RuntimeError: if you try to set the status of another application, or if you try to + set the status of this application as a unit that is not the leader. 
+            InvalidStatusError: if you try to set the status to something that is not a
+                :class:`StatusBase`
+
+        Example::
+
+            self.model.app.status = BlockedStatus('I need a human to come help me')
+        """
+        if not self._is_our_app:
+            return UnknownStatus()
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot get application status as a non-leader unit')
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=True)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for application {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_app:
+            raise RuntimeError('cannot set status for a remote application {}'.format(self))
+
+        if not self._backend.is_leader():
+            raise RuntimeError('cannot set application status as a non-leader unit')
+
+        self._backend.status_set(value.name, value.message, is_app=True)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+
+class Unit:
+    """Represents a named unit in the model.
+
+    This might be your unit, another unit of your application, or a unit of another application
+    that you are related to.
+
+    Attributes:
+        name: The name of the unit (eg, 'mysql/0')
+        app: The Application the unit is a part of.
+    """
+
+    def __init__(self, name, backend, cache):
+        self.name = name
+
+        app_name = name.split('/')[0]
+        self.app = cache.get(Application, app_name)
+
+        self._backend = backend
+        self._cache = cache
+        self._is_our_unit = self.name == self._backend.unit_name
+        self._status = None
+
+    def _invalidate(self):
+        self._status = None
+
+    @property
+    def status(self) -> 'StatusBase':
+        """Used to report or read the status of a specific unit.
+
+        The status of any unit other than yourself is always Unknown.
+
+        Raises:
+            RuntimeError: if you try to set the status of a unit other than yourself.
+            InvalidStatusError: if you try to set the status to something other than
+                a :class:`StatusBase`
+        Example::
+
+            self.model.unit.status = MaintenanceStatus('reconfiguring the frobnicators')
+        """
+        if not self._is_our_unit:
+            return UnknownStatus()
+
+        if self._status:
+            return self._status
+
+        s = self._backend.status_get(is_app=False)
+        self._status = StatusBase.from_name(s['status'], s['message'])
+        return self._status
+
+    @status.setter
+    def status(self, value: 'StatusBase'):
+        if not isinstance(value, StatusBase):
+            raise InvalidStatusError(
+                'invalid value provided for unit {} status: {}'.format(self, value)
+            )
+
+        if not self._is_our_unit:
+            raise RuntimeError('cannot set status for a remote unit {}'.format(self))
+
+        self._backend.status_set(value.name, value.message, is_app=False)
+        self._status = value
+
+    def __repr__(self):
+        return '<{}.{} {}>'.format(type(self).__module__, type(self).__name__, self.name)
+
+    def is_leader(self) -> bool:
+        """Return whether this unit is the leader of its application.
+
+        This can only be called for your own unit.
+        Returns:
+            True if you are the leader, False otherwise
+        Raises:
+            RuntimeError: if called for a unit that is not yourself
+        """
+        if self._is_our_unit:
+            # This value is not cached as it is not guaranteed to persist for the whole duration
+ return self._backend.is_leader() + else: + raise RuntimeError( + 'leadership status of remote units ({}) is not visible to other' + ' applications'.format(self) + ) + + def set_workload_version(self, version: str) -> None: + """Record the version of the software running as the workload. + + This shouldn't be confused with the revision of the charm. This is informative only; + shown in the output of 'juju status'. + """ + if not isinstance(version, str): + raise TypeError("workload version must be a str, not {}: {!r}".format( + type(version).__name__, version)) + self._backend.application_version_set(version) + + +class LazyMapping(Mapping, ABC): + """Represents a dict that isn't populated until it is accessed. + + Charm authors should generally never need to use this directly, but it forms + the basis for many of the dicts that the framework tracks. + """ + + _lazy_data = None + + @abstractmethod + def _load(self): + raise NotImplementedError() + + @property + def _data(self): + data = self._lazy_data + if data is None: + data = self._lazy_data = self._load() + return data + + def _invalidate(self): + self._lazy_data = None + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, key): + return self._data[key] + + +class RelationMapping(Mapping): + """Map of relation names to lists of :class:`Relation` instances.""" + + def __init__(self, relations_meta, our_unit, backend, cache): + self._peers = set() + for name, relation_meta in relations_meta.items(): + if relation_meta.role.is_peer(): + self._peers.add(name) + self._our_unit = our_unit + self._backend = backend + self._cache = cache + self._data = {relation_name: None for relation_name in relations_meta} + + def __contains__(self, key): + return key in self._data + + def __len__(self): + return len(self._data) + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, relation_name): + is_peer = relation_name in self._peers + relation_list = self._data[relation_name] + if relation_list is None: + relation_list = self._data[relation_name] = [] + for rid in self._backend.relation_ids(relation_name): + relation = Relation(relation_name, rid, is_peer, + self._our_unit, self._backend, self._cache) + relation_list.append(relation) + return relation_list + + def _invalidate(self, relation_name): + """Used to wipe the cache of a given relation_name. + + Not meant to be used by Charm authors. The content of relation data is + static for the lifetime of a hook, so it is safe to cache in memory once + accessed. + """ + self._data[relation_name] = None + + def _get_unique(self, relation_name, relation_id=None): + if relation_id is not None: + if not isinstance(relation_id, int): + raise ModelError('relation id {} must be int or None not {}'.format( + relation_id, + type(relation_id).__name__)) + for relation in self[relation_name]: + if relation.id == relation_id: + return relation + else: + # The relation may be dead, but it is not forgotten. + is_peer = relation_name in self._peers + return Relation(relation_name, relation_id, is_peer, + self._our_unit, self._backend, self._cache) + num_related = len(self[relation_name]) + if num_related == 0: + return None + elif num_related == 1: + return self[relation_name][0] + else: + # TODO: We need something in the framework to catch and gracefully handle + # errors, ideally integrating the error catching with Juju's mechanisms. 
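+            # For example, with two applications related over 'db',
+            # get_relation('db') cannot pick one; callers must pass a
+            # relation_id to disambiguate.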
+            raise TooManyRelatedAppsError(relation_name, num_related, 1)
+
+
+class BindingMapping:
+    """Mapping of endpoints to network bindings.
+
+    Charm authors should not instantiate this directly, but access it via
+    :meth:`Model.get_binding`
+    """
+
+    def __init__(self, backend):
+        self._backend = backend
+        self._data = {}
+
+    def get(self, binding_key: typing.Union[str, 'Relation']) -> 'Binding':
+        """Get a specific Binding for an endpoint/relation.
+
+        Not used directly by Charm authors. See :meth:`Model.get_binding`
+        """
+        if isinstance(binding_key, Relation):
+            binding_name = binding_key.name
+            relation_id = binding_key.id
+        elif isinstance(binding_key, str):
+            binding_name = binding_key
+            relation_id = None
+        else:
+            raise ModelError('binding key must be str or relation instance, not {}'
+                             ''.format(type(binding_key).__name__))
+        binding = self._data.get(binding_key)
+        if binding is None:
+            binding = Binding(binding_name, relation_id, self._backend)
+            self._data[binding_key] = binding
+        return binding
+
+
+class Binding:
+    """Binding to a network space.
+
+    Attributes:
+        name: The name of the endpoint this binding represents (eg, 'db')
+    """
+
+    def __init__(self, name, relation_id, backend):
+        self.name = name
+        self._relation_id = relation_id
+        self._backend = backend
+        self._network = None
+
+    @property
+    def network(self) -> 'Network':
+        """The network information for this binding."""
+        if self._network is None:
+            try:
+                self._network = Network(self._backend.network_get(self.name, self._relation_id))
+            except RelationNotFoundError:
+                if self._relation_id is None:
+                    raise
+                # If a relation is dead, we can still get network info associated with an
+                # endpoint itself
+                self._network = Network(self._backend.network_get(self.name))
+        return self._network
+
+
+class Network:
+    """Network space details.
+
+    Charm authors should not instantiate this directly, but should get access to the Network
+    definition from :meth:`Model.get_binding` and its ``network`` attribute.
+
+    Attributes:
+        interfaces: A list of :class:`NetworkInterface` details. This includes the
+            information about how your application should be configured (eg, what
+            IP addresses should you bind to.)
+            Note that multiple addresses for a single interface are represented as multiple
+            interfaces. (eg, ``[NetworkInterface('ens1', '10.1.1.1/32'),
+            NetworkInterface('ens1', '10.1.2.1/32')]``)
+        ingress_addresses: A list of :class:`ipaddress.ip_address` objects representing the IP
+            addresses that other units should use to get in touch with you.
+        egress_subnets: A list of :class:`ipaddress.ip_network` representing the subnets that
+            other units will see you connecting from. Due to things like NAT it isn't always
+            possible to narrow it down to a single address, but when it is clear, the CIDRs
+            will be constrained to a single address. (eg, 10.0.0.1/32)
+    Args:
+        network_info: A dict of network information as returned by ``network-get``.
+    """
+
+    def __init__(self, network_info: dict):
+        self.interfaces = []
+        # Treat multiple addresses on an interface as multiple logical
+        # interfaces with the same name.
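+        # A rough sketch of the network_info shape consumed here (as returned
+        # by 'network-get --format=json'; the values are illustrative):
+        #
+        #   {'bind-addresses': [{'interface-name': 'ens1',
+        #                        'addresses': [{'value': '10.1.1.1',
+        #                                       'cidr': '10.1.1.0/24'}]}],
+        #    'ingress-addresses': ['10.1.1.1'],
+        #    'egress-subnets': ['10.1.1.0/24']}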
+        for interface_info in network_info['bind-addresses']:
+            interface_name = interface_info['interface-name']
+            for address_info in interface_info['addresses']:
+                self.interfaces.append(NetworkInterface(interface_name, address_info))
+        self.ingress_addresses = []
+        for address in network_info['ingress-addresses']:
+            self.ingress_addresses.append(ipaddress.ip_address(address))
+        self.egress_subnets = []
+        for subnet in network_info['egress-subnets']:
+            self.egress_subnets.append(ipaddress.ip_network(subnet))
+
+    @property
+    def bind_address(self):
+        """A single address that your application should bind() to.
+
+        For the common case where there is a single answer, this represents a single
+        address from :attr:`.interfaces` that can be used to configure where your
+        application should bind() and listen().
+        """
+        return self.interfaces[0].address
+
+    @property
+    def ingress_address(self):
+        """The address other applications should use to connect to your unit.
+
+        Due to things like public/private addresses, NAT and tunneling, the address you bind()
+        to is not always the address other people can use to connect() to you.
+        This is just the first address from :attr:`.ingress_addresses`.
+        """
+        return self.ingress_addresses[0]
+
+
+class NetworkInterface:
+    """Represents a single network interface that the charm needs to know about.
+
+    Charmers should not instantiate this type directly. Instead use :meth:`Model.get_binding`
+    to get the network information for a given endpoint.
+
+    Attributes:
+        name: The name of the interface (eg. 'eth0', or 'ens1')
+        address: An :class:`ipaddress.ip_address` for the IP configured on this interface.
+        subnet: An :class:`ipaddress.ip_network` representation of the IP for the network
+            interface. This may be a single address (eg '10.0.1.2/32')
+    """
+
+    def __init__(self, name: str, address_info: dict):
+        self.name = name
+        # TODO: expose a hardware address here, see LP: #1864070.
+        self.address = ipaddress.ip_address(address_info['value'])
+        cidr = address_info['cidr']
+        if not cidr:
+            # The cidr field may be empty, see LP: #1864102.
+            # In this case, make it a /32 or /128 IP network.
+            self.subnet = ipaddress.ip_network(address_info['value'])
+        else:
+            self.subnet = ipaddress.ip_network(cidr)
+        # TODO: expose a hostname/canonical name for the address here, see LP: #1864086.
+
+
+class Relation:
+    """Represents an established relation between this application and another application.
+
+    This class should not be instantiated directly, instead use :meth:`Model.get_relation`
+    or :attr:`RelationEvent.relation`.
+
+    Attributes:
+        name: The name of the local endpoint of the relation (eg 'db')
+        id: The identifier for a particular relation (integer)
+        app: An :class:`Application` representing the remote application of this relation.
+            For peer relations this will be the local application.
+        units: A set of :class:`Unit` for units that have started and joined this relation.
+        data: A :class:`RelationData` holding the data buckets for each entity
+            of a relation. Accessed via eg Relation.data[unit]['foo']
+    """
+
+    def __init__(
+            self, relation_name: str, relation_id: int, is_peer: bool, our_unit: Unit,
+            backend: '_ModelBackend', cache: '_ModelCache'):
+        self.name = relation_name
+        self.id = relation_id
+        self.app = None
+        self.units = set()
+
+        # For peer relations, both the remote and the local app are the same.
+        if is_peer:
+            self.app = our_unit.app
+        try:
+            for unit_name in backend.relation_list(self.id):
+                unit = cache.get(Unit, unit_name)
+                self.units.add(unit)
+                if self.app is None:
+                    self.app = unit.app
+        except RelationNotFoundError:
+            # If the relation is dead, just treat it as if it has no remote units.
+            pass
+        self.data = RelationData(self, our_unit, backend)
+
+    def __repr__(self):
+        return '<{}.{} {}:{}>'.format(type(self).__module__,
+                                      type(self).__name__,
+                                      self.name,
+                                      self.id)
+
+
+class RelationData(Mapping):
+    """Represents the various data buckets of a given relation.
+
+    Each unit and application involved in a relation has their own data bucket.
+    Eg: ``{entity: RelationDataContent}``
+    where entity can be either a :class:`Unit` or an :class:`Application`.
+
+    Units can read and write their own data, and if they are the leader,
+    they can read and write their application data. They are allowed to read
+    remote unit and application data.
+
+    This class should not be created directly. It should be accessed via
+    :attr:`Relation.data`
+    """
+
+    def __init__(self, relation: Relation, our_unit: Unit, backend: '_ModelBackend'):
+        self.relation = weakref.proxy(relation)
+        self._data = {
+            our_unit: RelationDataContent(self.relation, our_unit, backend),
+            our_unit.app: RelationDataContent(self.relation, our_unit.app, backend),
+        }
+        self._data.update({
+            unit: RelationDataContent(self.relation, unit, backend)
+            for unit in self.relation.units})
+        # The relation might be dead so avoid a None key here.
+        if self.relation.app is not None:
+            self._data.update({
+                self.relation.app: RelationDataContent(self.relation, self.relation.app, backend),
+            })
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+
+# We mix in MutableMapping here to get some convenience implementations, but whether it's actually
+# mutable or not is controlled by the flag.
+class RelationDataContent(LazyMapping, MutableMapping):
+
+    def __init__(self, relation, entity, backend):
+        self.relation = relation
+        self._entity = entity
+        self._backend = backend
+        self._is_app = isinstance(entity, Application)
+
+    def _load(self):
+        try:
+            return self._backend.relation_get(self.relation.id, self._entity.name, self._is_app)
+        except RelationNotFoundError:
+            # Dead relations tell no tales (and have no data).
+            return {}
+
+    def _is_mutable(self):
+        if self._is_app:
+            is_our_app = self._backend.app_name == self._entity.name
+            if not is_our_app:
+                return False
+            # Whether the application data bag is mutable or not depends on
+            # whether this unit is a leader or not, but this is not guaranteed
+            # to be always true during the same hook execution.
+            return self._backend.is_leader()
+        else:
+            is_our_unit = self._backend.unit_name == self._entity.name
+            if is_our_unit:
+                return True
+        return False
+
+    def __setitem__(self, key, value):
+        if not self._is_mutable():
+            raise RelationDataError('cannot set relation data for {}'.format(self._entity.name))
+        if not isinstance(value, str):
+            raise RelationDataError('relation data values must be strings')
+
+        self._backend.relation_set(self.relation.id, key, value, self._is_app)
+
+        # Don't load data unnecessarily if we're only updating.
+        if self._lazy_data is not None:
+            if value == '':
+                # Match the behavior of Juju, which is that setting the value to an
+                # empty string will remove the key entirely from the relation data.
+ del self._data[key] + else: + self._data[key] = value + + def __delitem__(self, key): + # Match the behavior of Juju, which is that setting the value to an empty + # string will remove the key entirely from the relation data. + self.__setitem__(key, '') + + +class ConfigData(LazyMapping): + + def __init__(self, backend): + self._backend = backend + + def _load(self): + return self._backend.config_get() + + +class StatusBase: + """Status values specific to applications and units. + + To access a status by name, see :meth:`StatusBase.from_name`, most use cases will just + directly use the child class to indicate their status. + """ + + _statuses = {} + name = None + + def __init__(self, message: str): + self.message = message + + def __new__(cls, *args, **kwargs): + if cls is StatusBase: + raise TypeError("cannot instantiate a base class") + return super().__new__(cls) + + def __eq__(self, other): + if not isinstance(self, type(other)): + return False + return self.message == other.message + + def __repr__(self): + return "{.__class__.__name__}({!r})".format(self, self.message) + + @classmethod + def from_name(cls, name: str, message: str): + if name == 'unknown': + # unknown is special + return UnknownStatus() + else: + return cls._statuses[name](message) + + @classmethod + def register(cls, child): + if child.name is None: + raise AttributeError('cannot register a Status which has no name') + cls._statuses[child.name] = child + return child + + +@StatusBase.register +class UnknownStatus(StatusBase): + """The unit status is unknown. + + A unit-agent has finished calling install, config-changed and start, but the + charm has not called status-set yet. + + """ + name = 'unknown' + + def __init__(self): + # Unknown status cannot be set and does not have a message associated with it. + super().__init__('') + + def __repr__(self): + return "UnknownStatus()" + + +@StatusBase.register +class ActiveStatus(StatusBase): + """The unit is ready. + + The unit believes it is correctly offering all the services it has been asked to offer. + """ + name = 'active' + + def __init__(self, message: str = ''): + super().__init__(message) + + +@StatusBase.register +class BlockedStatus(StatusBase): + """The unit requires manual intervention. + + An operator has to manually intervene to unblock the unit and let it proceed. + """ + name = 'blocked' + + +@StatusBase.register +class MaintenanceStatus(StatusBase): + """The unit is performing maintenance tasks. + + The unit is not yet providing services, but is actively doing work in preparation + for providing those services. This is a "spinning" state, not an error state. It + reflects activity on the unit itself, not on peers or related units. + + """ + name = 'maintenance' + + +@StatusBase.register +class WaitingStatus(StatusBase): + """A unit is unable to progress. + + The unit is unable to progress to an active state because an application to which + it is related is not running. + + """ + name = 'waiting' + + +class Resources: + """Object representing resources for the charm. + """ + + def __init__(self, names: typing.Iterable[str], backend: '_ModelBackend'): + self._backend = backend + self._paths = {name: None for name in names} + + def fetch(self, name: str) -> Path: + """Fetch the resource from the controller or store. + + If successfully fetched, this returns a Path object to where the resource is stored + on disk, otherwise it raises a ModelError. 
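+
+        For example (illustrative)::
+
+            path = self.model.resources.fetch('software')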
+        """
+        if name not in self._paths:
+            raise RuntimeError('invalid resource name: {}'.format(name))
+        if self._paths[name] is None:
+            self._paths[name] = Path(self._backend.resource_get(name))
+        return self._paths[name]
+
+
+class Pod:
+    """Represents the definition of a pod spec in Kubernetes models.
+
+    Currently only supports simple access to setting the Juju pod spec via :attr:`.set_spec`.
+    """
+
+    def __init__(self, backend: '_ModelBackend'):
+        self._backend = backend
+
+    def set_spec(self, spec: typing.Mapping, k8s_resources: typing.Mapping = None):
+        """Set the specification for pods that Juju should start in kubernetes.
+
+        See `juju help-tool pod-spec-set` for details of what should be passed.
+        Args:
+            spec: The mapping defining the pod specification
+            k8s_resources: Additional kubernetes specific specification.
+        """
+        if not self._backend.is_leader():
+            raise ModelError('cannot set a pod spec as this unit is not a leader')
+        self._backend.pod_spec_set(spec, k8s_resources)
+
+
+class StorageMapping(Mapping):
+    """Map of storage names to lists of Storage instances."""
+
+    def __init__(self, storage_names: typing.Iterable[str], backend: '_ModelBackend'):
+        self._backend = backend
+        self._storage_map = {storage_name: None for storage_name in storage_names}
+
+    def __contains__(self, key: str):
+        return key in self._storage_map
+
+    def __len__(self):
+        return len(self._storage_map)
+
+    def __iter__(self):
+        return iter(self._storage_map)
+
+    def __getitem__(self, storage_name: str) -> typing.List['Storage']:
+        storage_list = self._storage_map[storage_name]
+        if storage_list is None:
+            storage_list = self._storage_map[storage_name] = []
+            for storage_id in self._backend.storage_list(storage_name):
+                storage_list.append(Storage(storage_name, storage_id, self._backend))
+        return storage_list
+
+    def request(self, storage_name: str, count: int = 1):
+        """Requests new storage instances of a given name.
+
+        Uses storage-add tool to request additional storage. Juju will notify the unit
+        via <storage-name>-storage-attached events when it becomes available.
+        """
+        if storage_name not in self._storage_map:
+            raise ModelError(('cannot add storage {!r}:'
+                              ' it is not present in the charm metadata').format(storage_name))
+        self._backend.storage_add(storage_name, count)
+
+
+class Storage:
+    """Represents a storage as defined in metadata.yaml.
+
+    Attributes:
+        name: Simple string name of the storage
+        id: The provider id for storage
+    """
+
+    def __init__(self, storage_name, storage_id, backend):
+        self.name = storage_name
+        self.id = storage_id
+        self._backend = backend
+        self._location = None
+
+    @property
+    def location(self):
+        if self._location is None:
+            raw = self._backend.storage_get('{}/{}'.format(self.name, self.id), "location")
+            self._location = Path(raw)
+        return self._location
+
+
+class ModelError(Exception):
+    """Base class for exceptions raised when interacting with the Model."""
+    pass
+
+
+class TooManyRelatedAppsError(ModelError):
+    """Raised by :meth:`Model.get_relation` if there is more than one related application."""
+
+    def __init__(self, relation_name, num_related, max_supported):
+        super().__init__('Too many remote applications on {} ({} > {})'.format(
+            relation_name, num_related, max_supported))
+        self.relation_name = relation_name
+        self.num_related = num_related
+        self.max_supported = max_supported
+
+
+class RelationDataError(ModelError):
+    """Raised by ``Relation.data[entity][key] = 'foo'`` if the data is invalid.
+ + This is raised if you're either trying to set a value to something that isn't a string, + or if you are trying to set a value in a bucket that you don't have access to. (eg, + another application/unit or setting your application data but you aren't the leader.) + """ + + +class RelationNotFoundError(ModelError): + """Backend error when querying juju for a given relation and that relation doesn't exist.""" + + +class InvalidStatusError(ModelError): + """Raised if trying to set an Application or Unit status to something invalid.""" + + +class _ModelBackend: + """Represents the connection between the Model representation and talking to Juju. + + Charm authors should not directly interact with the ModelBackend, it is a private + implementation of Model. + """ + + LEASE_RENEWAL_PERIOD = datetime.timedelta(seconds=30) + + def __init__(self, unit_name=None, model_name=None): + if unit_name is None: + self.unit_name = os.environ['JUJU_UNIT_NAME'] + else: + self.unit_name = unit_name + if model_name is None: + model_name = os.environ.get('JUJU_MODEL_NAME') + self.model_name = model_name + self.app_name = self.unit_name.split('/')[0] + + self._is_leader = None + self._leader_check_time = None + + def _run(self, *args, return_output=False, use_json=False): + kwargs = dict(stdout=PIPE, stderr=PIPE) + if use_json: + args += ('--format=json',) + try: + result = run(args, check=True, **kwargs) + except CalledProcessError as e: + raise ModelError(e.stderr) + if return_output: + if result.stdout is None: + return '' + else: + text = result.stdout.decode('utf8') + if use_json: + return json.loads(text) + else: + return text + + def relation_ids(self, relation_name): + relation_ids = self._run('relation-ids', relation_name, return_output=True, use_json=True) + return [int(relation_id.split(':')[-1]) for relation_id in relation_ids] + + def relation_list(self, relation_id): + try: + return self._run('relation-list', '-r', str(relation_id), + return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_get(self, relation_id, member_name, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_get must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'getting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-get', '-r', str(relation_id), '-', member_name] + if is_app: + args.append('--app') + + try: + return self._run(*args, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def relation_set(self, relation_id, key, value, is_app): + if not isinstance(is_app, bool): + raise TypeError('is_app parameter to relation_set must be a boolean') + + if is_app: + version = JujuVersion.from_environ() + if not version.has_app_data(): + raise RuntimeError( + 'setting application data is not supported on Juju version {}'.format(version)) + + args = ['relation-set', '-r', str(relation_id), '{}={}'.format(key, value)] + if is_app: + args.append('--app') + + try: + return self._run(*args) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def config_get(self): + return self._run('config-get', return_output=True, use_json=True) + + def is_leader(self): + """Obtain the current leadership status for the unit the charm 
code is executing on.
+
+        The value is cached for the duration of a lease, which is 30s in Juju.
+        """
+        now = time.monotonic()
+        if self._leader_check_time is None:
+            check = True
+        else:
+            time_since_check = datetime.timedelta(seconds=now - self._leader_check_time)
+            check = (time_since_check > self.LEASE_RENEWAL_PERIOD or self._is_leader is None)
+        if check:
+            # Current time MUST be saved before running is-leader to ensure the cache
+            # is only used inside the window that is-leader itself asserts.
+            self._leader_check_time = now
+            self._is_leader = self._run('is-leader', return_output=True, use_json=True)
+
+        return self._is_leader
+
+    def resource_get(self, resource_name):
+        return self._run('resource-get', resource_name, return_output=True).strip()
+
+    def pod_spec_set(self, spec, k8s_resources):
+        tmpdir = Path(tempfile.mkdtemp('-pod-spec-set'))
+        try:
+            spec_path = tmpdir / 'spec.json'
+            spec_path.write_text(json.dumps(spec))
+            args = ['--file', str(spec_path)]
+            if k8s_resources:
+                k8s_res_path = tmpdir / 'k8s-resources.json'
+                k8s_res_path.write_text(json.dumps(k8s_resources))
+                args.extend(['--k8s-resources', str(k8s_res_path)])
+            self._run('pod-spec-set', *args)
+        finally:
+            shutil.rmtree(str(tmpdir))
+
+    def status_get(self, *, is_app=False):
+        """Get the status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the application status (True) or the
+                unit status (False) should be retrieved.
+        """
+        content = self._run(
+            'status-get', '--include-data', '--application={}'.format(is_app),
+            use_json=True,
+            return_output=True)
+        # Unit status looks like (in YAML):
+        # message: 'load: 0.28 0.26 0.26'
+        # status: active
+        # status-data: {}
+        # Application status looks like (in YAML):
+        # application-status:
+        #   message: 'load: 0.28 0.26 0.26'
+        #   status: active
+        #   status-data: {}
+        #   units:
+        #     uo/0:
+        #       message: 'load: 0.28 0.26 0.26'
+        #       status: active
+        #       status-data: {}
+
+        if is_app:
+            return {'status': content['application-status']['status'],
+                    'message': content['application-status']['message']}
+        else:
+            return content
+
+    def status_set(self, status, message='', *, is_app=False):
+        """Set the status of a unit or an application.
+
+        Args:
+            is_app: A boolean indicating whether the status should be set for an
+                application (True) or for a unit (False).
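+
+        Example (illustrative; backend is a _ModelBackend instance)::
+
+            backend.status_set('active', 'ready', is_app=False)
+            # runs the hook tool: status-set --application=False active ready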
+ """ + if not isinstance(is_app, bool): + raise TypeError('is_app parameter must be boolean') + return self._run('status-set', '--application={}'.format(is_app), status, message) + + def storage_list(self, name): + return [int(s.split('/')[1]) for s in self._run('storage-list', name, + return_output=True, use_json=True)] + + def storage_get(self, storage_name_id, attribute): + return self._run('storage-get', '-s', storage_name_id, attribute, + return_output=True, use_json=True) + + def storage_add(self, name, count=1): + if not isinstance(count, int) or isinstance(count, bool): + raise TypeError('storage count must be integer, got: {} ({})'.format(count, + type(count))) + self._run('storage-add', '{}={}'.format(name, count)) + + def action_get(self): + return self._run('action-get', return_output=True, use_json=True) + + def action_set(self, results): + self._run('action-set', *["{}={}".format(k, v) for k, v in results.items()]) + + def action_log(self, message): + self._run('action-log', message) + + def action_fail(self, message=''): + self._run('action-fail', message) + + def application_version_set(self, version): + self._run('application-version-set', '--', version) + + def juju_log(self, level, message): + self._run('juju-log', '--log-level', level, message) + + def network_get(self, binding_name, relation_id=None): + """Return network info provided by network-get for a given binding. + + Args: + binding_name: A name of a binding (relation name or extra-binding name). + relation_id: An optional relation id to get network info for. + """ + cmd = ['network-get', binding_name] + if relation_id is not None: + cmd.extend(['-r', str(relation_id)]) + try: + return self._run(*cmd, return_output=True, use_json=True) + except ModelError as e: + if 'relation not found' in str(e): + raise RelationNotFoundError() from e + raise + + def add_metrics(self, metrics, labels=None): + cmd = ['add-metric'] + + if labels: + label_args = [] + for k, v in labels.items(): + _ModelBackendValidator.validate_metric_label(k) + _ModelBackendValidator.validate_label_value(k, v) + label_args.append('{}={}'.format(k, v)) + cmd.extend(['--labels', ','.join(label_args)]) + + metric_args = [] + for k, v in metrics.items(): + _ModelBackendValidator.validate_metric_key(k) + metric_value = _ModelBackendValidator.format_metric_value(v) + metric_args.append('{}={}'.format(k, metric_value)) + cmd.extend(metric_args) + self._run(*cmd) + + +class _ModelBackendValidator: + """Provides facilities for validating inputs and formatting them for model backends.""" + + METRIC_KEY_REGEX = re.compile(r'^[a-zA-Z](?:[a-zA-Z0-9-_]*[a-zA-Z0-9])?$') + + @classmethod + def validate_metric_key(cls, key): + if cls.METRIC_KEY_REGEX.match(key) is None: + raise ModelError( + 'invalid metric key {!r}: must match {}'.format( + key, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def validate_metric_label(cls, label_name): + if cls.METRIC_KEY_REGEX.match(label_name) is None: + raise ModelError( + 'invalid metric label name {!r}: must match {}'.format( + label_name, cls.METRIC_KEY_REGEX.pattern)) + + @classmethod + def format_metric_value(cls, value): + try: + decimal_value = decimal.Decimal.from_float(value) + except TypeError as e: + e2 = ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + raise e2 from e + if decimal_value.is_nan() or decimal_value.is_infinite() or decimal_value < 0: + raise ModelError('invalid metric value {!r} provided:' + ' must be a positive finite float'.format(value)) + 
return str(decimal_value)
+
+    @classmethod
+    def validate_label_value(cls, label, value):
+        # Label values cannot be empty, and cannot contain commas or equal signs,
+        # as those are used by add-metric as separators.
+        if not value:
+            raise ModelError(
+                'metric label {} has an empty value, which is not allowed'.format(label))
+        v = str(value)
+        if re.search('[,=]', v) is not None:
+            raise ModelError(
+                'metric label values must not contain "," or "=": {}={!r}'.format(label, value))
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/storage.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/storage.py
new file mode 100755
index 00000000..d4310ce1
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/storage.py
@@ -0,0 +1,318 @@
+# Copyright 2019-2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import timedelta
+import pickle
+import shutil
+import subprocess
+import sqlite3
+import typing
+
+import yaml
+
+
+class SQLiteStorage:
+
+    DB_LOCK_TIMEOUT = timedelta(hours=1)
+
+    def __init__(self, filename):
+        # The isolation_level argument is set to None such that the implicit
+        # transaction management behavior of the sqlite3 module is disabled.
+        self._db = sqlite3.connect(str(filename),
+                                   isolation_level=None,
+                                   timeout=self.DB_LOCK_TIMEOUT.total_seconds())
+        self._setup()
+
+    def _setup(self):
+        # Make sure that the database is locked until the connection is closed,
+        # not until the transaction ends.
+        self._db.execute("PRAGMA locking_mode=EXCLUSIVE")
+        c = self._db.execute("BEGIN")
+        c.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='snapshot'")
+        if c.fetchone()[0] == 0:
+            # Keep in mind what might happen if the process dies somewhere below.
+            # The system must not be rendered permanently broken by that.
+            self._db.execute("CREATE TABLE snapshot (handle TEXT PRIMARY KEY, data BLOB)")
+            self._db.execute('''
+                CREATE TABLE notice (
+                  sequence INTEGER PRIMARY KEY AUTOINCREMENT,
+                  event_path TEXT,
+                  observer_path TEXT,
+                  method_name TEXT)
+                ''')
+            self._db.commit()
+
+    def close(self):
+        self._db.close()
+
+    def commit(self):
+        self._db.commit()
+
+    # There's commit but no rollback. For abort to be supported, we'll need logic that
+    # can roll back decisions made by third-party code in terms of the internal state
+    # of objects that have been snapshotted, and hooks to let them know about it and
+    # take the needed actions to undo their logic until the last snapshot.
+    # This is doable but would significantly increase the chances for mistakes.
+
+    def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None:
+        """Part of the Storage API, persist snapshot data under the given handle.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+            snapshot_data: The data to be persisted (as returned by Object.snapshot()). This
+                might be a dict/tuple/int, but must contain only 'simple' Python types.
+        """
+        # Use pickle for serialization, so the value remains portable.
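+        # Illustrative example (hypothetical handle path):
+        #   save_snapshot('MyCharm/StoredStateData[_stored]', {'count': 1})
+        # would store the pickled dict in the snapshot table under that handle.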
+        raw_data = pickle.dumps(snapshot_data)
+        self._db.execute("REPLACE INTO snapshot VALUES (?, ?)", (handle_path, raw_data))
+
+    def load_snapshot(self, handle_path: str) -> typing.Any:
+        """Part of the Storage API, retrieve a snapshot that was previously saved.
+
+        Args:
+            handle_path: The string identifying the snapshot.
+        Raises:
+            NoSnapshotError: if there is no snapshot for the given handle_path.
+        """
+        c = self._db.cursor()
+        c.execute("SELECT data FROM snapshot WHERE handle=?", (handle_path,))
+        row = c.fetchone()
+        if row:
+            return pickle.loads(row[0])
+        raise NoSnapshotError(handle_path)
+
+    def drop_snapshot(self, handle_path: str):
+        """Part of the Storage API, remove a snapshot that was previously saved.
+
+        Dropping a snapshot that doesn't exist is treated as a no-op.
+        """
+        self._db.execute("DELETE FROM snapshot WHERE handle=?", (handle_path,))
+
+    def list_snapshots(self) -> typing.Generator[str, None, None]:
+        """Return the names of all snapshots that are currently saved."""
+        c = self._db.cursor()
+        c.execute("SELECT handle FROM snapshot")
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield row[0]
+
+    def save_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, record a notice (event and observer)."""
+        self._db.execute('INSERT INTO notice VALUES (NULL, ?, ?, ?)',
+                         (event_path, observer_path, method_name))
+
+    def drop_notice(self, event_path: str, observer_path: str, method_name: str) -> None:
+        """Part of the Storage API, remove a notice that was previously recorded."""
+        self._db.execute('''
+            DELETE FROM notice
+             WHERE event_path=?
+               AND observer_path=?
+               AND method_name=?
+            ''', (event_path, observer_path, method_name))
+
+    def notices(self, event_path: typing.Optional[str]) ->\
+            typing.Generator[typing.Tuple[str, str, str], None, None]:
+        """Part of the Storage API, return all notices that match event_path.
+
+        Args:
+            event_path: If supplied, only yield notices that match event_path. If not
+                supplied (or None/''), all notices are returned.
+        Returns:
+            Iterable of (event_path, observer_path, method_name) tuples.
+        """
+        if event_path:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                WHERE event_path=?
+                ORDER BY sequence
+                ''', (event_path,))
+        else:
+            c = self._db.execute('''
+                SELECT event_path, observer_path, method_name
+                FROM notice
+                ORDER BY sequence
+                ''')
+        while True:
+            rows = c.fetchmany()
+            if not rows:
+                break
+            for row in rows:
+                yield tuple(row)
+
+
+class JujuStorage:
+    """Storing the content tracked by the Framework in Juju.
+
+    This uses :class:`_JujuStorageBackend` to interact with state-get/state-set
+    as the way to store state for the framework and for components.
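+
+    Note that the state-get/state-set hook tools are only present on newer Juju
+    versions; :meth:`_JujuStorageBackend.is_available` checks whether they exist.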
+ """ + + NOTICE_KEY = "#notices#" + + def __init__(self, backend: '_JujuStorageBackend' = None): + self._backend = backend + if backend is None: + self._backend = _JujuStorageBackend() + + def close(self): + return + + def commit(self): + return + + def save_snapshot(self, handle_path: str, snapshot_data: typing.Any) -> None: + self._backend.set(handle_path, snapshot_data) + + def load_snapshot(self, handle_path): + try: + content = self._backend.get(handle_path) + except KeyError: + raise NoSnapshotError(handle_path) + return content + + def drop_snapshot(self, handle_path): + self._backend.delete(handle_path) + + def save_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.append([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def drop_notice(self, event_path: str, observer_path: str, method_name: str): + notice_list = self._load_notice_list() + notice_list.remove([event_path, observer_path, method_name]) + self._save_notice_list(notice_list) + + def notices(self, event_path: str): + notice_list = self._load_notice_list() + for row in notice_list: + if row[0] != event_path: + continue + yield tuple(row) + + def _load_notice_list(self) -> typing.List[typing.Tuple[str]]: + try: + notice_list = self._backend.get(self.NOTICE_KEY) + except KeyError: + return [] + if notice_list is None: + return [] + return notice_list + + def _save_notice_list(self, notices: typing.List[typing.Tuple[str]]) -> None: + self._backend.set(self.NOTICE_KEY, notices) + + +class _SimpleLoader(getattr(yaml, 'CSafeLoader', yaml.SafeLoader)): + """Handle a couple basic python types. + + yaml.SafeLoader can handle all the basic int/float/dict/set/etc that we want. The only one + that it *doesn't* handle is tuples. We don't want to support arbitrary types, so we just + subclass SafeLoader and add tuples back in. + """ + # Taken from the example at: + # https://stackoverflow.com/questions/9169025/how-can-i-add-a-python-tuple-to-a-yaml-file-using-pyyaml + + construct_python_tuple = yaml.Loader.construct_python_tuple + + +_SimpleLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + _SimpleLoader.construct_python_tuple) + + +class _SimpleDumper(getattr(yaml, 'CSafeDumper', yaml.SafeDumper)): + """Add types supported by 'marshal' + + YAML can support arbitrary types, but that is generally considered unsafe (like pickle). So + we want to only support dumping out types that are safe to load. + """ + + +_SimpleDumper.represent_tuple = yaml.Dumper.represent_tuple +_SimpleDumper.add_representer(tuple, _SimpleDumper.represent_tuple) + + +class _JujuStorageBackend: + """Implements the interface from the Operator framework to Juju's state-get/set/etc.""" + + @staticmethod + def is_available() -> bool: + """Check if Juju state storage is available. + + This checks if there is a 'state-get' executable in PATH. + """ + p = shutil.which('state-get') + return p is not None + + def set(self, key: str, value: typing.Any) -> None: + """Set a key to a given value. + + Args: + key: The string key that will be used to find the value later + value: Arbitrary content that will be returned by get(). + Raises: + CalledProcessError: if 'state-set' returns an error code. + """ + # default_flow_style=None means that it can use Block for + # complex types (types that have nested types) but use flow + # for simple types (like an array). Not all versions of PyYAML + # have the same default style. 
+        encoded_value = yaml.dump(value, Dumper=_SimpleDumper, default_flow_style=None)
+        content = yaml.dump(
+            {key: encoded_value}, encoding='utf-8', default_style='|',
+            default_flow_style=False,
+            Dumper=_SimpleDumper)
+        subprocess.run(["state-set", "--file", "-"], input=content, check=True)
+
+    def get(self, key: str) -> typing.Any:
+        """Get the value associated with a given key.
+
+        Args:
+            key: The string key that will be used to find the value.
+        Raises:
+            CalledProcessError: if 'state-get' returns an error code.
+        """
+        # We don't capture stderr here so it can end up in debug logs.
+        p = subprocess.run(
+            ["state-get", key],
+            stdout=subprocess.PIPE,
+            check=True,
+        )
+        if p.stdout == b'' or p.stdout == b'\n':
+            raise KeyError(key)
+        return yaml.load(p.stdout, Loader=_SimpleLoader)
+
+    def delete(self, key: str) -> None:
+        """Remove a key from being tracked.
+
+        Args:
+            key: The key to stop storing.
+        Raises:
+            CalledProcessError: if 'state-delete' returns an error code.
+        """
+        subprocess.run(["state-delete", key], check=True)
+
+
+class NoSnapshotError(Exception):
+
+    def __init__(self, handle_path):
+        self.handle_path = handle_path
+
+    def __str__(self):
+        return 'no snapshot data found for {} object'.format(self.handle_path)
diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/testing.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/testing.py
new file mode 100755
index 00000000..b4b3fe07
--- /dev/null
+++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/testing.py
@@ -0,0 +1,586 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import pathlib
+from textwrap import dedent
+import tempfile
+import typing
+import yaml
+import weakref
+
+from ops import (
+    charm,
+    framework,
+    model,
+    storage,
+)
+
+
+# OptionalYAML is something like metadata.yaml or actions.yaml. You can
+# pass in a file-like object or the string directly.
+OptionalYAML = typing.Optional[typing.Union[str, typing.TextIO]]
+
+
+# noinspection PyProtectedMember
+class Harness:
+    """This class represents a way to build up the model that will drive a test suite.
+
+    The model that is created is from the viewpoint of the charm that you are testing.
+
+    Example::
+
+        harness = Harness(MyCharm)
+        # Do initial setup here
+        relation_id = harness.add_relation('db', 'postgresql')
+        # Now instantiate the charm to see events as the model changes
+        harness.begin()
+        harness.add_relation_unit(relation_id, 'postgresql/0')
+        harness.update_relation_data(relation_id, 'postgresql/0', {'key': 'val'})
+        # Check that charm has properly handled the relation_joined event for postgresql/0
+        self.assertEqual(harness.charm. ...)
+
+    Args:
+        charm_cls: The Charm class that you'll be testing.
+        meta: A string or file-like object containing the contents of
+            metadata.yaml. If not supplied, we will look for a 'metadata.yaml' file in the
+            parent directory of the Charm, and if not found fall back to a trivial
+            'name: test-charm' metadata.
+        actions: A string or file-like object containing the contents of
+            actions.yaml. If not supplied, we will look for an 'actions.yaml' file in the
+            parent directory of the Charm.
+    """
+
+    def __init__(
+            self,
+            charm_cls: typing.Type[charm.CharmBase],
+            *,
+            meta: OptionalYAML = None,
+            actions: OptionalYAML = None):
+        # TODO: jam 2020-03-05 We probably want to take config as a parameter as well, since
+        #       it would define the default values of config that the charm would see.
+        self._charm_cls = charm_cls
+        self._charm = None
+        self._charm_dir = 'no-disk-path'  # this may be updated by _create_meta
+        self._lazy_resource_dir = None
+        self._meta = self._create_meta(meta, actions)
+        self._unit_name = self._meta.name + '/0'
+        self._framework = None
+        self._hooks_enabled = True
+        self._relation_id_counter = 0
+        self._backend = _TestingModelBackend(self._unit_name, self._meta)
+        self._model = model.Model(self._meta, self._backend)
+        self._storage = storage.SQLiteStorage(':memory:')
+        self._framework = framework.Framework(
+            self._storage, self._charm_dir, self._meta, self._model)
+
+    @property
+    def charm(self) -> charm.CharmBase:
+        """Return the instance of the charm class that was passed to __init__.
+
+        Note that the Charm is not instantiated until you have called
+        :meth:`.begin()`.
+        """
+        return self._charm
+
+    @property
+    def model(self) -> model.Model:
+        """Return the :class:`~ops.model.Model` that is being driven by this Harness."""
+        return self._model
+
+    @property
+    def framework(self) -> framework.Framework:
+        """Return the Framework that is being driven by this Harness."""
+        return self._framework
+
+    @property
+    def _resource_dir(self) -> pathlib.Path:
+        if self._lazy_resource_dir is not None:
+            return self._lazy_resource_dir
+
+        self.__resource_dir = tempfile.TemporaryDirectory()
+        self._lazy_resource_dir = pathlib.Path(self.__resource_dir.name)
+        self._finalizer = weakref.finalize(self, self.__resource_dir.cleanup)
+        return self._lazy_resource_dir
+
+    def begin(self) -> None:
+        """Instantiate the Charm and start handling events.
+
+        Before calling begin(), there is no Charm instance, so changes to the Model won't emit
+        events. You must call begin before :attr:`.charm` is valid.
+        """
+        if self._charm is not None:
+            raise RuntimeError('cannot call the begin method on the harness more than once')
+
+        # The Framework adds attributes to class objects for events, etc. As such, we can't re-use
+        # the original class against multiple Frameworks. So create a locally defined class
+        # and register it.
+        # TODO: jam 2020-03-16 We are looking to change this to instance attributes instead of
+        #       class attributes, which should clean up this ugliness. The API can stay the same.
+        class TestEvents(self._charm_cls.on.__class__):
+            pass
+
+        TestEvents.__name__ = self._charm_cls.on.__class__.__name__
+
+        class TestCharm(self._charm_cls):
+            on = TestEvents()
+
+        # Note: jam 2020-03-01 This is so that errors in testing say MyCharm has no attribute foo,
+        # rather than TestCharm has no attribute foo.
+        TestCharm.__name__ = self._charm_cls.__name__
+        self._charm = TestCharm(self._framework)
+
+    def _create_meta(self, charm_metadata, action_metadata):
+        """Create a CharmMeta object.
+
+        Handle the cases where a user doesn't supply explicit metadata snippets.
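+
+        Looks for metadata.yaml/actions.yaml next to the charm class when no snippet
+        is supplied, falling back to a minimal 'name: test-charm' metadata (see below).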
+ """ + filename = inspect.getfile(self._charm_cls) + charm_dir = pathlib.Path(filename).parents[1] + + if charm_metadata is None: + metadata_path = charm_dir / 'metadata.yaml' + if metadata_path.is_file(): + charm_metadata = metadata_path.read_text() + self._charm_dir = charm_dir + else: + # The simplest of metadata that the framework can support + charm_metadata = 'name: test-charm' + elif isinstance(charm_metadata, str): + charm_metadata = dedent(charm_metadata) + + if action_metadata is None: + actions_path = charm_dir / 'actions.yaml' + if actions_path.is_file(): + action_metadata = actions_path.read_text() + self._charm_dir = charm_dir + elif isinstance(action_metadata, str): + action_metadata = dedent(action_metadata) + + return charm.CharmMeta.from_yaml(charm_metadata, action_metadata) + + def add_oci_resource(self, resource_name: str, + contents: typing.Mapping[str, str] = None) -> None: + """Add oci resources to the backend. + + This will register an oci resource and create a temporary file for processing metadata + about the resource. A default set of values will be used for all the file contents + unless a specific contents dict is provided. + + Args: + resource_name: Name of the resource to add custom contents to. + contents: Optional custom dict to write for the named resource. + """ + if not contents: + contents = {'registrypath': 'registrypath', + 'username': 'username', + 'password': 'password', + } + if resource_name not in self._meta.resources.keys(): + raise RuntimeError('Resource {} is not a defined resources'.format(resource_name)) + if self._meta.resources[resource_name].type != "oci-image": + raise RuntimeError('Resource {} is not an OCI Image'.format(resource_name)) + resource_dir = self._resource_dir / resource_name + resource_dir.mkdir(exist_ok=True) + resource_file = resource_dir / "contents.yaml" + with resource_file.open('wt', encoding='utf8') as resource_yaml: + yaml.dump(contents, resource_yaml) + self._backend._resources_map[resource_name] = resource_file + + def populate_oci_resources(self) -> None: + """Populate all OCI resources.""" + for name, data in self._meta.resources.items(): + if data.type == "oci-image": + self.add_oci_resource(name) + + def disable_hooks(self) -> None: + """Stop emitting hook events when the model changes. + + This can be used by developers to stop changes to the model from emitting events that + the charm will react to. Call :meth:`.enable_hooks` + to re-enable them. + """ + self._hooks_enabled = False + + def enable_hooks(self) -> None: + """Re-enable hook events from charm.on when the model is changed. + + By default hook events are enabled once you call :meth:`.begin`, + but if you have used :meth:`.disable_hooks`, this can be used to + enable them again. + """ + self._hooks_enabled = True + + def _next_relation_id(self): + rel_id = self._relation_id_counter + self._relation_id_counter += 1 + return rel_id + + def add_relation(self, relation_name: str, remote_app: str) -> int: + """Declare that there is a new relation between this app and `remote_app`. + + Args: + relation_name: The relation on Charm that is being related to + remote_app: The name of the application that is being related to + + Return: + The relation_id created by this add_relation. 
+ """ + rel_id = self._next_relation_id() + self._backend._relation_ids_map.setdefault(relation_name, []).append(rel_id) + self._backend._relation_names[rel_id] = relation_name + self._backend._relation_list_map[rel_id] = [] + self._backend._relation_data[rel_id] = { + remote_app: {}, + self._backend.unit_name: {}, + self._backend.app_name: {}, + } + # Reload the relation_ids list + if self._model is not None: + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return rel_id + relation = self._model.get_relation(relation_name, rel_id) + app = self._model.get_app(remote_app) + self._charm.on[relation_name].relation_created.emit( + relation, app) + return rel_id + + def add_relation_unit(self, relation_id: int, remote_unit_name: str) -> None: + """Add a new unit to a relation. + + Example:: + + rel_id = harness.add_relation('db', 'postgresql') + harness.add_relation_unit(rel_id, 'postgresql/0') + + This will trigger a `relation_joined` event and a `relation_changed` event. + + Args: + relation_id: The integer relation identifier (as returned by add_relation). + remote_unit_name: A string representing the remote unit that is being added. + Return: + None + """ + self._backend._relation_list_map[relation_id].append(remote_unit_name) + self._backend._relation_data[relation_id][remote_unit_name] = {} + relation_name = self._backend._relation_names[relation_id] + # Make sure that the Model reloads the relation_list for this relation_id, as well as + # reloading the relation data for this unit. + if self._model is not None: + remote_unit = self._model.get_unit(remote_unit_name) + relation = self._model.get_relation(relation_name, relation_id) + unit_cache = relation.data.get(remote_unit, None) + if unit_cache is not None: + unit_cache._invalidate() + self._model.relations._invalidate(relation_name) + if self._charm is None or not self._hooks_enabled: + return + self._charm.on[relation_name].relation_joined.emit( + relation, remote_unit.app, remote_unit) + + def get_relation_data(self, relation_id: int, app_or_unit: str) -> typing.Mapping: + """Get the relation data bucket for a single app or unit in a given relation. + + This ignores all of the safety checks of who can and can't see data in relations (eg, + non-leaders can't read their own application's relation data because there are no events + that keep that data up-to-date for the unit). + + Args: + relation_id: The relation whose content we want to look at. + app_or_unit: The name of the application or unit whose data we want to read + Return: + a dict containing the relation data for `app_or_unit` or None. + Raises: + KeyError: if relation_id doesn't exist + """ + return self._backend._relation_data[relation_id].get(app_or_unit, None) + + def get_workload_version(self) -> str: + """Read the workload version that was set by the unit.""" + return self._backend._workload_version + + def set_model_name(self, name: str) -> None: + """Set the name of the Model that this is representing. + + This cannot be called once begin() has been called. But it lets you set the value that + will be returned by Model.name. + """ + if self._charm is not None: + raise RuntimeError('cannot set the Model name after begin()') + self._backend.model_name = name + + def update_relation_data( + self, + relation_id: int, + app_or_unit: str, + key_values: typing.Mapping, + ) -> None: + """Update the relation data for a given unit or application in a given relation. 
+ + This also triggers the `relation_changed` event for this relation_id. + + Args: + relation_id: The integer relation_id representing this relation. + app_or_unit: The unit or application name that is being updated. + This can be the local or remote application. + key_values: Each key/value will be updated in the relation data. + """ + relation_name = self._backend._relation_names[relation_id] + relation = self._model.get_relation(relation_name, relation_id) + if '/' in app_or_unit: + entity = self._model.get_unit(app_or_unit) + else: + entity = self._model.get_app(app_or_unit) + rel_data = relation.data.get(entity, None) + if rel_data is not None: + # rel_data may have cached now-stale data, so _invalidate() it. + # Note, this won't cause the data to be loaded if it wasn't already. + rel_data._invalidate() + + new_values = self._backend._relation_data[relation_id][app_or_unit].copy() + for k, v in key_values.items(): + if v == '': + new_values.pop(k, None) + else: + new_values[k] = v + self._backend._relation_data[relation_id][app_or_unit] = new_values + + if app_or_unit == self._model.unit.name: + # No events for our own unit + return + if app_or_unit == self._model.app.name: + # updating our own app only generates an event if it is a peer relation and we + # aren't the leader + is_peer = self._meta.relations[relation_name].role.is_peer() + if not is_peer: + return + if self._model.unit.is_leader(): + return + self._emit_relation_changed(relation_id, app_or_unit) + + def _emit_relation_changed(self, relation_id, app_or_unit): + if self._charm is None or not self._hooks_enabled: + return + rel_name = self._backend._relation_names[relation_id] + relation = self.model.get_relation(rel_name, relation_id) + if '/' in app_or_unit: + app_name = app_or_unit.split('/')[0] + unit_name = app_or_unit + app = self.model.get_app(app_name) + unit = self.model.get_unit(unit_name) + args = (relation, app, unit) + else: + app_name = app_or_unit + app = self.model.get_app(app_name) + args = (relation, app) + self._charm.on[rel_name].relation_changed.emit(*args) + + def update_config( + self, + key_values: typing.Mapping[str, str] = None, + unset: typing.Iterable[str] = (), + ) -> None: + """Update the config as seen by the charm. + + This will trigger a `config_changed` event. + + Args: + key_values: A Mapping of key:value pairs to update in config. + unset: An iterable of keys to remove from Config. (Note that this does + not currently reset the config values to the default defined in config.yaml.) + """ + config = self._backend._config + if key_values is not None: + for key, value in key_values.items(): + config[key] = value + for key in unset: + config.pop(key, None) + # NOTE: jam 2020-03-01 Note that this sort of works "by accident". Config + # is a LazyMapping, but its _load returns a dict and this method mutates + # the dict that Config is caching. Arguably we should be doing some sort + # of charm.framework.model.config._invalidate() + if self._charm is None or not self._hooks_enabled: + return + self._charm.on.config_changed.emit() + + def set_leader(self, is_leader: bool = True) -> None: + """Set whether this unit is the leader or not. + + If this charm becomes a leader then `leader_elected` will be triggered. + + Args: + is_leader: True/False as to whether this unit is the leader. 
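+
+        Example::
+
+            harness.set_leader(True)
+            # If begin() has been called and the unit was not already the leader,
+            # this emits leader_elected on the charm.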
+ """ + was_leader = self._backend._is_leader + self._backend._is_leader = is_leader + # Note: jam 2020-03-01 currently is_leader is cached at the ModelBackend level, not in + # the Model objects, so this automatically gets noticed. + if is_leader and not was_leader and self._charm is not None and self._hooks_enabled: + self._charm.on.leader_elected.emit() + + def _get_backend_calls(self, reset: bool = True) -> list: + """Return the calls that we have made to the TestingModelBackend. + + This is useful mostly for testing the framework itself, so that we can assert that we + do/don't trigger extra calls. + + Args: + reset: If True, reset the calls list back to empty, if false, the call list is + preserved. + Return: + ``[(call1, args...), (call2, args...)]`` + """ + calls = self._backend._calls.copy() + if reset: + self._backend._calls.clear() + return calls + + +def _record_calls(cls): + """Replace methods on cls with methods that record that they have been called. + + Iterate all attributes of cls, and for public methods, replace them with a wrapped method + that records the method called along with the arguments and keyword arguments. + """ + for meth_name, orig_method in cls.__dict__.items(): + if meth_name.startswith('_'): + continue + + def decorator(orig_method): + def wrapped(self, *args, **kwargs): + full_args = (orig_method.__name__,) + args + if kwargs: + full_args = full_args + (kwargs,) + self._calls.append(full_args) + return orig_method(self, *args, **kwargs) + return wrapped + + setattr(cls, meth_name, decorator(orig_method)) + return cls + + +@_record_calls +class _TestingModelBackend: + """This conforms to the interface for ModelBackend but provides canned data. + + DO NOT use this class directly, it is used by `Harness`_ to drive the model. + `Harness`_ is responsible for maintaining the internal consistency of the values here, + as the only public methods of this type are for implementing ModelBackend. + """ + + def __init__(self, unit_name, meta): + self.unit_name = unit_name + self.app_name = self.unit_name.split('/')[0] + self.model_name = None + self._calls = [] + self._meta = meta + self._is_leader = None + self._relation_ids_map = {} # relation name to [relation_ids,...] + self._relation_names = {} # reverse map from relation_id to relation_name + self._relation_list_map = {} # relation_id: [unit_name,...] 
+ self._relation_data = {} # {relation_id: {name: data}} + self._config = {} + self._is_leader = False + self._resources_map = {} + self._pod_spec = None + self._app_status = {'status': 'unknown', 'message': ''} + self._unit_status = {'status': 'maintenance', 'message': ''} + self._workload_version = None + + def relation_ids(self, relation_name): + try: + return self._relation_ids_map[relation_name] + except KeyError as e: + if relation_name not in self._meta.relations: + raise model.ModelError('{} is not a known relation'.format(relation_name)) from e + return [] + + def relation_list(self, relation_id): + try: + return self._relation_list_map[relation_id] + except KeyError as e: + raise model.RelationNotFoundError from e + + def relation_get(self, relation_id, member_name, is_app): + if is_app and '/' in member_name: + member_name = member_name.split('/')[0] + if relation_id not in self._relation_data: + raise model.RelationNotFoundError() + return self._relation_data[relation_id][member_name].copy() + + def relation_set(self, relation_id, key, value, is_app): + relation = self._relation_data[relation_id] + if is_app: + bucket_key = self.app_name + else: + bucket_key = self.unit_name + if bucket_key not in relation: + relation[bucket_key] = {} + bucket = relation[bucket_key] + if value == '': + bucket.pop(key, None) + else: + bucket[key] = value + + def config_get(self): + return self._config + + def is_leader(self): + return self._is_leader + + def application_version_set(self, version): + self._workload_version = version + + def resource_get(self, resource_name): + return self._resources_map[resource_name] + + def pod_spec_set(self, spec, k8s_resources): + self._pod_spec = (spec, k8s_resources) + + def status_get(self, *, is_app=False): + if is_app: + return self._app_status + else: + return self._unit_status + + def status_set(self, status, message='', *, is_app=False): + if is_app: + self._app_status = {'status': status, 'message': message} + else: + self._unit_status = {'status': status, 'message': message} + + def storage_list(self, name): + raise NotImplementedError(self.storage_list) + + def storage_get(self, storage_name_id, attribute): + raise NotImplementedError(self.storage_get) + + def storage_add(self, name, count=1): + raise NotImplementedError(self.storage_add) + + def action_get(self): + raise NotImplementedError(self.action_get) + + def action_set(self, results): + raise NotImplementedError(self.action_set) + + def action_log(self, message): + raise NotImplementedError(self.action_log) + + def action_fail(self, message=''): + raise NotImplementedError(self.action_fail) + + def network_get(self, endpoint_name, relation_id=None): + raise NotImplementedError(self.network_get) diff --git a/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/version.py b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/version.py new file mode 100644 index 00000000..15e54785 --- /dev/null +++ b/hackfest_firewall_pnf/charms/vyos-config-src/lib/ops/version.py @@ -0,0 +1,50 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +from pathlib import Path + +__all__ = ('version',) + +_FALLBACK = '0.8' # this gets bumped after release + + +def _get_version(): + version = _FALLBACK + ".dev0+unknown" + + p = Path(__file__).parent + if (p.parent / '.git').exists(): + try: + proc = subprocess.run( + ['git', 'describe', '--tags', '--dirty'], + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + cwd=p, + check=True) + except Exception: + pass + else: + version = proc.stdout.strip().decode('utf8') + if '-' in version: + # version will look like <tag>-<#commits>-g<hex>[-dirty] + # in terms of PEP 440, the tag we'll make sure is a 'public version identifier'; + # everything after the first - needs to be a 'local version' + public, local = version.split('-', 1) + version = public + '+' + local.replace('-', '.') + # version now <tag>+<#commits>.g<hex>[.dirty] + # which is PEP440-compliant (as long as <tag> is :-) + return version + + +version = _get_version() -- GitLab