Major improvement in OSM charms 85/10385/17
author David Garcia <david.garcia@canonical.com>
Wed, 24 Feb 2021 12:48:22 +0000 (13:48 +0100)
committer garciadav <david.garcia@canonical.com>
Fri, 5 Mar 2021 10:04:33 +0000 (11:04 +0100)
- Adapt all new operator charms to use the same pattern. They all now use
this library, which encapsulates the common logic shared by the charms:
https://github.com/davigar15/ops-lib-charmed-osm (see the pattern sketch
below). The library will eventually be moved to GitLab once a PyPI package
is available
- Add unit tests to all charms
- Add unit tests to all charms
- Modify installer and bundles to point to the new charms
- Improve the build.sh script for building the charms

Change-Id: I0896ceb082d1b6a76b3560c07482a4135a220a3f
Signed-off-by: David Garcia <david.garcia@canonical.com>
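
For reference, the shared library provides a CharmedOsmBase class together
with pod-spec builders (PodSpecV3Builder, ContainerV3Builder, FilesV3Builder,
IngressResourceV3Builder) and a ModelValidator for charm config, as can be
seen in the grafana charm.py diff further down this page. Below is a minimal
sketch of the pattern the reworked charms follow, assuming the builder API
shown in that diff; the "ExampleCharm" name, the log_level field and the PORT
value are illustrative placeholders, not taken from any specific charm.

    #!/usr/bin/env python3
    # Minimal, illustrative sketch of the common charm pattern (hypothetical
    # "example" charm; config field and port are placeholders).
    import logging
    from typing import NoReturn

    from ops.main import main
    from opslib.osm.charm import CharmedOsmBase
    from opslib.osm.pod import ContainerV3Builder, PodSpecV3Builder
    from opslib.osm.validator import ModelValidator, validator

    logger = logging.getLogger(__name__)

    PORT = 8000  # placeholder port


    class ConfigModel(ModelValidator):
        # Charm config options become typed fields, validated before the
        # pod spec is built.
        log_level: str

        @validator("log_level")
        def validate_log_level(cls, v):
            if v not in ("INFO", "DEBUG"):
                raise ValueError("log_level must be INFO or DEBUG")
            return v


    class ExampleCharm(CharmedOsmBase):
        """Example charm following the shared ops-lib-charmed-osm pattern."""

        def __init__(self, *args) -> NoReturn:
            # The base class provides configure_pod and the common event
            # handling; the charm only names its OCI image resource.
            super().__init__(*args, oci_image="image")

        def build_pod_spec(self, image_info):
            # Validate config, then describe the pod with the builders.
            config = ConfigModel(**dict(self.config))
            logger.debug("Configured log level: %s", config.log_level)
            pod_spec_builder = PodSpecV3Builder()
            container_builder = ContainerV3Builder(self.app.name, image_info)
            container_builder.add_port(name=self.app.name, port=PORT)
            pod_spec_builder.add_container(container_builder.build())
            return pod_spec_builder.build()


    if __name__ == "__main__":
        main(ExampleCharm)

The unit tests added for each charm exercise this build_pod_spec path through
the ops testing Harness, as shown in the updated tests/test_charm.py diffs
below.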
115 files changed:
devops-stages/stage-test.sh
installers/charm/build.sh
installers/charm/bundles/osm-ha/bundle.yaml
installers/charm/bundles/osm/bundle.yaml
installers/charm/generate_bundle.py
installers/charm/grafana/.gitignore
installers/charm/grafana/.jujuignore [new file with mode: 0644]
installers/charm/grafana/.yamllint.yaml
installers/charm/grafana/requirements-test.txt [new file with mode: 0644]
installers/charm/grafana/requirements.txt
installers/charm/grafana/src/charm.py
installers/charm/grafana/src/pod_spec.py
installers/charm/grafana/tests/test_charm.py
installers/charm/grafana/tests/test_pod_spec.py
installers/charm/grafana/tox.ini
installers/charm/keystone/.gitignore
installers/charm/keystone/.jujuignore [new file with mode: 0644]
installers/charm/keystone/.yamllint.yaml
installers/charm/keystone/config.yaml
installers/charm/keystone/metadata.yaml
installers/charm/keystone/requirements-test.txt [new file with mode: 0644]
installers/charm/keystone/requirements.txt
installers/charm/keystone/src/charm.py
installers/charm/keystone/tests/__init__.py
installers/charm/keystone/tests/test_charm.py
installers/charm/keystone/tox.ini
installers/charm/lcm/.gitignore
installers/charm/lcm/.jujuignore [new file with mode: 0644]
installers/charm/lcm/.yamllint.yaml
installers/charm/lcm/config.yaml
installers/charm/lcm/metadata.yaml
installers/charm/lcm/requirements-test.txt [new file with mode: 0644]
installers/charm/lcm/requirements.txt
installers/charm/lcm/src/charm.py
installers/charm/lcm/src/pod_spec.py
installers/charm/lcm/tests/test_charm.py
installers/charm/lcm/tox.ini
installers/charm/local_osm_bundle.yaml [new file with mode: 0644]
installers/charm/local_osm_ha_bundle.yaml [new file with mode: 0644]
installers/charm/mon/.gitignore
installers/charm/mon/.jujuignore [new file with mode: 0644]
installers/charm/mon/.yamllint.yaml
installers/charm/mon/config.yaml
installers/charm/mon/requirements-test.txt [new file with mode: 0644]
installers/charm/mon/requirements.txt
installers/charm/mon/src/charm.py
installers/charm/mon/src/pod_spec.py
installers/charm/mon/tests/test_charm.py
installers/charm/mon/tests/test_pod_spec.py
installers/charm/mon/tox.ini
installers/charm/mongodb-exporter/tests/test_charm.py
installers/charm/nbi/.gitignore
installers/charm/nbi/.jujuignore [new file with mode: 0644]
installers/charm/nbi/.yamllint.yaml
installers/charm/nbi/metadata.yaml
installers/charm/nbi/requirements-test.txt [new file with mode: 0644]
installers/charm/nbi/requirements.txt
installers/charm/nbi/src/charm.py
installers/charm/nbi/src/pod_spec.py
installers/charm/nbi/tests/test_charm.py
installers/charm/nbi/tox.ini
installers/charm/ng-ui/.gitignore
installers/charm/ng-ui/.jujuignore [new file with mode: 0644]
installers/charm/ng-ui/.yamllint.yaml
installers/charm/ng-ui/metadata.yaml
installers/charm/ng-ui/requirements-test.txt [new file with mode: 0644]
installers/charm/ng-ui/requirements.txt
installers/charm/ng-ui/src/charm.py
installers/charm/ng-ui/src/pod_spec.py
installers/charm/ng-ui/tests/__init__.py
installers/charm/ng-ui/tests/test_charm.py
installers/charm/ng-ui/tox.ini
installers/charm/pla/.gitignore
installers/charm/pla/.jujuignore [new file with mode: 0644]
installers/charm/pla/.yamllint.yaml
installers/charm/pla/config.yaml
installers/charm/pla/metadata.yaml
installers/charm/pla/requirements-test.txt [new file with mode: 0644]
installers/charm/pla/requirements.txt
installers/charm/pla/src/charm.py
installers/charm/pla/tests/__init__.py
installers/charm/pla/tests/test_charm.py
installers/charm/pla/tox.ini
installers/charm/pol/.gitignore
installers/charm/pol/.jujuignore [new file with mode: 0644]
installers/charm/pol/.yamllint.yaml
installers/charm/pol/requirements-test.txt [new file with mode: 0644]
installers/charm/pol/requirements.txt
installers/charm/pol/src/charm.py
installers/charm/pol/src/pod_spec.py
installers/charm/pol/tests/test_charm.py
installers/charm/pol/tox.ini
installers/charm/prometheus/.gitignore
installers/charm/prometheus/.jujuignore [new file with mode: 0644]
installers/charm/prometheus/.yamllint.yaml
installers/charm/prometheus/config.yaml
installers/charm/prometheus/metadata.yaml
installers/charm/prometheus/requirements-test.txt [new file with mode: 0644]
installers/charm/prometheus/requirements.txt
installers/charm/prometheus/src/charm.py
installers/charm/prometheus/tests/test_charm.py
installers/charm/prometheus/tests/test_pod_spec.py
installers/charm/prometheus/tox.ini
installers/charm/release_edge.sh
installers/charm/ro/.gitignore
installers/charm/ro/.jujuignore [new file with mode: 0644]
installers/charm/ro/.yamllint.yaml
installers/charm/ro/metadata.yaml
installers/charm/ro/requirements-test.txt [new file with mode: 0644]
installers/charm/ro/requirements.txt
installers/charm/ro/src/charm.py
installers/charm/ro/src/pod_spec.py
installers/charm/ro/tests/test_charm.py
installers/charm/ro/tox.ini
installers/charmed_install.sh

index 3e5196f..bea5479 100755 (executable)
@@ -18,7 +18,7 @@ CURRENT_DIR=`pwd`
 
 # Execute tests for charms
 CHARM_PATH="./installers/charm"
-CHARM_NAMES="keystone lcm mon nbi ng-ui pla pol prometheus ro"
+CHARM_NAMES="keystone lcm mon nbi ng-ui pla pol prometheus ro grafana"
 for charm in $CHARM_NAMES; do
     cd $CHARM_PATH/$charm
     tox --recreate
index b5c0c0b..e7649d0 100755 (executable)
@@ -17,21 +17,29 @@ function build() {
     cd $1 && tox -e build && cd ..
 }
 
-build 'lcm-k8s'
-build 'mon-k8s'
-build 'nbi-k8s'
-build 'pol-k8s'
-build 'ro-k8s'
+# reactive_charms=""
+# for charm_directory in $reactive_charms; do
+#     echo "Building charm $charm_directory..."
+#     cd $charm_directory
+#     charmcraft build
+#     cd ..
+# done
+
+# build 'lcm-k8s'
+# build 'mon-k8s'
+# build 'nbi-k8s'
+# build 'pol-k8s'
+# build 'ro-k8s'
 # build 'ui-k8s'
 
-charms="nbi pla pol mon lcm ng-ui keystone"
+charms="ro nbi pla pol mon lcm ng-ui keystone grafana prometheus keystone mariadb-k8s mongodb-k8s zookeeper-k8s kafka-k8s"
 if [ -z `which charmcraft` ]; then
-    sudo snap install charmcraft --beta
+    sudo snap install charmcraft --edge
 fi
 
 for charm_directory in $charms; do
     echo "Building charm $charm_directory..."
-    cd $charm_directory
-    charmcraft build
-    cd ..
+    cd $charm_directory
+    build $charm_directory
+    cd ..
 done
index ff666c0..8069543 100644 (file)
@@ -22,9 +22,6 @@ applications:
     storage:
       database: 100M
     options:
-      # client-port: 2181
-      server-port: 2888
-      leader-election-port: 3888
       zookeeper-units: 3
     annotations:
       gui-x: 0
@@ -40,10 +37,6 @@ applications:
       password: manopw
       root_password: osm4u
       user: mano
-      database: database
-      mysql_port: "3306"
-      query-cache-type: "OFF"
-      query-cache-size: 0
       ha-mode: true
     annotations:
       gui-x: -250
@@ -54,13 +47,10 @@ applications:
     scale: 3
     series: kubernetes
     storage:
-      database: 200M
+      database: 100M
     options:
-      advertised-hostname: "kafka-k8s"
-      advertised-port: 9092
-      kafka-units: 3
       zookeeper-units: 3
-      zookeeper-service-name: zookeeper-k8s-endpoints
+      kafka-units: 3
     annotations:
       gui-x: 0
       gui-y: 300
@@ -70,183 +60,142 @@ applications:
     scale: 3
     series: kubernetes
     storage:
-      database: 300M
+      database: 50M
     options:
-      advertised-port: 27017
       replica-set: rs0
       namespace: osm
-      service-name: mongodb-k8s-endpoints
-      cluster-domain: cluster.local
       enable-sidecar: true
     annotations:
       gui-x: 0
       gui-y: 50
-  nbi-k8s:
-    charm: "%(prefix)s/nbi-k8s%(suffix)s"
+  nbi:
+    charm: "cs:~charmed-osm/nbi-5"
     scale: 3
     series: kubernetes
-    storage:
-      packages: 100M
     options:
-      log_level: "INFO"
-      DATABASE_COMMONKEY: osm
-      auth-backend: keystone
+      database_commonkey: osm
+      auth_backend: keystone
+      log_level: DEBUG
     annotations:
       gui-x: 0
       gui-y: -200
-  ro-k8s:
-    charm: "%(prefix)s/ro-k8s%(suffix)s"
+  ro:
+    charm: "cs:~charmed-osm/ro-0"
     scale: 3
     series: kubernetes
-    storage:
-      log: 50M
     options:
-      vim_database: "mano_vim_db"
-      ro_database: "mano_db"
-      OPENMANO_TENANT: "osm"
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 300
   ng-ui:
-    charm: "%(prefix)s/ng-ui%(suffix)s"
+    charm: "cs:~charmed-osm/ng-ui-16"
     scale: 3
     series: kubernetes
-    options:
-      port: 80
-      https_port: 443
-      server_name: localhost
-      client_max_body_size: 15M
     annotations:
       gui-x: 500
       gui-y: 100
-  lcm-k8s:
-    charm: "%(prefix)s/lcm-k8s%(suffix)s"
+  lcm:
+    charm: "cs:~charmed-osm/lcm-0"
     scale: 3
     series: kubernetes
-    storage:
-      packages: 100M
     options:
-      vca_host: vca
-      vca_port: 17070
-      vca_user: admin
-      vca_password: secret
-      vca_pubkey: pubkey
-      vca_cacert: cacert
-      use_external_vca: true
-      DATABASE_COMMONKEY: osm
+      database_commonkey: osm
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 50
-  mon-k8s:
-    charm: "%(prefix)s/mon-k8s%(suffix)s"
+  mon:
+    charm: "cs:~charmed-osm/mon-0"
     scale: 1
     series: kubernetes
-    storage:
-      database: 100M
     options:
-      OSMMON_OPENSTACK_DEFAULT_GRANULARITY: 300
-      OSMMON_GLOBAL_REQUEST_TIMEOUT: 10
-      OSMMON_GLOBAL_LOGLEVEL: INFO
-      OSMMON_DATABASE_COMMONKEY: osm
-      OSMMON_COLLECTOR_INTERVAL: 30
-      OSMMON_EVALUATOR_INTERVAL: 30
-      vca_host: vca
-      vca_user: admin
-      vca_password: secret
-      vca_cacert: cacert
-      use_external_vca: true
+      database_commonkey: osm
+      log_level: DEBUG
     annotations:
       gui-x: 250
       gui-y: 50
-  pol-k8s:
-    charm: "%(prefix)s/pol-k8s%(suffix)s"
+  pol:
+    charm: "cs:~charmed-osm/pol-0"
     scale: 3
     series: kubernetes
-    storage:
-      database: 100M
     options:
-      log_level: INFO
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 550
   pla:
-    charm: "%(prefix)s/pla%(suffix)s"
+    charm: "cs:~charmed-osm/pla-6"
     scale: 3
     series: kubernetes
+    options:
+      log_level: DEBUG
     annotations:
       gui-x: 500
       gui-y: -200
-  prometheus-k8s:
-    charm: "cs:~charmed-osm/prometheus-k8s"
-    channel: "stable"
+  prometheus:
+    charm: "cs:~charmed-osm/prometheus-0"
     scale: 1
     series: kubernetes
     storage:
-      database: 100M
+      data: 50M
     options:
-      advertised-port: 9090
-      web-subpath: /
-      default-target: "mon-k8s:8000"
+      default-target: "mon:8000"
     annotations:
       gui-x: 250
       gui-y: 300
-  grafana-k8s:
-    charm: "cs:~charmed-osm/grafana-k8s"
-    channel: "stable"
+  grafana:
+    charm: "cs:~charmed-osm/grafana-0"
     scale: 3
     series: kubernetes
     annotations:
       gui-x: 250
       gui-y: 550
   keystone:
-    charm: '%(prefix)s/keystone%(suffix)s'
-    channel: '%(channel)s'
+    charm: "cs:~charmed-osm/keystone-4"
     scale: 1
     series: kubernetes
     annotations:
       gui-x: -250
       gui-y: 550
-
 relations:
-  - - "kafka-k8s:zookeeper"
-    - "zookeeper-k8s:zookeeper"
-  - - "ro-k8s:mysql"
-    - "mariadb-k8s:mysql"
-  - - "nbi-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "nbi-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "lcm-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "lcm-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "mon-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "mon-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "pol-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "pol-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "lcm-k8s:ro"
-    - "ro-k8s:ro"
-  - - "prometheus-k8s:prometheus"
-    - "mon-k8s:prometheus"
-  - - "grafana-k8s:prometheus"
-    - "prometheus-k8s:prometheus"
-  - - "prometheus-k8s:prometheus"
-    - "nbi-k8s:prometheus"
-  - - "pla:kafka"
-    - "kafka-k8s:kafka"
-  - - "pla:mongo"
-    - "mongodb-k8s:mongo"
-  - - 'ng-ui:nbi'
-    - 'nbi-k8s:nbi'
-  - - 'keystone:db'
-    - 'mariadb-k8s:mysql'
-  - - 'keystone:keystone'
-    - 'nbi-k8s:keystone'
-  - - "ro-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "ro-k8s:kafka"
-    - "kafka-k8s:kafka"
+  - - grafana:prometheus
+    - prometheus:prometheus
+  - - kafka-k8s:zookeeper
+    - zookeeper-k8s:zookeeper
+  - - keystone:db
+    - mariadb-k8s:mysql
+  - - lcm:kafka
+    - kafka-k8s:kafka
+  - - lcm:mongodb
+    - mongodb-k8s:mongo
+  - - ro:ro
+    - lcm:ro
+  - - ro:kafka
+    - kafka-k8s:kafka
+  - - ro:mongodb
+    - mongodb-k8s:mongo
+  - - pol:kafka
+    - kafka-k8s:kafka
+  - - pol:mongodb
+    - mongodb-k8s:mongo
+  - - mon:mongodb
+    - mongodb-k8s:mongo
+  - - mon:kafka
+    - kafka-k8s:kafka
+  - - pla:kafka
+    - kafka-k8s:kafka
+  - - pla:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:kafka
+    - kafka-k8s:kafka
+  - - nbi:prometheus
+    - prometheus:prometheus
+  - - nbi:keystone
+    - keystone:keystone
+  - - mon:prometheus
+    - prometheus:prometheus
+  - - ng-ui:nbi
+    - nbi:nbi
index f1a846c..58a9707 100644 (file)
@@ -21,11 +21,6 @@ applications:
     series: kubernetes
     storage:
       database: 100M
-    options:
-      # client-port: 2181
-      server-port: 2888
-      leader-election-port: 3888
-      zookeeper-units: 1
     annotations:
       gui-x: 0
       gui-y: 550
@@ -40,11 +35,6 @@ applications:
       password: manopw
       root_password: osm4u
       user: mano
-      database: database
-      mysql_port: "3306"
-      query-cache-type: "OFF"
-      query-cache-size: 0
-      ha-mode: false
     annotations:
       gui-x: -250
       gui-y: -200
@@ -55,12 +45,6 @@ applications:
     series: kubernetes
     storage:
       database: 100M
-    options:
-      advertised-hostname: "kafka-k8s"
-      advertised-port: 9092
-      kafka-units: 1
-      zookeeper-units: 1
-      zookeeper-service-name: zookeeper-k8s-endpoints
     annotations:
       gui-x: 0
       gui-y: 300
@@ -72,180 +56,140 @@ applications:
     storage:
       database: 50M
     options:
-      advertised-port: 27017
       replica-set: rs0
       namespace: osm
-      service-name: mongodb-k8s-endpoints
-      cluster-domain: cluster.local
-      enable-sidecar: false
+      enable-sidecar: true
     annotations:
       gui-x: 0
       gui-y: 50
-  nbi-k8s:
-    charm: "%(prefix)s/nbi-k8s%(suffix)s"
+  nbi:
+    charm: "cs:~charmed-osm/nbi-5"
     scale: 1
     series: kubernetes
-    storage:
-      packages: 50M
     options:
-      log_level: "INFO"
-      DATABASE_COMMONKEY: osm
-      auth-backend: keystone
+      database_commonkey: osm
+      auth_backend: keystone
+      log_level: DEBUG
     annotations:
       gui-x: 0
       gui-y: -200
-  ro-k8s:
-    charm: "%(prefix)s/ro-k8s%(suffix)s"
+  ro:
+    charm: "cs:~charmed-osm/ro-0"
     scale: 1
     series: kubernetes
-    storage:
-      log: 50M
     options:
-      vim_database: "mano_vim_db"
-      ro_database: "mano_db"
-      OPENMANO_TENANT: "osm"
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 300
   ng-ui:
-    charm: "%(prefix)s/ng-ui%(suffix)s"
+    charm: "cs:~charmed-osm/ng-ui-16"
     scale: 1
     series: kubernetes
-    options:
-      port: 80
-      https_port: 443
-      server_name: localhost
-      client_max_body_size: 15M
     annotations:
       gui-x: 500
       gui-y: 100
-  lcm-k8s:
-    charm: "%(prefix)s/lcm-k8s%(suffix)s"
+  lcm:
+    charm: "cs:~charmed-osm/lcm-0"
     scale: 1
     series: kubernetes
-    storage:
-      packages: 50M
     options:
-      vca_host: vca
-      vca_port: 17070
-      vca_user: admin
-      vca_password: secret
-      vca_pubkey: pubkey
-      vca_cacert: cacert
-      use_external_vca: false
-      DATABASE_COMMONKEY: osm
+      database_commonkey: osm
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 50
-  mon-k8s:
-    charm: "%(prefix)s/mon-k8s%(suffix)s"
+  mon:
+    charm: "cs:~charmed-osm/mon-0"
     scale: 1
     series: kubernetes
-    storage:
-      database: 100M
     options:
-      OSMMON_OPENSTACK_DEFAULT_GRANULARITY: 300
-      OSMMON_GLOBAL_REQUEST_TIMEOUT: 10
-      OSMMON_GLOBAL_LOGLEVEL: INFO
-      OSMMON_DATABASE_COMMONKEY: osm
-      OSMMON_COLLECTOR_INTERVAL: 30
-      OSMMON_EVALUATOR_INTERVAL: 30
-      vca_host: vca
-      vca_user: admin
-      vca_password: secret
-      vca_cacert: cacert
-      use_external_vca: false
+      database_commonkey: osm
+      log_level: DEBUG
     annotations:
       gui-x: 250
       gui-y: 50
-  pol-k8s:
-    charm: "%(prefix)s/pol-k8s%(suffix)s"
+  pol:
+    charm: "cs:~charmed-osm/pol-0"
     scale: 1
     series: kubernetes
-    storage:
-      database: 100M
     options:
-      log_level: INFO
+      log_level: DEBUG
     annotations:
       gui-x: -250
       gui-y: 550
   pla:
-    charm: "%(prefix)s/pla%(suffix)s"
+    charm: "cs:~charmed-osm/pla-6"
     scale: 1
     series: kubernetes
+    options:
+      log_level: DEBUG
     annotations:
       gui-x: 500
       gui-y: -200
-  prometheus-k8s:
-    charm: "cs:~charmed-osm/prometheus-k8s"
-    channel: "stable"
+  prometheus:
+    charm: "cs:~charmed-osm/prometheus-0"
     scale: 1
     series: kubernetes
     storage:
-      database: 50M
+      data: 50M
     options:
-      advertised-port: 9090
-      web-subpath: /
-      default-target: "mon-k8s:8000"
+      default-target: "mon:8000"
     annotations:
       gui-x: 250
       gui-y: 300
-  grafana-k8s:
-    charm: "cs:~charmed-osm/grafana-k8s"
-    channel: "stable"
+  grafana:
+    charm: "cs:~charmed-osm/grafana-0"
     scale: 1
     series: kubernetes
     annotations:
       gui-x: 250
       gui-y: 550
   keystone:
-    charm: '%(prefix)s/keystone%(suffix)s'
+    charm: "cs:~charmed-osm/keystone-4"
     scale: 1
     series: kubernetes
     annotations:
       gui-x: -250
       gui-y: 550
-
 relations:
-  - - "kafka-k8s:zookeeper"
-    - "zookeeper-k8s:zookeeper"
-  - - "ro-k8s:mysql"
-    - "mariadb-k8s:mysql"
-  - - "nbi-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "nbi-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "lcm-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "lcm-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "mon-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "mon-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "pol-k8s:kafka"
-    - "kafka-k8s:kafka"
-  - - "pol-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "lcm-k8s:ro"
-    - "ro-k8s:ro"
-  - - "prometheus-k8s:prometheus"
-    - "mon-k8s:prometheus"
-  - - "grafana-k8s:prometheus"
-    - "prometheus-k8s:prometheus"
-  - - "prometheus-k8s:prometheus"
-    - "nbi-k8s:prometheus"
-  - - "pla:kafka"
-    - "kafka-k8s:kafka"
-  - - "pla:mongo"
-    - "mongodb-k8s:mongo"
-  - - 'ng-ui:nbi'
-    - 'nbi-k8s:nbi'
-  - - 'keystone:db'
-    - 'mariadb-k8s:mysql'
-  - - 'keystone:keystone'
-    - 'nbi-k8s:keystone'
-  - - "ro-k8s:mongo"
-    - "mongodb-k8s:mongo"
-  - - "ro-k8s:kafka"
-    - "kafka-k8s:kafka"
+  - - grafana:prometheus
+    - prometheus:prometheus
+  - - kafka-k8s:zookeeper
+    - zookeeper-k8s:zookeeper
+  - - keystone:db
+    - mariadb-k8s:mysql
+  - - lcm:kafka
+    - kafka-k8s:kafka
+  - - lcm:mongodb
+    - mongodb-k8s:mongo
+  - - ro:ro
+    - lcm:ro
+  - - ro:kafka
+    - kafka-k8s:kafka
+  - - ro:mongodb
+    - mongodb-k8s:mongo
+  - - pol:kafka
+    - kafka-k8s:kafka
+  - - pol:mongodb
+    - mongodb-k8s:mongo
+  - - mon:mongodb
+    - mongodb-k8s:mongo
+  - - mon:kafka
+    - kafka-k8s:kafka
+  - - pla:kafka
+    - kafka-k8s:kafka
+  - - pla:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:kafka
+    - kafka-k8s:kafka
+  - - nbi:prometheus
+    - prometheus:prometheus
+  - - nbi:keystone
+    - keystone:keystone
+  - - mon:prometheus
+    - prometheus:prometheus
+  - - ng-ui:nbi
+    - nbi:nbi
index 95ffeaa..a82e016 100644 (file)
@@ -49,7 +49,7 @@ channel = args.channel if args.channel else "stable"
 path = HA_BUNDLE if args.ha else DEFAULT_BUNDLE
 destination = args.destination
 prefix = "." if args.local else BUNDLE_PREFIX
-suffix = "/release" if args.local else ""
+suffix = "/build" if args.local else ""
 
 data = {
     "channel": channel,
index 01ec352..2885df2 100644 (file)
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-grafana.charm
-.coverage*
+*.charm
+.coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/grafana/.jujuignore b/installers/charm/grafana/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+prometheus.charm
+.coverage
+.stestr
+cover
index f3ecd3a..d71fb69 100644 (file)
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
diff --git a/installers/charm/grafana/requirements-test.txt b/installers/charm/grafana/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index 884cf9f..f10a199 100644 (file)
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 1920e76..2d982d1 100755 (executable)
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
 import logging
-from typing import Dict, List, NoReturn
+from typing import Optional, NoReturn
+from ipaddress import ip_network
 
-from ops.charm import CharmBase
-from ops.framework import StoredState
 from ops.main import main
-from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit
-from oci_image import OCIImageResource, OCIImageResourceError
 
-from pod_spec import make_pod_spec
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
+
+from opslib.osm.pod import (
+    IngressResourceV3Builder,
+    FilesV3Builder,
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
+
+
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
+
+from opslib.osm.interfaces.prometheus import PrometheusClient
+
+from urllib.parse import urlparse
+from string import Template
+from pathlib import Path
 
 logger = logging.getLogger(__name__)
 
-GRAFANA_PORT = 3000
+PORT = 3000
 
 
-class RelationsMissing(Exception):
-    def __init__(self, missing_relations: List):
-        self.message = ""
-        if missing_relations and isinstance(missing_relations, list):
-            self.message += f'Waiting for {", ".join(missing_relations)} relation'
-            if "," in self.message:
-                self.message += "s"
+class ConfigModel(ModelValidator):
+    max_file_size: int
+    osm_dashboards: bool
+    site_url: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
 
+    @validator("max_file_size")
+    def validate_max_file_size(cls, v):
+        if v < 0:
+            raise ValueError("value must be equal or greater than 0")
+        return v
 
-class RelationDefinition:
-    def __init__(self, relation_name: str, keys: List, source_type):
-        if source_type != Application and source_type != Unit:
-            raise TypeError(
-                "source_type should be ops.model.Application or ops.model.Unit"
-            )
-        self.relation_name = relation_name
-        self.keys = keys
-        self.source_type = source_type
-
-
-def check_missing_relation_data(
-    data: Dict,
-    expected_relations_data: List[RelationDefinition],
-):
-    missing_relations = []
-    for relation_data in expected_relations_data:
-        if not all(
-            f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys
-        ):
-            missing_relations.append(relation_data.relation_name)
-    if missing_relations:
-        raise RelationsMissing(missing_relations)
-
-
-def get_relation_data(
-    charm: CharmBase,
-    relation_data: RelationDefinition,
-) -> Dict:
-    data = {}
-    relation = charm.model.get_relation(relation_data.relation_name)
-    if relation:
-        self_app_unit = (
-            charm.app if relation_data.source_type == Application else charm.unit
-        )
-        expected_type = relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
 
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
 
-class GrafanaCharm(CharmBase):
-    """Grafana Charm."""
 
-    state = StoredState()
+class GrafanaCharm(CharmedOsmBase):
+    """GrafanaCharm Charm."""
 
     def __init__(self, *args) -> NoReturn:
-        """Grafana Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        self.port = GRAFANA_PORT
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-
-        # Registering required relation events
-        self.framework.observe(self.on.prometheus_relation_changed, self.configure_pod)
-
-        # Registering required relation broken events
-        self.framework.observe(self.on.prometheus_relation_broken, self.configure_pod)
-
-    @property
-    def relations_requirements(self):
-        return [RelationDefinition("prometheus", ["host", "port"], Unit)]
-
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
-
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        relation_state = None
-        try:
-            relation_state = self.get_relation_state()
-        except RelationsMissing as exc:
-            logger.exception("Relation missing error")
-            self.unit.status = BlockedStatus(exc.message)
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                relation_state,
-                self.model.app.name,
-                self.port,
+        """Prometheus Charm constructor."""
+        super().__init__(*args, oci_image="image")
+
+        self.prometheus_client = PrometheusClient(self, "prometheus")
+        self.framework.observe(self.on["prometheus"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["prometheus"].relation_broken, self.configure_pod)
+
+    def _build_dashboard_files(self, config: ConfigModel):
+        files_builder = FilesV3Builder()
+        files_builder.add_file(
+            "dashboard_osm.yaml",
+            Path("files/default_dashboards.yaml").read_text(),
+        )
+        if config.osm_dashboards:
+            osm_dashboards_mapping = {
+                "kafka_exporter_dashboard.yaml": "files/kafka_exporter_dashboard.yaml",
+                "mongodb_exporter_dashboard.yaml": "files/mongodb_exporter_dashboard.yaml",
+                "mysql_exporter_dashboard.yaml": "files/mysql_exporter_dashboard.yaml",
+                "nodes_exporter_dashboard.yaml": "files/nodes_exporter_dashboard.yaml",
+                "summary_dashboard.yaml": "files/summary_dashboard.yaml",
+            }
+            for file_name, path in osm_dashboards_mapping.items():
+                files_builder.add_file(file_name, Path(path).read_text())
+        return files_builder.build()
+
+    def _build_datasources_files(self):
+        files_builder = FilesV3Builder()
+        files_builder.add_file(
+            "datasource_prometheus.yaml",
+            Template(Path("files/default_datasources.yaml").read_text()).substitute(
+                prometheus_host=self.prometheus_client.hostname,
+                prometheus_port=self.prometheus_client.port,
+            ),
+        )
+        return files_builder.build()
+
+    def _check_missing_dependencies(self):
+        missing_relations = []
+
+        if self.prometheus_client.is_missing_data_in_app():
+            missing_relations.append("prometheus")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies()
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_http_readiness_probe(
+            "/api/health",
+            PORT,
+            initial_delay_seconds=10,
+            period_seconds=10,
+            timeout_seconds=5,
+            failure_threshold=3,
+        )
+        container_builder.add_http_liveness_probe(
+            "/api/health",
+            PORT,
+            initial_delay_seconds=60,
+            timeout_seconds=30,
+            failure_threshold=10,
+        )
+        container_builder.add_volume_config(
+            "dashboards",
+            "/etc/grafana/provisioning/dashboards/",
+            self._build_dashboard_files(config),
+        )
+        container_builder.add_volume_config(
+            "datasources",
+            "/etc/grafana/provisioning/datasources/",
+            self._build_datasources_files(),
+        )
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        # Add ingress resources to pod spec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
+            annotations = {
+                "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+                    str(config.max_file_size) + "m"
+                    if config.max_file_size > 0
+                    else config.max_file_size
+                ),
+            }
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
             )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
 
-        self.unit.status = ActiveStatus("ready")
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
+
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
index 8d525f3..1701df0 100644 (file)
@@ -97,10 +97,12 @@ def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -
         else True,
     }
     relation_validators = {
-        "prometheus_host": lambda value, _: isinstance(value, str) and len(value) > 0,
-        "prometheus_port": lambda value, _: isinstance(value, str)
-        and len(value) > 0
-        and int(value) > 0,
+        "prometheus_hostname": lambda value, _: (
+            isinstance(value, str) and len(value) > 0
+        ),
+        "prometheus_port": lambda value, _: (
+            isinstance(value, str) and len(value) > 0 and int(value) > 0
+        ),
     }
     problems = []
 
@@ -117,6 +119,7 @@ def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -
             problems.append(key)
 
     if len(problems) > 0:
+        logger.debug(relation_data)
         raise ValueError("Errors found in: {}".format(", ".join(problems)))
 
     return True
@@ -246,33 +249,25 @@ def _make_pod_files(
             [
                 {
                     "path": "kafka_exporter_dashboard.yaml",
-                    "content": Template(
-                        Path("files/kafka_exporter_dashboard.yaml").read_text()
-                    ),
+                    "content": Path("files/kafka_exporter_dashboard.yaml").read_text(),
                 },
                 {
                     "path": "mongodb_exporter_dashboard.yaml",
-                    "content": Template(
-                        Path("files/mongodb_exporter_dashboard.yaml").read_text()
-                    ),
+                    "content": Path(
+                        "files/mongodb_exporter_dashboard.yaml"
+                    ).read_text(),
                 },
                 {
                     "path": "mysql_exporter_dashboard.yaml",
-                    "content": Template(
-                        Path("files/mysql_exporter_dashboard.yaml").read_text()
-                    ),
+                    "content": Path("files/mysql_exporter_dashboard.yaml").read_text(),
                 },
                 {
                     "path": "nodes_exporter_dashboard.yaml",
-                    "content": Template(
-                        Path("files/nodes_exporter_dashboard.yaml").read_text()
-                    ),
+                    "content": Path("files/nodes_exporter_dashboard.yaml").read_text(),
                 },
                 {
                     "path": "summary_dashboard.yaml",
-                    "content": Template(
-                        Path("files/summary_dashboard.yaml").read_text()
-                    ),
+                    "content": Path("files/summary_dashboard.yaml").read_text(),
                 },
             ]
         )
@@ -280,7 +275,7 @@ def _make_pod_files(
     dashboards.append(
         {
             "path": "dashboard_osm.yaml",
-            "content": Template(Path("files/default_dashboards.yaml").read_text()),
+            "content": Path("files/default_dashboards.yaml").read_text(),
         }
     )
 
@@ -376,7 +371,7 @@ def make_pod_spec(
 
     ports = _make_pod_ports(port)
     env_config = _make_pod_envconfig(config, relation_state)
-    files = _make_pod_files(relation_state)
+    files = _make_pod_files(config, relation_state)
     readiness_probe = _make_readiness_probe(port)
     liveness_probe = _make_liveness_probe(port)
     ingress_resources = _make_pod_ingress_resources(config, app_name, port)
index 5bbd15b..07beb4a 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
+# Copyright 2020 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-
-from ops.model import BlockedStatus
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import GrafanaCharm
 
 
 class TestCharm(unittest.TestCase):
-    """Grafana Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(GrafanaCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+            "site_url": "https://grafana.192.168.100.100.xip.io",
+            "osm_dashboards": True,
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
+    def test_config_changed(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        # Verifying status
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.assertTrue("prometheus" in self.harness.charm.unit.status.message)
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-    def test_on_start_with_relations_without_http(self) -> NoReturn:
-        """Test deployment."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "grafana",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "grafana",
-                            "containerPort": 3000,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                },
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://prometheus:9090\n"
-                                    ),
-                                },
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                },
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
 
-        self.harness.charm.on.start.emit()
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
 
-        # Initializing the prometheus relation
+    def test_with_prometheus(
+        self,
+    ) -> NoReturn:
+        """Test to see if prometheus relation is updated."""
         relation_id = self.harness.add_relation("prometheus", "prometheus")
         self.harness.add_relation_unit(relation_id, "prometheus/0")
         self.harness.update_relation_data(
             relation_id,
-            "prometheus/0",
-            {
-                "host": "prometheus",
-                "port": "9090",
-            },
+            "prometheus",
+            {"hostname": "prometheus", "port": 9090},
         )
 
         # Verifying status
         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_http(self) -> NoReturn:
-        """Test ingress resources with HTTP."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "grafana",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "grafana",
-                            "containerPort": 3000,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                },
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://prometheus:9090\n"
-                                    ),
-                                },
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                },
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "grafana-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "grafana",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "grafana",
-                                                    "servicePort": 3000,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ]
-                        },
-                    }
-                ],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the prometheus relation
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "prometheus/0",
-            {
-                "host": "prometheus",
-                "port": "9090",
-            },
-        )
-
-        self.harness.update_config({"site_url": "http://grafana"})
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_https(self) -> NoReturn:
-        """Test ingress resources with HTTPS."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "grafana",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "grafana",
-                            "containerPort": 3000,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                },
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://prometheus:9090\n"
-                                    ),
-                                },
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                },
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "grafana-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "grafana",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "grafana",
-                                                    "servicePort": 3000,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
-                        },
-                    }
-                ],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the prometheus relation
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "prometheus/0",
-            {
-                "host": "prometheus",
-                "port": "9090",
-            },
-        )
-
-        self.harness.update_config(
-            {"site_url": "https://grafana", "tls_secret_name": "grafana"}
-        )
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-        """Test ingress resources with HTTPS and ingress whitelist."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "grafana",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "grafana",
-                            "containerPort": 3000,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                },
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://prometheus:9090\n"
-                                    ),
-                                },
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": 3000,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                },
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "grafana-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "grafana",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "grafana",
-                                                    "servicePort": 3000,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
-                        },
-                    }
-                ],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the prometheus relation
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "prometheus/0",
-            {
-                "host": "prometheus",
-                "port": "9090",
-            },
-        )
-
-        self.harness.update_config(
-            {
-                "site_url": "https://grafana",
-                "tls_secret_name": "grafana",
-                "ingress_whitelist_source_range": "0.0.0.0/0",
-            }
-        )
-
-        pod_spec, _ = self.harness.get_pod_spec()
 
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
-        """Test to see if prometheus relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "prometheus/0",
-            {
-                "host": "prometheus",
-                "port": "9090",
-            },
-        )
+if __name__ == "__main__":
+    unittest.main()
 
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+# class TestCharm(unittest.TestCase):
+#     """Grafana Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.harness = Harness(GrafanaCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+
+#     def test_on_start_without_relations(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("prometheus", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+#     def test_on_start_with_relations_without_http(self) -> NoReturn:
+#         """Test deployment."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "grafana",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "grafana",
+#                             "containerPort": 3000,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {},
+#                     "volumeConfig": [
+#                         {
+#                             "name": "dashboards",
+#                             "mountPath": "/etc/grafana/provisioning/dashboards/",
+#                             "files": [
+#                                 {
+#                                     "path": "dashboard-osm.yml",
+#                                     "content": (
+#                                         "apiVersion: 1\n"
+#                                         "providers:\n"
+#                                         "  - name: 'osm'\n"
+#                                         "    orgId: 1\n"
+#                                         "    folder: ''\n"
+#                                         "    type: file\n"
+#                                         "    options:\n"
+#                                         "      path: /etc/grafana/provisioning/dashboards/\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                         {
+#                             "name": "datasources",
+#                             "mountPath": "/etc/grafana/provisioning/datasources/",
+#                             "files": [
+#                                 {
+#                                     "path": "datasource-prometheus.yml",
+#                                     "content": (
+#                                         "datasources:\n"
+#                                         "  - access: proxy\n"
+#                                         "    editable: true\n"
+#                                         "    is_default: true\n"
+#                                         "    name: osm_prometheus\n"
+#                                         "    orgId: 1\n"
+#                                         "    type: prometheus\n"
+#                                         "    version: 1\n"
+#                                         "    url: http://prometheus:9090\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                     ],
+#                     "kubernetes": {
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 10,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 30,
+#                             "failureThreshold": 10,
+#                         },
+#                     },
+#                 },
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the prometheus relation
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "prometheus",
+#             {
+#                 "hostname": "prometheus",
+#                 "port": "9090",
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_ingress_resources_with_http(self) -> NoReturn:
+#         """Test ingress resources with HTTP."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "grafana",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "grafana",
+#                             "containerPort": 3000,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {},
+#                     "volumeConfig": [
+#                         {
+#                             "name": "dashboards",
+#                             "mountPath": "/etc/grafana/provisioning/dashboards/",
+#                             "files": [
+#                                 {
+#                                     "path": "dashboard-osm.yml",
+#                                     "content": (
+#                                         "apiVersion: 1\n"
+#                                         "providers:\n"
+#                                         "  - name: 'osm'\n"
+#                                         "    orgId: 1\n"
+#                                         "    folder: ''\n"
+#                                         "    type: file\n"
+#                                         "    options:\n"
+#                                         "      path: /etc/grafana/provisioning/dashboards/\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                         {
+#                             "name": "datasources",
+#                             "mountPath": "/etc/grafana/provisioning/datasources/",
+#                             "files": [
+#                                 {
+#                                     "path": "datasource-prometheus.yml",
+#                                     "content": (
+#                                         "datasources:\n"
+#                                         "  - access: proxy\n"
+#                                         "    editable: true\n"
+#                                         "    is_default: true\n"
+#                                         "    name: osm_prometheus\n"
+#                                         "    orgId: 1\n"
+#                                         "    type: prometheus\n"
+#                                         "    version: 1\n"
+#                                         "    url: http://prometheus:9090\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                     ],
+#                     "kubernetes": {
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 10,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 30,
+#                             "failureThreshold": 10,
+#                         },
+#                     },
+#                 },
+#             ],
+#             "kubernetesResources": {
+#                 "ingressResources": [
+#                     {
+#                         "name": "grafana-ingress",
+#                         "annotations": {
+#                             "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+#                             "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+#                         },
+#                         "spec": {
+#                             "rules": [
+#                                 {
+#                                     "host": "grafana",
+#                                     "http": {
+#                                         "paths": [
+#                                             {
+#                                                 "path": "/",
+#                                                 "backend": {
+#                                                     "serviceName": "grafana",
+#                                                     "servicePort": 3000,
+#                                                 },
+#                                             }
+#                                         ]
+#                                     },
+#                                 }
+#                             ]
+#                         },
+#                     }
+#                 ],
+#             },
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the prometheus relation
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "prometheus",
+#             {
+#                 "hostname": "prometheus",
+#                 "port": "9090",
+#             },
+#         )
+
+#         self.harness.update_config({"site_url": "http://grafana"})
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_ingress_resources_with_https(self) -> NoReturn:
+#         """Test ingress resources with HTTPS."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "grafana",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "grafana",
+#                             "containerPort": 3000,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {},
+#                     "volumeConfig": [
+#                         {
+#                             "name": "dashboards",
+#                             "mountPath": "/etc/grafana/provisioning/dashboards/",
+#                             "files": [
+#                                 {
+#                                     "path": "dashboard-osm.yml",
+#                                     "content": (
+#                                         "apiVersion: 1\n"
+#                                         "providers:\n"
+#                                         "  - name: 'osm'\n"
+#                                         "    orgId: 1\n"
+#                                         "    folder: ''\n"
+#                                         "    type: file\n"
+#                                         "    options:\n"
+#                                         "      path: /etc/grafana/provisioning/dashboards/\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                         {
+#                             "name": "datasources",
+#                             "mountPath": "/etc/grafana/provisioning/datasources/",
+#                             "files": [
+#                                 {
+#                                     "path": "datasource-prometheus.yml",
+#                                     "content": (
+#                                         "datasources:\n"
+#                                         "  - access: proxy\n"
+#                                         "    editable: true\n"
+#                                         "    is_default: true\n"
+#                                         "    name: osm_prometheus\n"
+#                                         "    orgId: 1\n"
+#                                         "    type: prometheus\n"
+#                                         "    version: 1\n"
+#                                         "    url: http://prometheus:9090\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                     ],
+#                     "kubernetes": {
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 10,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 30,
+#                             "failureThreshold": 10,
+#                         },
+#                     },
+#                 },
+#             ],
+#             "kubernetesResources": {
+#                 "ingressResources": [
+#                     {
+#                         "name": "grafana-ingress",
+#                         "annotations": {
+#                             "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+#                         },
+#                         "spec": {
+#                             "rules": [
+#                                 {
+#                                     "host": "grafana",
+#                                     "http": {
+#                                         "paths": [
+#                                             {
+#                                                 "path": "/",
+#                                                 "backend": {
+#                                                     "serviceName": "grafana",
+#                                                     "servicePort": 3000,
+#                                                 },
+#                                             }
+#                                         ]
+#                                     },
+#                                 }
+#                             ],
+#                             "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
+#                         },
+#                     }
+#                 ],
+#             },
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the prometheus relation
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "prometheus",
+#             {
+#                 "hostname": "prometheus",
+#                 "port": "9090",
+#             },
+#         )
+
+#         self.harness.update_config(
+#             {"site_url": "https://grafana", "tls_secret_name": "grafana"}
+#         )
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
+#         """Test ingress resources with HTTPS and ingress whitelist."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "grafana",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "grafana",
+#                             "containerPort": 3000,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {},
+#                     "volumeConfig": [
+#                         {
+#                             "name": "dashboards",
+#                             "mountPath": "/etc/grafana/provisioning/dashboards/",
+#                             "files": [
+#                                 {
+#                                     "path": "dashboard-osm.yml",
+#                                     "content": (
+#                                         "apiVersion: 1\n"
+#                                         "providers:\n"
+#                                         "  - name: 'osm'\n"
+#                                         "    orgId: 1\n"
+#                                         "    folder: ''\n"
+#                                         "    type: file\n"
+#                                         "    options:\n"
+#                                         "      path: /etc/grafana/provisioning/dashboards/\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                         {
+#                             "name": "datasources",
+#                             "mountPath": "/etc/grafana/provisioning/datasources/",
+#                             "files": [
+#                                 {
+#                                     "path": "datasource-prometheus.yml",
+#                                     "content": (
+#                                         "datasources:\n"
+#                                         "  - access: proxy\n"
+#                                         "    editable: true\n"
+#                                         "    is_default: true\n"
+#                                         "    name: osm_prometheus\n"
+#                                         "    orgId: 1\n"
+#                                         "    type: prometheus\n"
+#                                         "    version: 1\n"
+#                                         "    url: http://prometheus:9090\n"
+#                                     ),
+#                                 },
+#                             ],
+#                         },
+#                     ],
+#                     "kubernetes": {
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 10,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/api/health",
+#                                 "port": 3000,
+#                             },
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 30,
+#                             "failureThreshold": 10,
+#                         },
+#                     },
+#                 },
+#             ],
+#             "kubernetesResources": {
+#                 "ingressResources": [
+#                     {
+#                         "name": "grafana-ingress",
+#                         "annotations": {
+#                             "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+#                             "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
+#                         },
+#                         "spec": {
+#                             "rules": [
+#                                 {
+#                                     "host": "grafana",
+#                                     "http": {
+#                                         "paths": [
+#                                             {
+#                                                 "path": "/",
+#                                                 "backend": {
+#                                                     "serviceName": "grafana",
+#                                                     "servicePort": 3000,
+#                                                 },
+#                                             }
+#                                         ]
+#                                     },
+#                                 }
+#                             ],
+#                             "tls": [{"hosts": ["grafana"], "secretName": "grafana"}],
+#                         },
+#                     }
+#                 ],
+#             },
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the prometheus relation
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "prometheus",
+#             {
+#                 "hostname": "prometheus",
+#                 "port": "9090",
+#             },
+#         )
+
+#         self.harness.update_config(
+#             {
+#                 "site_url": "https://grafana",
+#                 "tls_secret_name": "grafana",
+#                 "ingress_whitelist_source_range": "0.0.0.0/0",
+#             }
+#         )
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if prometheus relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "prometheus",
+#             {"hostname": "prometheus", "port": 9090},
+#         )
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
 
 if __name__ == "__main__":
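The tests above (both the removed versions and the commented-out replacements) follow the same ops.testing flow: create a Harness for the charm, join the prometheus relation, feed in relation data, and inspect the resulting pod spec. A minimal sketch of that flow, using the unit-level relation data from the removed tests (the commented replacements switch to application data with a hostname key), is included here for reference; the "from charm import GrafanaCharm" import path is an assumption about the test environment and is not part of this change:

# Sketch only -- not part of the commit. It condenses the Harness-based flow used
# by the Grafana tests above; the GrafanaCharm import path is an assumption.
import unittest
from typing import NoReturn

from ops.model import BlockedStatus
from ops.testing import Harness

from charm import GrafanaCharm  # assumed import path (src/ added to sys.path by tox)


class TestPrometheusRelationSketch(unittest.TestCase):
    def setUp(self) -> NoReturn:
        self.harness = Harness(GrafanaCharm)
        self.harness.set_leader(is_leader=True)
        self.harness.begin()

    def test_pod_spec_after_prometheus_relation(self) -> NoReturn:
        self.harness.charm.on.start.emit()
        # Join the prometheus relation and provide the unit data the charm expects.
        relation_id = self.harness.add_relation("prometheus", "prometheus")
        self.harness.add_relation_unit(relation_id, "prometheus/0")
        self.harness.update_relation_data(
            relation_id, "prometheus/0", {"host": "prometheus", "port": "9090"}
        )
        # With the relation satisfied the charm should leave BlockedStatus and
        # publish a pod spec that the real tests compare against an expected dict.
        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
        pod_spec, _ = self.harness.get_pod_spec()
        self.assertIsNotNone(pod_spec)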
diff --git a/installers/charm/grafana/tests/test_pod_spec.py b/installers/charm/grafana/tests/test_pod_spec.py
index 842769c..88c85d3 100644
--- a/installers/charm/grafana/tests/test_pod_spec.py
+++ b/installers/charm/grafana/tests/test_pod_spec.py
-#!/usr/bin/env python3
-# Copyright 2021 Canonical Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# For those usages not covered by the Apache License, Version 2.0 please
-# contact: legal@canonical.com
-#
-# To get in touch with the maintainers, please contact:
-# osm-charmers@lists.launchpad.net
-##
-
-from typing import NoReturn
-import unittest
-
-import pod_spec
-
-
-class TestPodSpec(unittest.TestCase):
-    """Pod spec unit tests."""
-
-    def test_make_pod_ports(self) -> NoReturn:
-        """Testing make pod ports."""
-        port = 3000
-
-        expected_result = [
-            {
-                "name": "grafana",
-                "containerPort": port,
-                "protocol": "TCP",
-            }
-        ]
-
-        pod_ports = pod_spec._make_pod_ports(port)
-
-        self.assertListEqual(expected_result, pod_ports)
-
-    def test_make_pod_envconfig(self) -> NoReturn:
-        """Teting make pod envconfig."""
-        config = {}
-        relation_state = {
-            "prometheus_host": "prometheus",
-            "prometheus_port": "9090",
-        }
-
-        expected_result = {}
-
-        pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
-
-        self.assertDictEqual(expected_result, pod_envconfig)
-
-    def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
-        """Testing make pod ingress resources without site_url."""
-        config = {"site_url": ""}
-        app_name = "grafana"
-        port = 3000
-
-        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-            config, app_name, port
-        )
-
-        self.assertIsNone(pod_ingress_resources)
-
-    def test_make_pod_ingress_resources(self) -> NoReturn:
-        """Testing make pod ingress resources."""
-        config = {
-            "site_url": "http://grafana",
-            "max_file_size": 0,
-            "ingress_whitelist_source_range": "",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = [
-            {
-                "name": f"{app_name}-ingress",
-                "annotations": {
-                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-                },
-                "spec": {
-                    "rules": [
-                        {
-                            "host": app_name,
-                            "http": {
-                                "paths": [
-                                    {
-                                        "path": "/",
-                                        "backend": {
-                                            "serviceName": app_name,
-                                            "servicePort": port,
-                                        },
-                                    }
-                                ]
-                            },
-                        }
-                    ]
-                },
-            }
-        ]
-
-        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-            config, app_name, port
-        )
-
-        self.assertListEqual(expected_result, pod_ingress_resources)
-
-    def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
-        """Testing make pod ingress resources with whitelist_source_range."""
-        config = {
-            "site_url": "http://grafana",
-            "max_file_size": 0,
-            "ingress_whitelist_source_range": "0.0.0.0/0",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = [
-            {
-                "name": f"{app_name}-ingress",
-                "annotations": {
-                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-                    "nginx.ingress.kubernetes.io/whitelist-source-range": config[
-                        "ingress_whitelist_source_range"
-                    ],
-                },
-                "spec": {
-                    "rules": [
-                        {
-                            "host": app_name,
-                            "http": {
-                                "paths": [
-                                    {
-                                        "path": "/",
-                                        "backend": {
-                                            "serviceName": app_name,
-                                            "servicePort": port,
-                                        },
-                                    }
-                                ]
-                            },
-                        }
-                    ]
-                },
-            }
-        ]
-
-        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-            config, app_name, port
-        )
-
-        self.assertListEqual(expected_result, pod_ingress_resources)
-
-    def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
-        """Testing make pod ingress resources with HTTPs."""
-        config = {
-            "site_url": "https://grafana",
-            "max_file_size": 0,
-            "ingress_whitelist_source_range": "",
-            "tls_secret_name": "",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = [
-            {
-                "name": f"{app_name}-ingress",
-                "annotations": {
-                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-                },
-                "spec": {
-                    "rules": [
-                        {
-                            "host": app_name,
-                            "http": {
-                                "paths": [
-                                    {
-                                        "path": "/",
-                                        "backend": {
-                                            "serviceName": app_name,
-                                            "servicePort": port,
-                                        },
-                                    }
-                                ]
-                            },
-                        }
-                    ],
-                    "tls": [{"hosts": [app_name]}],
-                },
-            }
-        ]
-
-        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-            config, app_name, port
-        )
-
-        self.assertListEqual(expected_result, pod_ingress_resources)
-
-    def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
-        """Testing make pod ingress resources with HTTPs and TLS secret name."""
-        config = {
-            "site_url": "https://grafana",
-            "max_file_size": 0,
-            "ingress_whitelist_source_range": "",
-            "tls_secret_name": "secret_name",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = [
-            {
-                "name": f"{app_name}-ingress",
-                "annotations": {
-                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
-                },
-                "spec": {
-                    "rules": [
-                        {
-                            "host": app_name,
-                            "http": {
-                                "paths": [
-                                    {
-                                        "path": "/",
-                                        "backend": {
-                                            "serviceName": app_name,
-                                            "servicePort": port,
-                                        },
-                                    }
-                                ]
-                            },
-                        }
-                    ],
-                    "tls": [
-                        {"hosts": [app_name], "secretName": config["tls_secret_name"]}
-                    ],
-                },
-            }
-        ]
-
-        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
-            config, app_name, port
-        )
-
-        self.assertListEqual(expected_result, pod_ingress_resources)
-
-    def test_make_pod_files(self) -> NoReturn:
-        """Testing make pod files."""
-        relation_state = {
-            "prometheus_host": "prometheus",
-            "prometheus_port": "9090",
-        }
-
-        expected_result = [
-            {
-                "name": "dashboards",
-                "mountPath": "/etc/grafana/provisioning/dashboards/",
-                "files": [
-                    {
-                        "path": "dashboard-osm.yml",
-                        "content": (
-                            "apiVersion: 1\n"
-                            "providers:\n"
-                            "  - name: 'osm'\n"
-                            "    orgId: 1\n"
-                            "    folder: ''\n"
-                            "    type: file\n"
-                            "    options:\n"
-                            "      path: /etc/grafana/provisioning/dashboards/\n"
-                        ),
-                    }
-                ],
-            },
-            {
-                "name": "datasources",
-                "mountPath": "/etc/grafana/provisioning/datasources/",
-                "files": [
-                    {
-                        "path": "datasource-prometheus.yml",
-                        "content": (
-                            "datasources:\n"
-                            "  - access: proxy\n"
-                            "    editable: true\n"
-                            "    is_default: true\n"
-                            "    name: osm_prometheus\n"
-                            "    orgId: 1\n"
-                            "    type: prometheus\n"
-                            "    version: 1\n"
-                            "    url: http://{}:{}\n".format(
-                                relation_state.get("prometheus_host"),
-                                relation_state.get("prometheus_port"),
-                            )
-                        ),
-                    }
-                ],
-            },
-        ]
-
-        pod_envconfig = pod_spec._make_pod_files(relation_state)
-
-        self.assertListEqual(expected_result, pod_envconfig)
-
-    def test_make_readiness_probe(self) -> NoReturn:
-        """Testing make readiness probe."""
-        port = 3000
-
-        expected_result = {
-            "httpGet": {
-                "path": "/api/health",
-                "port": port,
-            },
-            "initialDelaySeconds": 10,
-            "periodSeconds": 10,
-            "timeoutSeconds": 5,
-            "successThreshold": 1,
-            "failureThreshold": 3,
-        }
-
-        readiness_probe = pod_spec._make_readiness_probe(port)
-
-        self.assertDictEqual(expected_result, readiness_probe)
-
-    def test_make_liveness_probe(self) -> NoReturn:
-        """Testing make liveness probe."""
-        port = 3000
-
-        expected_result = {
-            "httpGet": {
-                "path": "/api/health",
-                "port": port,
-            },
-            "initialDelaySeconds": 60,
-            "timeoutSeconds": 30,
-            "failureThreshold": 10,
-        }
-
-        liveness_probe = pod_spec._make_liveness_probe(port)
-
-        self.assertDictEqual(expected_result, liveness_probe)
-
-    def test_make_pod_spec(self) -> NoReturn:
-        """Testing make pod spec."""
-        image_info = {"upstream-source": "ubuntu/grafana:latest"}
-        config = {
-            "site_url": "",
-        }
-        relation_state = {
-            "prometheus_host": "prometheus",
-            "prometheus_port": "9090",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": app_name,
-                    "imageDetails": image_info,
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": app_name,
-                            "containerPort": port,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                }
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://{}:{}\n".format(
-                                            relation_state.get("prometheus_host"),
-                                            relation_state.get("prometheus_port"),
-                                        )
-                                    ),
-                                }
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": port,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": port,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
-
-        spec = pod_spec.make_pod_spec(
-            image_info, config, relation_state, app_name, port
-        )
-
-        self.assertDictEqual(expected_result, spec)
-
-    def test_make_pod_spec_with_ingress(self) -> NoReturn:
-        """Testing make pod spec."""
-        image_info = {"upstream-source": "ubuntu/grafana:latest"}
-        config = {
-            "site_url": "https://grafana",
-            "tls_secret_name": "grafana",
-            "max_file_size": 0,
-            "ingress_whitelist_source_range": "0.0.0.0/0",
-        }
-        relation_state = {
-            "prometheus_host": "prometheus",
-            "prometheus_port": "9090",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": app_name,
-                    "imageDetails": image_info,
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": app_name,
-                            "containerPort": port,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "dashboards",
-                            "mountPath": "/etc/grafana/provisioning/dashboards/",
-                            "files": [
-                                {
-                                    "path": "dashboard-osm.yml",
-                                    "content": (
-                                        "apiVersion: 1\n"
-                                        "providers:\n"
-                                        "  - name: 'osm'\n"
-                                        "    orgId: 1\n"
-                                        "    folder: ''\n"
-                                        "    type: file\n"
-                                        "    options:\n"
-                                        "      path: /etc/grafana/provisioning/dashboards/\n"
-                                    ),
-                                }
-                            ],
-                        },
-                        {
-                            "name": "datasources",
-                            "mountPath": "/etc/grafana/provisioning/datasources/",
-                            "files": [
-                                {
-                                    "path": "datasource-prometheus.yml",
-                                    "content": (
-                                        "datasources:\n"
-                                        "  - access: proxy\n"
-                                        "    editable: true\n"
-                                        "    is_default: true\n"
-                                        "    name: osm_prometheus\n"
-                                        "    orgId: 1\n"
-                                        "    type: prometheus\n"
-                                        "    version: 1\n"
-                                        "    url: http://{}:{}\n".format(
-                                            relation_state.get("prometheus_host"),
-                                            relation_state.get("prometheus_port"),
-                                        )
-                                    ),
-                                }
-                            ],
-                        },
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": port,
-                            },
-                            "initialDelaySeconds": 10,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/api/health",
-                                "port": port,
-                            },
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 30,
-                            "failureThreshold": 10,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "{}-ingress".format(app_name),
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": str(
-                                config.get("max_file_size")
-                            ),
-                            "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
-                                "ingress_whitelist_source_range"
-                            ),
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": app_name,
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": app_name,
-                                                    "servicePort": port,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [
-                                {
-                                    "hosts": [app_name],
-                                    "secretName": config.get("tls_secret_name"),
-                                }
-                            ],
-                        },
-                    }
-                ],
-            },
-        }
-
-        spec = pod_spec.make_pod_spec(
-            image_info, config, relation_state, app_name, port
-        )
-
-        self.assertDictEqual(expected_result, spec)
-
-    def test_make_pod_spec_without_image_info(self) -> NoReturn:
-        """Testing make pod spec without image_info."""
-        image_info = None
-        config = {
-            "site_url": "",
-        }
-        relation_state = {
-            "prometheus_host": "prometheus",
-            "prometheus_port": "9090",
-        }
-        app_name = "grafana"
-        port = 3000
-
-        spec = pod_spec.make_pod_spec(
-            image_info, config, relation_state, app_name, port
-        )
-
-        self.assertIsNone(spec)
-
-    def test_make_pod_spec_without_relation_state(self) -> NoReturn:
-        """Testing make pod spec without relation_state."""
-        image_info = {"upstream-source": "ubuntu/grafana:latest"}
-        config = {
-            "site_url": "",
-        }
-        relation_state = {}
-        app_name = "grafana"
-        port = 3000
-
-        with self.assertRaises(ValueError):
-            pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
-
-
-if __name__ == "__main__":
-    unittest.main()
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+
+import pod_spec
+
+
+class TestPodSpec(unittest.TestCase):
+    """Pod spec unit tests."""
+
+    def test_make_pod_ports(self) -> NoReturn:
+        """Testing make pod ports."""
+        port = 3000
+
+        expected_result = [
+            {
+                "name": "grafana",
+                "containerPort": port,
+                "protocol": "TCP",
+            }
+        ]
+
+        pod_ports = pod_spec._make_pod_ports(port)
+
+        self.assertListEqual(expected_result, pod_ports)
+
+    def test_make_pod_envconfig(self) -> NoReturn:
+        """Teting make pod envconfig."""
+        config = {}
+        relation_state = {
+#             "prometheus_hostname": "prometheus",
+            "prometheus_port": "9090",
+        }
+
+        expected_result = {}
+
+        pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
+
+        self.assertDictEqual(expected_result, pod_envconfig)
+
+    def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
+        """Testing make pod ingress resources without site_url."""
+        config = {"site_url": ""}
+        app_name = "grafana"
+        port = 3000
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertIsNone(pod_ingress_resources)
+
+    def test_make_pod_ingress_resources(self) -> NoReturn:
+        """Testing make pod ingress resources."""
+        config = {
+            "site_url": "http://grafana",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ]
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
+        """Testing make pod ingress resources with whitelist_source_range."""
+        config = {
+            "site_url": "http://grafana",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "0.0.0.0/0",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+                    "nginx.ingress.kubernetes.io/whitelist-source-range": config[
+                        "ingress_whitelist_source_range"
+                    ],
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ]
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
+        """Testing make pod ingress resources with HTTPs."""
+        config = {
+            "site_url": "https://grafana",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ],
+                    "tls": [{"hosts": [app_name]}],
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
+        """Testing make pod ingress resources with HTTPs and TLS secret name."""
+        config = {
+            "site_url": "https://grafana",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "secret_name",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/proxy-body-size": f"{config['max_file_size']}",
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ],
+                    "tls": [
+                        {"hosts": [app_name], "secretName": config["tls_secret_name"]}
+                    ],
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_files(self) -> NoReturn:
+        """Testing make pod files."""
+#         config = {"osm_dashboards": False}
+#         relation_state = {
+#             "prometheus_hostname": "prometheus",
+#             "prometheus_port": "9090",
+#         }
+
+#         expected_result = [
+#             {
+#                 "name": "dashboards",
+#                 "mountPath": "/etc/grafana/provisioning/dashboards/",
+#                 "files": [
+#                     {
+#                         "path": "dashboard-osm.yml",
+#                         "content": (
+#                             "apiVersion: 1\n"
+#                             "providers:\n"
+#                             "  - name: 'osm'\n"
+#                             "    orgId: 1\n"
+#                             "    folder: ''\n"
+#                             "    type: file\n"
+#                             "    options:\n"
+#                             "      path: /etc/grafana/provisioning/dashboards/\n"
+#                         ),
+#                     }
+#                 ],
+#             },
+#             {
+#                 "name": "datasources",
+#                 "mountPath": "/etc/grafana/provisioning/datasources/",
+#                 "files": [
+#                     {
+#                         "path": "datasource-prometheus.yml",
+#                         "content": (
+#                             "datasources:\n"
+#                             "  - access: proxy\n"
+#                             "    editable: true\n"
+#                             "    is_default: true\n"
+#                             "    name: osm_prometheus\n"
+#                             "    orgId: 1\n"
+#                             "    type: prometheus\n"
+#                             "    version: 1\n"
+#                             "    url: http://{}:{}\n".format(
+#                                 relation_state.get("prometheus_hostname"),
+#                                 relation_state.get("prometheus_port"),
+#                             )
+#                         ),
+#                     }
+#                 ],
+#             },
+#         ]
+
+#         pod_envconfig = pod_spec._make_pod_files(config, relation_state)
+        self.assertListEqual(expected_result, pod_envconfig)
+
+    def test_make_readiness_probe(self) -> NoReturn:
+        """Testing make readiness probe."""
+        port = 3000
+
+        expected_result = {
+            "httpGet": {
+                "path": "/api/health",
+                "port": port,
+            },
+            "initialDelaySeconds": 10,
+            "periodSeconds": 10,
+            "timeoutSeconds": 5,
+            "successThreshold": 1,
+            "failureThreshold": 3,
+        }
+
+        readiness_probe = pod_spec._make_readiness_probe(port)
+
+        self.assertDictEqual(expected_result, readiness_probe)
+
+    def test_make_liveness_probe(self) -> NoReturn:
+        """Testing make liveness probe."""
+        port = 3000
+
+        expected_result = {
+            "httpGet": {
+                "path": "/api/health",
+                "port": port,
+            },
+            "initialDelaySeconds": 60,
+            "timeoutSeconds": 30,
+            "failureThreshold": 10,
+        }
+
+        liveness_probe = pod_spec._make_liveness_probe(port)
+
+        self.assertDictEqual(expected_result, liveness_probe)
+
+    def test_make_pod_spec(self) -> NoReturn:
+        """Testing make pod spec."""
+        image_info = {"upstream-source": "ubuntu/grafana:latest"}
+        config = {
+            "site_url": "",
+        }
+        relation_state = {
+#             "prometheus_hostname": "prometheus",
+            "prometheus_port": "9090",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": app_name,
+                    "imageDetails": image_info,
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": app_name,
+                            "containerPort": port,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "volumeConfig": [
+                        {
+                            "name": "dashboards",
+                            "mountPath": "/etc/grafana/provisioning/dashboards/",
+                            "files": [
+                                {
+                                    "path": "dashboard-osm.yml",
+                                    "content": (
+                                        "apiVersion: 1\n"
+                                        "providers:\n"
+                                        "  - name: 'osm'\n"
+                                        "    orgId: 1\n"
+                                        "    folder: ''\n"
+                                        "    type: file\n"
+                                        "    options:\n"
+                                        "      path: /etc/grafana/provisioning/dashboards/\n"
+                                    ),
+                                }
+                            ],
+                        },
+                        {
+                            "name": "datasources",
+                            "mountPath": "/etc/grafana/provisioning/datasources/",
+                            "files": [
+                                {
+                                    "path": "datasource-prometheus.yml",
+                                    "content": (
+                                        "datasources:\n"
+                                        "  - access: proxy\n"
+                                        "    editable: true\n"
+                                        "    is_default: true\n"
+                                        "    name: osm_prometheus\n"
+                                        "    orgId: 1\n"
+                                        "    type: prometheus\n"
+                                        "    version: 1\n"
+                                        "    url: http://{}:{}\n".format(
+#                                             relation_state.get("prometheus_hostname"),
+                                            relation_state.get("prometheus_port"),
+                                        )
+                                    ),
+                                }
+                            ],
+                        },
+                    ],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                }
+            ],
+            "kubernetesResources": {"ingressResources": []},
+        }
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertDictEqual(expected_result, spec)
+
+    def test_make_pod_spec_with_ingress(self) -> NoReturn:
+        """Testing make pod spec."""
+        image_info = {"upstream-source": "ubuntu/grafana:latest"}
+        config = {
+            "site_url": "https://grafana",
+            "tls_secret_name": "grafana",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "0.0.0.0/0",
+        }
+        relation_state = {
+#             "prometheus_hostname": "prometheus",
+            "prometheus_port": "9090",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": app_name,
+                    "imageDetails": image_info,
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": app_name,
+                            "containerPort": port,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "volumeConfig": [
+                        {
+                            "name": "dashboards",
+                            "mountPath": "/etc/grafana/provisioning/dashboards/",
+                            "files": [
+                                {
+                                    "path": "dashboard-osm.yml",
+                                    "content": (
+                                        "apiVersion: 1\n"
+                                        "providers:\n"
+                                        "  - name: 'osm'\n"
+                                        "    orgId: 1\n"
+                                        "    folder: ''\n"
+                                        "    type: file\n"
+                                        "    options:\n"
+                                        "      path: /etc/grafana/provisioning/dashboards/\n"
+                                    ),
+                                }
+                            ],
+                        },
+                        {
+                            "name": "datasources",
+                            "mountPath": "/etc/grafana/provisioning/datasources/",
+                            "files": [
+                                {
+                                    "path": "datasource-prometheus.yml",
+                                    "content": (
+                                        "datasources:\n"
+                                        "  - access: proxy\n"
+                                        "    editable: true\n"
+                                        "    is_default: true\n"
+                                        "    name: osm_prometheus\n"
+                                        "    orgId: 1\n"
+                                        "    type: prometheus\n"
+                                        "    version: 1\n"
+                                        "    url: http://{}:{}\n".format(
+#                                             relation_state.get("prometheus_hostname"),
+                                            relation_state.get("prometheus_port"),
+                                        )
+                                    ),
+                                }
+                            ],
+                        },
+                    ],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                }
+            ],
+            "kubernetesResources": {
+                "ingressResources": [
+                    {
+                        "name": "{}-ingress".format(app_name),
+                        "annotations": {
+                            "nginx.ingress.kubernetes.io/proxy-body-size": str(
+                                config.get("max_file_size")
+                            ),
+                            "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
+                                "ingress_whitelist_source_range"
+                            ),
+                        },
+                        "spec": {
+                            "rules": [
+                                {
+                                    "host": app_name,
+                                    "http": {
+                                        "paths": [
+                                            {
+                                                "path": "/",
+                                                "backend": {
+                                                    "serviceName": app_name,
+                                                    "servicePort": port,
+                                                },
+                                            }
+                                        ]
+                                    },
+                                }
+                            ],
+                            "tls": [
+                                {
+                                    "hosts": [app_name],
+                                    "secretName": config.get("tls_secret_name"),
+                                }
+                            ],
+                        },
+                    }
+                ],
+            },
+        }
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertDictEqual(expected_result, spec)
+
+    def test_make_pod_spec_without_image_info(self) -> NoReturn:
+        """Testing make pod spec without image_info."""
+        image_info = None
+        config = {
+            "site_url": "",
+        }
+        relation_state = {
+#             "prometheus_hostname": "prometheus",
+            "prometheus_port": "9090",
+        }
+        app_name = "grafana"
+        port = 3000
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertIsNone(spec)
+
+    def test_make_pod_spec_without_relation_state(self) -> NoReturn:
+        """Testing make pod spec without relation_state."""
+        image_info = {"upstream-source": "ubuntu/grafana:latest"}
+        config = {
+            "site_url": "",
+        }
+        relation_state = {}
+        app_name = "grafana"
+        port = 3000
+
+        with self.assertRaises(ValueError):
+            pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
+
+
+if __name__ == "__main__":
+    unittest.main()
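
(For context: the unit tests above import pod_spec straight from the charm's src/ directory, which the tox configuration that follows puts on PYTHONPATH. A minimal local run might look like the sketch below; the exact entry point is a hypothetical alternative to the charm's nose2-based tox target, and it assumes the packages from requirements.txt and requirements-test.txt are already installed.)

    cd installers/charm/grafana
    # src/ must be on PYTHONPATH so that "import pod_spec" resolves
    PYTHONPATH=src python3 -m unittest discover -s tests -v
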
index 069cf10..1f9442e 100644 (file)
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = grafana
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release grafana.charm
   charmcraft build
-  unzip grafana.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
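
(For reference: the reworked tox.ini above replaces the old unit/lint targets with per-tool environments. A hedged sketch of the local workflow they support, assuming tox is installed:)

    cd installers/charm/grafana
    tox -e flake8     # style checks over src/ and tests/
    tox -e cover      # nose2 with coverage; reports land in ./cover and coverage.xml
    tox -e build      # charmcraft build, then copies build/ into release/
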
index 43fcb62..493739e 100644 (file)
@@ -1,20 +1,30 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
 .vscode
-.tox
 build
-keystone.charm
+*.charm
+.coverage
+coverage.xml
 .stestr
-.coverage*
-cover/
\ No newline at end of file
+cover
+release
diff --git a/installers/charm/keystone/.jujuignore b/installers/charm/keystone/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index 08ab437..d71fb69 100644 (file)
@@ -1,16 +1,24 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
 ---
 extends: default
 
@@ -20,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index 06ea060..a606d8e 100644 (file)
 #     See the License for the specific language governing permissions and
 #     limitations under the License.
 options:
-  image:
-    type: string
-    default: opensourcemano/keystone:latest
-    description: The docker image to install.
-  image_username:
-    type: string
-    description: |
-      The username for accessing the registry specified in image.
-    default: ""
-  image_password:
-    type: string
-    description: |
-      The password associated with image_username for accessing
-      the registry specified in image.
-    default: ""
   max_file_size:
     type: int
     description: |
index c04c121..38c03ed 100644 (file)
@@ -18,6 +18,11 @@ description: |
 series:
     - kubernetes
 min-juju-version: 2.8.0
+resources:
+    image:
+        type: oci-image
        description: OSM docker image for Keystone
+        upstream-source: "opensourcemano/keystone:latest"
 requires:
     db:
         interface: mysql
diff --git a/installers/charm/keystone/requirements-test.txt b/installers/charm/keystone/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index 5a4c0af..d42bd9e 100644 (file)
@@ -1,15 +1,22 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
 cryptography
-ops
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index d5c169b..72d7090 100755 (executable)
@@ -1,44 +1,59 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+# pylint: disable=E0213
+
 
 import json
 import logging
+from cryptography.fernet import Fernet
 from datetime import datetime
-from typing import (
-    Any,
-    Dict,
-    List,
-    NoReturn,
-    Tuple,
-)
+from typing import Optional, NoReturn, List, Tuple
+from ipaddress import ip_network
 from urllib.parse import urlparse
 
-from cryptography.fernet import Fernet
-
-from ops.charm import CharmBase, EventBase, CharmEvents
-from ops.framework import StoredState, EventSource
 from ops.main import main
-from ops.model import (
-    ActiveStatus,
-    BlockedStatus,
-    # MaintenanceStatus,
-    WaitingStatus,
-    # ModelError,
+
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
+
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+    FilesV3Builder,
+    IngressResourceV3Builder,
 )
 
-LOGGER = logging.getLogger(__name__)
+
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
+
+from opslib.osm.interfaces.mysql import MysqlClient
+from opslib.osm.interfaces.keystone import KeystoneServer
+
+
+logger = logging.getLogger(__name__)
+
 
 REQUIRED_SETTINGS = ["token_expiration"]
 
@@ -46,7 +61,7 @@ REQUIRED_SETTINGS = ["token_expiration"]
 DATABASE_NAME = "keystone"
 
 # We expect the keystone container to use the default port
-KEYSTONE_PORT = 5000
+PORT = 5000
 
 # Number of keys need might need to be adjusted in the future
 NUMBER_FERNET_KEYS = 2
@@ -57,311 +72,109 @@ CREDENTIAL_KEYS_PATH = "/etc/keystone/credential-keys"
 FERNET_KEYS_PATH = "/etc/keystone/fernet-keys"
 
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
-
-    pass
-
-
-class KeystoneEvents(CharmEvents):
-    """Keystone Events"""
-
-    configure_pod = EventSource(ConfigurePodEvent)
-
-
-class KeystoneCharm(CharmBase):
-    """Keystone K8s Charm"""
-
-    state = StoredState()
-    on = KeystoneEvents()
-
+class ConfigModel(ModelValidator):
+    region_id: str
+    keystone_db_password: str
+    admin_username: str
+    admin_password: str
+    admin_project: str
+    service_username: str
+    service_password: str
+    service_project: str
+    user_domain_name: str
+    project_domain_name: str
+    token_expiration: int
+    max_file_size: int
+    site_url: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
+
+    @validator("max_file_size")
+    def validate_max_file_size(cls, v):
+        if v < 0:
+            raise ValueError("value must be equal or greater than 0")
+        return v
+
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
+
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
+
+
+class ConfigLdapModel(ModelValidator):
+    ldap_enabled: bool
+    ldap_authentication_domain_name: Optional[str]
+    ldap_url: Optional[str]
+    ldap_bind_user: Optional[str]
+    ldap_bind_password: Optional[str]
+    ldap_chase_referrals: Optional[str]
+    ldap_page_size: Optional[int]
+    ldap_user_tree_dn: Optional[str]
+    ldap_user_objectclass: Optional[str]
+    ldap_user_id_attribute: Optional[str]
+    ldap_user_name_attribute: Optional[str]
+    ldap_user_pass_attribute: Optional[str]
+    ldap_user_filter: Optional[str]
+    ldap_user_enabled_attribute: Optional[str]
+    ldap_user_enabled_mask: Optional[int]
+    ldap_user_enabled_default: Optional[bool]
+    ldap_user_enabled_invert: Optional[bool]
+    ldap_group_objectclass: Optional[str]
+    ldap_group_tree_dn: Optional[str]
+    ldap_use_starttls: Optional[bool]
+    ldap_tls_cacert_base64: Optional[str]
+    ldap_tls_req_cert: Optional[str]
+
+
+class KeystoneCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """Constructor of the Charm object.
-        Initializes internal state and register events it can handle.
-        """
-        super().__init__(*args)
-        self.state.set_default(db_host=None)
-        self.state.set_default(db_port=None)
-        self.state.set_default(db_user=None)
-        self.state.set_default(db_password=None)
-        self.state.set_default(pod_spec=None)
+        super().__init__(*args, oci_image="image")
         self.state.set_default(fernet_keys=None)
         self.state.set_default(credential_keys=None)
         self.state.set_default(keys_timestamp=0)
 
-        # Register all of the events we want to observe
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.upgrade_charm, self.configure_pod)
-        self.framework.observe(self.on.leader_elected, self.configure_pod)
-        self.framework.observe(self.on.update_status, self.configure_pod)
-
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
+        self.keystone_server = KeystoneServer(self, "keystone")
+        self.mysql_client = MysqlClient(self, "db")
+        self.framework.observe(self.on["db"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["db"].relation_broken, self.configure_pod)
 
-        # Register relation events
-        self.framework.observe(
-            self.on.db_relation_changed, self._on_db_relation_changed
-        )
-        self.framework.observe(
-            self.on.db_relation_departed, self._on_db_relation_departed
-        )
         self.framework.observe(
-            self.on.keystone_relation_joined, self._publish_keystone_info
+            self.on["keystone"].relation_joined, self._publish_keystone_info
         )
 
-    def _publish_keystone_info(self, event: EventBase) -> NoReturn:
-        """Publishes keystone information for NBI usage through the keystone
-           relation.
-
-        Args:
-            event (EventBase): Keystone relation event to update NBI.
-        """
-        config = self.model.config
-        rel_data = {
-            "host": f"http://{self.app.name}:{KEYSTONE_PORT}/v3",
-            "port": str(KEYSTONE_PORT),
-            "keystone_db_password": config["keystone_db_password"],
-            "region_id": config["region_id"],
-            "user_domain_name": config["user_domain_name"],
-            "project_domain_name": config["project_domain_name"],
-            "admin_username": config["admin_username"],
-            "admin_password": config["admin_password"],
-            "admin_project_name": config["admin_project"],
-            "username": config["service_username"],
-            "password": config["service_password"],
-            "service": config["service_project"],
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.model.unit][k] = v
-
-    def _on_db_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the DB relation, in order for keystone to
-           access it.
-
-        Args:
-            event (EventBase): DB relation event to access database
-                               information.
-        """
-        if event.unit not in event.relation.data:
-            return
-        relation_data = event.relation.data[event.unit]
-        db_host = relation_data.get("host")
-        db_port = int(relation_data.get("port", 3306))
-        db_user = "root"
-        db_password = relation_data.get("root_password")
-
-        if (
-            db_host
-            and db_port
-            and db_user
-            and db_password
-            and (
-                self.state.db_host != db_host
-                or self.state.db_port != db_port
-                or self.state.db_user != db_user
-                or self.state.db_password != db_password
+    def _publish_keystone_info(self, event):
+        if self.unit.is_leader():
+            config = ConfigModel(**dict(self.config))
+            self.keystone_server.publish_info(
+                host=f"http://{self.app.name}:{PORT}/v3",
+                port=PORT,
+                user_domain_name=config.user_domain_name,
+                project_domain_name=config.project_domain_name,
+                username=config.service_username,
+                password=config.service_password,
+                service=config.service_project,
+                keystone_db_password=config.keystone_db_password,
+                region_id=config.region_id,
+                admin_username=config.admin_username,
+                admin_password=config.admin_password,
+                admin_project_name=config.admin_project,
             )
-        ):
-            self.state.db_host = db_host
-            self.state.db_port = db_port
-            self.state.db_user = db_user
-            self.state.db_password = db_password
-            self.on.configure_pod.emit()
-
-    def _on_db_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from db relation.
-
-        Args:
-            event (EventBase): DB relation event.
-
-        """
-        self.state.db_host = None
-        self.state.db_port = None
-        self.state.db_user = None
-        self.state.db_password = None
-        self.on.configure_pod.emit()
-
-    def _check_settings(self) -> str:
-        """Check if there any settings missing from Keystone configuration.
-
-        Returns:
-            str: Information about the problems found (if any).
-        """
-        problems = []
-        config = self.model.config
 
-        for setting in REQUIRED_SETTINGS:
-            if not config.get(setting):
-                problem = f"missing config {setting}"
-                problems.append(problem)
-
-        return ";".join(problems)
-
-    def _make_pod_image_details(self) -> Dict[str, str]:
-        """Generate the pod image details.
-
-        Returns:
-            Dict[str, str]: pod image details.
-        """
-        config = self.model.config
-        image_details = {
-            "imagePath": config["image"],
-        }
-        if config["image_username"]:
-            image_details.update(
-                {
-                    "username": config["image_username"],
-                    "password": config["image_password"],
-                }
-            )
-        return image_details
-
-    def _make_pod_ports(self) -> List[Dict[str, Any]]:
-        """Generate the pod ports details.
-
-        Returns:
-            List[Dict[str, Any]]: pod ports details.
-        """
-        return [
-            {"name": "keystone", "containerPort": KEYSTONE_PORT, "protocol": "TCP"},
-        ]
-
-    def _make_pod_envconfig(self) -> Dict[str, Any]:
-        """Generate pod environment configuraiton.
-
-        Returns:
-            Dict[str, Any]: pod environment configuration.
-        """
-        config = self.model.config
-
-        envconfig = {
-            "DB_HOST": self.state.db_host,
-            "DB_PORT": self.state.db_port,
-            "ROOT_DB_USER": self.state.db_user,
-            "ROOT_DB_PASSWORD": self.state.db_password,
-            "KEYSTONE_DB_PASSWORD": config["keystone_db_password"],
-            "REGION_ID": config["region_id"],
-            "KEYSTONE_HOST": self.app.name,
-            "ADMIN_USERNAME": config["admin_username"],
-            "ADMIN_PASSWORD": config["admin_password"],
-            "ADMIN_PROJECT": config["admin_project"],
-            "SERVICE_USERNAME": config["service_username"],
-            "SERVICE_PASSWORD": config["service_password"],
-            "SERVICE_PROJECT": config["service_project"],
-        }
-
-        if config.get("ldap_enabled"):
-            envconfig["LDAP_AUTHENTICATION_DOMAIN_NAME"] = config[
-                "ldap_authentication_domain_name"
-            ]
-            envconfig["LDAP_URL"] = config["ldap_url"]
-            envconfig["LDAP_PAGE_SIZE"] = config["ldap_page_size"]
-            envconfig["LDAP_USER_OBJECTCLASS"] = config["ldap_user_objectclass"]
-            envconfig["LDAP_USER_ID_ATTRIBUTE"] = config["ldap_user_id_attribute"]
-            envconfig["LDAP_USER_NAME_ATTRIBUTE"] = config["ldap_user_name_attribute"]
-            envconfig["LDAP_USER_PASS_ATTRIBUTE"] = config["ldap_user_pass_attribute"]
-            envconfig["LDAP_USER_ENABLED_MASK"] = config["ldap_user_enabled_mask"]
-            envconfig["LDAP_USER_ENABLED_DEFAULT"] = config["ldap_user_enabled_default"]
-            envconfig["LDAP_USER_ENABLED_INVERT"] = config["ldap_user_enabled_invert"]
-            envconfig["LDAP_GROUP_OBJECTCLASS"] = config["ldap_group_objectclass"]
-
-            if config["ldap_bind_user"]:
-                envconfig["LDAP_BIND_USER"] = config["ldap_bind_user"]
-
-            if config["ldap_bind_password"]:
-                envconfig["LDAP_BIND_PASSWORD"] = config["ldap_bind_password"]
-
-            if config["ldap_user_tree_dn"]:
-                envconfig["LDAP_USER_TREE_DN"] = config["ldap_user_tree_dn"]
-
-            if config["ldap_user_filter"]:
-                envconfig["LDAP_USER_FILTER"] = config["ldap_user_filter"]
-
-            if config["ldap_user_enabled_attribute"]:
-                envconfig["LDAP_USER_ENABLED_ATTRIBUTE"] = config[
-                    "ldap_user_enabled_attribute"
-                ]
-
-            if config["ldap_chase_referrals"]:
-                envconfig["LDAP_CHASE_REFERRALS"] = config["ldap_chase_referrals"]
-
-            if config["ldap_group_tree_dn"]:
-                envconfig["LDAP_GROUP_TREE_DN"] = config["ldap_group_tree_dn"]
-
-            if config["ldap_use_starttls"]:
-                envconfig["LDAP_USE_STARTTLS"] = config["ldap_use_starttls"]
-                envconfig["LDAP_TLS_CACERT_BASE64"] = config["ldap_tls_cacert_base64"]
-                envconfig["LDAP_TLS_REQ_CERT"] = config["ldap_tls_req_cert"]
-
-        return envconfig
-
-    def _make_pod_ingress_resources(self) -> List[Dict[str, Any]]:
-        """Generate pod ingress resources.
-
-        Returns:
-            List[Dict[str, Any]]: pod ingress resources.
-        """
-        site_url = self.model.config["site_url"]
-
-        if not site_url:
-            return
-
-        parsed = urlparse(site_url)
-
-        if not parsed.scheme.startswith("http"):
-            return
-
-        max_file_size = self.model.config["max_file_size"]
-        ingress_whitelist_source_range = self.model.config[
-            "ingress_whitelist_source_range"
-        ]
-
-        annotations = {
-            "nginx.ingress.kubernetes.io/proxy-body-size": "{}m".format(max_file_size)
-        }
-
-        if ingress_whitelist_source_range:
-            annotations[
-                "nginx.ingress.kubernetes.io/whitelist-source-range"
-            ] = ingress_whitelist_source_range
-
-        ingress_spec_tls = None
-
-        if parsed.scheme == "https":
-            ingress_spec_tls = [{"hosts": [parsed.hostname]}]
-            tls_secret_name = self.model.config["tls_secret_name"]
-            if tls_secret_name:
-                ingress_spec_tls[0]["secretName"] = tls_secret_name
-        else:
-            annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
-
-        ingress = {
-            "name": "{}-ingress".format(self.app.name),
-            "annotations": annotations,
-            "spec": {
-                "rules": [
-                    {
-                        "host": parsed.hostname,
-                        "http": {
-                            "paths": [
-                                {
-                                    "path": "/",
-                                    "backend": {
-                                        "serviceName": self.app.name,
-                                        "servicePort": KEYSTONE_PORT,
-                                    },
-                                }
-                            ]
-                        },
-                    }
-                ],
-            },
-        }
-        if ingress_spec_tls:
-            ingress["spec"]["tls"] = ingress_spec_tls
-
-        return [ingress]
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+        if self.mysql_client.is_missing_data_in_unit():
+            missing_relations.append("mysql")
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
 
     def _generate_keys(self) -> Tuple[List[str], List[str]]:
         """Generating new fernet tokens.
@@ -382,58 +195,8 @@ class KeystoneCharm(CharmBase):
 
         return (fernet_keys, credential_keys)
 
-    def _make_pod_files(
-        self, fernet_keys: List[str], credential_keys: List[str]
-    ) -> List[Dict[str, Any]]:
-        """Generating ConfigMap information.
-
-        Args:
-            fernet_keys (List[str]): keys for fernet.
-            credential_keys (List[str]): keys for credentials.
-
-        Returns:
-            List[Dict[str, Any]]: ConfigMap information.
-        """
-        files = [
-            {
-                "name": "fernet-keys",
-                "mountPath": FERNET_KEYS_PATH,
-                "files": [
-                    {"path": str(key_id), "content": value}
-                    for (key_id, value) in enumerate(fernet_keys)
-                ],
-            }
-        ]
-
-        files.append(
-            {
-                "name": "credential-keys",
-                "mountPath": CREDENTIAL_KEYS_PATH,
-                "files": [
-                    {"path": str(key_id), "content": value}
-                    for (key_id, value) in enumerate(credential_keys)
-                ],
-            }
-        )
-
-        return files
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if not self.state.db_host:
-            self.unit.status = WaitingStatus("Waiting for database relation")
-            event.defer()
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
+    def _get_keys(self):
+        keys_timestamp = self.state.keys_timestamp
         if fernet_keys := self.state.fernet_keys:
             fernet_keys = json.loads(fernet_keys)
 
@@ -441,8 +204,7 @@ class KeystoneCharm(CharmBase):
             credential_keys = json.loads(credential_keys)
 
         now = datetime.now().timestamp()
-        keys_timestamp = self.state.keys_timestamp
-        token_expiration = self.model.config["token_expiration"]
+        token_expiration = self.config["token_expiration"]
 
         valid_keys = (now - keys_timestamp) < token_expiration
         if not credential_keys or not fernet_keys or not valid_keys:
@@ -450,42 +212,553 @@ class KeystoneCharm(CharmBase):
             self.state.fernet_keys = json.dumps(fernet_keys)
             self.state.credential_keys = json.dumps(credential_keys)
             self.state.keys_timestamp = now
+        return credential_keys, fernet_keys
+
+    def _build_files(self, config: ConfigModel):
+        credentials_files_builder = FilesV3Builder()
+        fernet_files_builder = FilesV3Builder()
+
+        credential_keys, fernet_keys = self._get_keys()
+
+        for (key_id, value) in enumerate(credential_keys):
+            credentials_files_builder.add_file(str(key_id), value)
+        for (key_id, value) in enumerate(fernet_keys):
+            fernet_files_builder.add_file(str(key_id), value)
+        return credentials_files_builder.build(), fernet_files_builder.build()
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        config_ldap = ConfigLdapModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        # Build files
+        credential_files, fernet_files = self._build_files(config)
+        container_builder.add_volume_config(
+            "credential-keys", CREDENTIAL_KEYS_PATH, credential_files
+        )
+        container_builder.add_volume_config(
+            "fernet-keys", FERNET_KEYS_PATH, fernet_files
+        )
+        container_builder.add_envs(
+            {
+                "DB_HOST": self.mysql_client.host,
+                "DB_PORT": self.mysql_client.port,
+                "ROOT_DB_USER": "root",
+                "ROOT_DB_PASSWORD": self.mysql_client.root_password,
+                "KEYSTONE_DB_PASSWORD": config.keystone_db_password,
+                "REGION_ID": config.region_id,
+                "KEYSTONE_HOST": self.app.name,
+                "ADMIN_USERNAME": config.admin_username,
+                "ADMIN_PASSWORD": config.admin_password,
+                "ADMIN_PROJECT": config.admin_project,
+                "SERVICE_USERNAME": config.service_username,
+                "SERVICE_PASSWORD": config.service_password,
+                "SERVICE_PROJECT": config.service_project,
+            }
+        )
 
-        # Check problems in the settings
-        problems = self._check_settings()
-        if problems:
-            self.unit.status = BlockedStatus(problems)
-            return
-
-        self.unit.status = BlockedStatus("Assembling pod spec")
-        image_details = self._make_pod_image_details()
-        ports = self._make_pod_ports()
-        env_config = self._make_pod_envconfig()
-        ingress_resources = self._make_pod_ingress_resources()
-        files = self._make_pod_files(fernet_keys, credential_keys)
-
-        pod_spec = {
-            "version": 3,
-            "containers": [
+        if config_ldap.ldap_enabled:
+
+            container_builder.add_envs(
                 {
-                    "name": self.framework.model.app.name,
-                    "imageDetails": image_details,
-                    "ports": ports,
-                    "envConfig": env_config,
-                    "volumeConfig": files,
+                    "LDAP_AUTHENTICATION_DOMAIN_NAME": config_ldap.ldap_authentication_domain_name,
+                    "LDAP_URL": config_ldap.ldap_url,
+                    "LDAP_PAGE_SIZE": config_ldap.ldap_page_size,
+                    "LDAP_USER_OBJECTCLASS": config_ldap.ldap_user_objectclass,
+                    "LDAP_USER_ID_ATTRIBUTE": config_ldap.ldap_user_id_attribute,
+                    "LDAP_USER_NAME_ATTRIBUTE": config_ldap.ldap_user_name_attribute,
+                    "LDAP_USER_PASS_ATTRIBUTE": config_ldap.ldap_user_pass_attribute,
+                    "LDAP_USER_ENABLED_MASK": config_ldap.ldap_user_enabled_mask,
+                    "LDAP_USER_ENABLED_DEFAULT": config_ldap.ldap_user_enabled_default,
+                    "LDAP_USER_ENABLED_INVERT": config_ldap.ldap_user_enabled_invert,
+                    "LDAP_GROUP_OBJECTCLASS": config_ldap.ldap_group_objectclass,
                 }
-            ],
-            "kubernetesResources": {"ingressResources": ingress_resources or []},
-        }
+            )
+            if config_ldap.ldap_bind_user:
+                container_builder.add_envs(
+                    {"LDAP_BIND_USER": config_ldap.ldap_bind_user}
+                )
+
+            if config_ldap.ldap_bind_password:
+                container_builder.add_envs(
+                    {"LDAP_BIND_PASSWORD": config_ldap.ldap_bind_password}
+                )
+
+            if config_ldap.ldap_user_tree_dn:
+                container_builder.add_envs(
+                    {"LDAP_USER_TREE_DN": config_ldap.ldap_user_tree_dn}
+                )
+
+            if config_ldap.ldap_user_filter:
+                container_builder.add_envs(
+                    {"LDAP_USER_FILTER": config_ldap.ldap_user_filter}
+                )
+
+            if config_ldap.ldap_user_enabled_attribute:
+                container_builder.add_envs(
+                    {
+                        "LDAP_USER_ENABLED_ATTRIBUTE": config_ldap.ldap_user_enabled_attribute
+                    }
+                )
 
-        if self.state.pod_spec != (
-            pod_spec_json := json.dumps(pod_spec, sort_keys=True)
-        ):
-            self.state.pod_spec = pod_spec_json
-            self.model.pod.set_spec(pod_spec)
+            if config_ldap.ldap_chase_referrals:
+                container_builder.add_envs(
+                    {"LDAP_CHASE_REFERRALS": config_ldap.ldap_chase_referrals}
+                )
 
-        self.unit.status = ActiveStatus("ready")
+            if config_ldap.ldap_group_tree_dn:
+                container_builder.add_envs(
+                    {"LDAP_GROUP_TREE_DN": config_ldap.ldap_group_tree_dn}
+                )
+
+            if config_ldap.ldap_use_starttls:
+                container_builder.add_envs(
+                    {
+                        "LDAP_USE_STARTTLS": config_ldap.ldap_use_starttls,
+                        "LDAP_TLS_CACERT_BASE64": config_ldap.ldap_tls_cacert_base64,
+                        "LDAP_TLS_REQ_CERT": config_ldap.ldap_tls_req_cert,
+                    }
+                )
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        # Add ingress resources to pod spec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
+            annotations = {
+                "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+                    str(config.max_file_size) + "m"
+                    if config.max_file_size > 0
+                    else config.max_file_size
+                ),
+            }
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
+            )
+
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
+
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
     main(KeystoneCharm)
+
+# LOGGER = logging.getLogger(__name__)
+
+
+# class ConfigurePodEvent(EventBase):
+#     """Configure Pod event"""
+
+#     pass
+
+
+# class KeystoneEvents(CharmEvents):
+#     """Keystone Events"""
+
+#     configure_pod = EventSource(ConfigurePodEvent)
+
+# class KeystoneCharm(CharmBase):
+#     """Keystone K8s Charm"""
+
+#     state = StoredState()
+#     on = KeystoneEvents()
+
+#     def __init__(self, *args) -> NoReturn:
+#         """Constructor of the Charm object.
+#         Initializes internal state and register events it can handle.
+#         """
+#         super().__init__(*args)
+#         self.state.set_default(db_host=None)
+#         self.state.set_default(db_port=None)
+#         self.state.set_default(db_user=None)
+#         self.state.set_default(db_password=None)
+#         self.state.set_default(pod_spec=None)
+#         self.state.set_default(fernet_keys=None)
+#         self.state.set_default(credential_keys=None)
+#         self.state.set_default(keys_timestamp=0)
+
+#         # Register all of the events we want to observe
+#         self.framework.observe(self.on.config_changed, self.configure_pod)
+#         self.framework.observe(self.on.start, self.configure_pod)
+#         self.framework.observe(self.on.upgrade_charm, self.configure_pod)
+#         self.framework.observe(self.on.leader_elected, self.configure_pod)
+#         self.framework.observe(self.on.update_status, self.configure_pod)
+
+#         # Registering custom internal events
+#         self.framework.observe(self.on.configure_pod, self.configure_pod)
+
+#         # Register relation events
+#         self.framework.observe(
+#             self.on.db_relation_changed, self._on_db_relation_changed
+#         )
+#         self.framework.observe(
+#             self.on.db_relation_broken, self._on_db_relation_broken
+#         )
+#         self.framework.observe(
+#             self.on.keystone_relation_joined, self._publish_keystone_info
+#         )
+
+#     def _publish_keystone_info(self, event: EventBase) -> NoReturn:
+#         """Publishes keystone information for NBI usage through the keystone
+#            relation.
+
+#         Args:
+#             event (EventBase): Keystone relation event to update NBI.
+#         """
+#         config = self.model.config
+#         rel_data = {
+#             "host": f"http://{self.app.name}:{KEYSTONE_PORT}/v3",
+#             "port": str(KEYSTONE_PORT),
+#             "keystone_db_password": config["keystone_db_password"],
+#             "region_id": config["region_id"],
+#             "user_domain_name": config["user_domain_name"],
+#             "project_domain_name": config["project_domain_name"],
+#             "admin_username": config["admin_username"],
+#             "admin_password": config["admin_password"],
+#             "admin_project_name": config["admin_project"],
+#             "username": config["service_username"],
+#             "password": config["service_password"],
+#             "service": config["service_project"],
+#         }
+#         for k, v in rel_data.items():
+#             event.relation.data[self.model.unit][k] = v
+
+#     def _on_db_relation_changed(self, event: EventBase) -> NoReturn:
+#         """Reads information about the DB relation, in order for keystone to
+#            access it.
+
+#         Args:
+#             event (EventBase): DB relation event to access database
+#                                information.
+#         """
+#         if not event.unit in event.relation.data:
+#             return
+#         relation_data = event.relation.data[event.unit]
+#         db_host = relation_data.get("host")
+#         db_port = int(relation_data.get("port", 3306))
+#         db_user = "root"
+#         db_password = relation_data.get("root_password")
+
+#         if (
+#             db_host
+#             and db_port
+#             and db_user
+#             and db_password
+#             and (
+#                 self.state.db_host != db_host
+#                 or self.state.db_port != db_port
+#                 or self.state.db_user != db_user
+#                 or self.state.db_password != db_password
+#             )
+#         ):
+#             self.state.db_host = db_host
+#             self.state.db_port = db_port
+#             self.state.db_user = db_user
+#             self.state.db_password = db_password
+#             self.on.configure_pod.emit()
+
+
+#     def _on_db_relation_broken(self, event: EventBase) -> NoReturn:
+#         """Clears data from db relation.
+
+#         Args:
+#             event (EventBase): DB relation event.
+
+#         """
+#         self.state.db_host = None
+#         self.state.db_port = None
+#         self.state.db_user = None
+#         self.state.db_password = None
+#         self.on.configure_pod.emit()
+
+#     def _check_settings(self) -> str:
+#         """Check if there are any settings missing from the Keystone configuration.
+
+#         Returns:
+#             str: Information about the problems found (if any).
+#         """
+#         problems = []
+#         config = self.model.config
+
+#         for setting in REQUIRED_SETTINGS:
+#             if not config.get(setting):
+#                 problem = f"missing config {setting}"
+#                 problems.append(problem)
+
+#         return ";".join(problems)
+
+#     def _make_pod_image_details(self) -> Dict[str, str]:
+#         """Generate the pod image details.
+
+#         Returns:
+#             Dict[str, str]: pod image details.
+#         """
+#         config = self.model.config
+#         image_details = {
+#             "imagePath": config["image"],
+#         }
+#         if config["image_username"]:
+#             image_details.update(
+#                 {
+#                     "username": config["image_username"],
+#                     "password": config["image_password"],
+#                 }
+#             )
+#         return image_details
+
+#     def _make_pod_ports(self) -> List[Dict[str, Any]]:
+#         """Generate the pod ports details.
+
+#         Returns:
+#             List[Dict[str, Any]]: pod ports details.
+#         """
+#         return [
+#             {"name": "keystone", "containerPort": KEYSTONE_PORT, "protocol": "TCP"},
+#         ]
+
+#     def _make_pod_envconfig(self) -> Dict[str, Any]:
+#         """Generate pod environment configuration.
+
+#         Returns:
+#             Dict[str, Any]: pod environment configuration.
+#         """
+#         config = self.model.config
+
+#         envconfig = {
+#             "DB_HOST": self.state.db_host,
+#             "DB_PORT": self.state.db_port,
+#             "ROOT_DB_USER": self.state.db_user,
+#             "ROOT_DB_PASSWORD": self.state.db_password,
+#             "KEYSTONE_DB_PASSWORD": config["keystone_db_password"],
+#             "REGION_ID": config["region_id"],
+#             "KEYSTONE_HOST": self.app.name,
+#             "ADMIN_USERNAME": config["admin_username"],
+#             "ADMIN_PASSWORD": config["admin_password"],
+#             "ADMIN_PROJECT": config["admin_project"],
+#             "SERVICE_USERNAME": config["service_username"],
+#             "SERVICE_PASSWORD": config["service_password"],
+#             "SERVICE_PROJECT": config["service_project"],
+#         }
+
+#         if config.get("ldap_enabled"):
+#             envconfig["LDAP_AUTHENTICATION_DOMAIN_NAME"] = config[
+#                 "ldap_authentication_domain_name"
+#             ]
+#             envconfig["LDAP_URL"] = config["ldap_url"]
+#             envconfig["LDAP_PAGE_SIZE"] = config["ldap_page_size"]
+#             envconfig["LDAP_USER_OBJECTCLASS"] = config["ldap_user_objectclass"]
+#             envconfig["LDAP_USER_ID_ATTRIBUTE"] = config["ldap_user_id_attribute"]
+#             envconfig["LDAP_USER_NAME_ATTRIBUTE"] = config["ldap_user_name_attribute"]
+#             envconfig["LDAP_USER_PASS_ATTRIBUTE"] = config["ldap_user_pass_attribute"]
+#             envconfig["LDAP_USER_ENABLED_MASK"] = config["ldap_user_enabled_mask"]
+#             envconfig["LDAP_USER_ENABLED_DEFAULT"] = config["ldap_user_enabled_default"]
+#             envconfig["LDAP_USER_ENABLED_INVERT"] = config["ldap_user_enabled_invert"]
+#             envconfig["LDAP_GROUP_OBJECTCLASS"] = config["ldap_group_objectclass"]
+
+#             if config["ldap_bind_user"]:
+#                 envconfig["LDAP_BIND_USER"] = config["ldap_bind_user"]
+
+#             if config["ldap_bind_password"]:
+#                 envconfig["LDAP_BIND_PASSWORD"] = config["ldap_bind_password"]
+
+#             if config["ldap_user_tree_dn"]:
+#                 envconfig["LDAP_USER_TREE_DN"] = config["ldap_user_tree_dn"]
+
+#             if config["ldap_user_filter"]:
+#                 envconfig["LDAP_USER_FILTER"] = config["ldap_user_filter"]
+
+#             if config["ldap_user_enabled_attribute"]:
+#                 envconfig["LDAP_USER_ENABLED_ATTRIBUTE"] = config[
+#                     "ldap_user_enabled_attribute"
+#                 ]
+
+#             if config["ldap_chase_referrals"]:
+#                 envconfig["LDAP_CHASE_REFERRALS"] = config["ldap_chase_referrals"]
+
+#             if config["ldap_group_tree_dn"]:
+#                 envconfig["LDAP_GROUP_TREE_DN"] = config["ldap_group_tree_dn"]
+
+#             if config["ldap_use_starttls"]:
+#                 envconfig["LDAP_USE_STARTTLS"] = config["ldap_use_starttls"]
+#                 envconfig["LDAP_TLS_CACERT_BASE64"] = config["ldap_tls_cacert_base64"]
+#                 envconfig["LDAP_TLS_REQ_CERT"] = config["ldap_tls_req_cert"]
+
+#         return envconfig
+
+#     def _make_pod_ingress_resources(self) -> List[Dict[str, Any]]:
+#         """Generate pod ingress resources.
+
+#         Returns:
+#             List[Dict[str, Any]]: pod ingress resources.
+#         """
+#         site_url = self.model.config["site_url"]
+
+#         if not site_url:
+#             return
+
+#         parsed = urlparse(site_url)
+
+#         if not parsed.scheme.startswith("http"):
+#             return
+
+#         max_file_size = self.model.config["max_file_size"]
+#         ingress_whitelist_source_range = self.model.config[
+#             "ingress_whitelist_source_range"
+#         ]
+
+#         annotations = {
+#             "nginx.ingress.kubernetes.io/proxy-body-size": "{}m".format(max_file_size)
+#         }
+
+#         if ingress_whitelist_source_range:
+#             annotations[
+#                 "nginx.ingress.kubernetes.io/whitelist-source-range"
+#             ] = ingress_whitelist_source_range
+
+#         ingress_spec_tls = None
+
+#         if parsed.scheme == "https":
+#             ingress_spec_tls = [{"hosts": [parsed.hostname]}]
+#             tls_secret_name = self.model.config["tls_secret_name"]
+#             if tls_secret_name:
+#                 ingress_spec_tls[0]["secretName"] = tls_secret_name
+#         else:
+#             annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+#         ingress = {
+#             "name": "{}-ingress".format(self.app.name),
+#             "annotations": annotations,
+#             "spec": {
+#                 "rules": [
+#                     {
+#                         "host": parsed.hostname,
+#                         "http": {
+#                             "paths": [
+#                                 {
+#                                     "path": "/",
+#                                     "backend": {
+#                                         "serviceName": self.app.name,
+#                                         "servicePort": KEYSTONE_PORT,
+#                                     },
+#                                 }
+#                             ]
+#                         },
+#                     }
+#                 ],
+#             },
+#         }
+#         if ingress_spec_tls:
+#             ingress["spec"]["tls"] = ingress_spec_tls
+
+#         return [ingress]
+
+#     def _generate_keys(self) -> Tuple[List[str], List[str]]:
+#         """Generating new fernet tokens.
+
+#         Returns:
+#             Tuple[List[str], List[str]]: contains two lists of strings. First
+#                                          list contains strings that represent
+#                                          the keys for fernet and the second
+#                                          list contains strings that represent
+#                                          the keys for credentials.
+#         """
+#         fernet_keys = [
+#             Fernet.generate_key().decode() for _ in range(NUMBER_FERNET_KEYS)
+#         ]
+#         credential_keys = [
+#             Fernet.generate_key().decode() for _ in range(NUMBER_CREDENTIAL_KEYS)
+#         ]
+
+#         return (fernet_keys, credential_keys)
+
+#     def configure_pod(self, event: EventBase) -> NoReturn:
+#         """Assemble the pod spec and apply it, if possible.
+
+#         Args:
+#             event (EventBase): Hook or Relation event that started the
+#                                function.
+#         """
+#         if not self.state.db_host:
+#             self.unit.status = WaitingStatus("Waiting for database relation")
+#             event.defer()
+#             return
+
+#         if not self.unit.is_leader():
+#             self.unit.status = ActiveStatus("ready")
+#             return
+
+#         if fernet_keys := self.state.fernet_keys:
+#             fernet_keys = json.loads(fernet_keys)
+
+#         if credential_keys := self.state.credential_keys:
+#             credential_keys = json.loads(credential_keys)
+
+#         now = datetime.now().timestamp()
+#         keys_timestamp = self.state.keys_timestamp
+#         token_expiration = self.model.config["token_expiration"]
+
+#         valid_keys = (now - keys_timestamp) < token_expiration
+#         if not credential_keys or not fernet_keys or not valid_keys:
+#             fernet_keys, credential_keys = self._generate_keys()
+#             self.state.fernet_keys = json.dumps(fernet_keys)
+#             self.state.credential_keys = json.dumps(credential_keys)
+#             self.state.keys_timestamp = now
+
+#         # Check problems in the settings
+#         problems = self._check_settings()
+#         if problems:
+#             self.unit.status = BlockedStatus(problems)
+#             return
+
+#         self.unit.status = BlockedStatus("Assembling pod spec")
+#         image_details = self._make_pod_image_details()
+#         ports = self._make_pod_ports()
+#         env_config = self._make_pod_envconfig()
+#         ingress_resources = self._make_pod_ingress_resources()
+#         files = self._make_pod_files(fernet_keys, credential_keys)
+
+#         pod_spec = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": self.framework.model.app.name,
+#                     "imageDetails": image_details,
+#                     "ports": ports,
+#                     "envConfig": env_config,
+#                     "volumeConfig": files,
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": ingress_resources or []},
+#         }
+
+#         if self.state.pod_spec != (
+#             pod_spec_json := json.dumps(pod_spec, sort_keys=True)
+#         ):
+#             self.state.pod_spec = pod_spec_json
+#             self.model.pod.set_spec(pod_spec)
+
+#         self.unit.status = ActiveStatus("ready")
+
+
+# if __name__ == "__main__":
+#     main(KeystoneCharm)
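
Note: the ingress handling in build_pod_spec above delegates to IngressResourceV3Builder instead of assembling the Kubernetes resource by hand. Purely as an illustrative sketch (not part of the commit), the resource it should produce for site_url="http://keystone.example.com" with max_file_size=1 presumably matches the dictionary the commented-out _make_pod_ingress_resources used to build manually; "keystone" stands in for self.app.name and the service port value is a placeholder, since the PORT constant's value is not shown in this diff.

# Illustrative only: expected shape of the ingress resource produced by the
# builder calls above, mirroring the old hand-built dictionary.
SERVICE_PORT = 5000  # placeholder for the charm's PORT constant
expected_ingress = {
    "name": "keystone-ingress",
    "annotations": {
        "nginx.ingress.kubernetes.io/proxy-body-size": "1m",
        # site_url is plain HTTP, so SSL redirection is disabled
        "nginx.ingress.kubernetes.io/ssl-redirect": "false",
    },
    "spec": {
        "rules": [
            {
                "host": "keystone.example.com",
                "http": {
                    "paths": [
                        {
                            "path": "/",
                            "backend": {
                                "serviceName": "keystone",
                                "servicePort": SERVICE_PORT,
                            },
                        }
                    ]
                },
            }
        ]
    },
}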
index 6004c6d..d0d973a 100644 (file)
 
 """Init mocking for unit tests."""
 
-import sys
-import mock
+import sys
+import mock
 
-sys.path.append("src")
+sys.path.append("src")
 
-oci_image = mock.MagicMock()
-sys.modules["oci_image"] = oci_image
+oci_image = mock.MagicMock()
+sys.modules["oci_image"] = oci_image
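
Note: the mocking above is what allows the unit tests to import the charm code without the real oci_image resource library being installed or an image resource attached. Purely as an illustration of the effect (not part of the change):

# Illustrative only: why the MagicMock is registered under sys.modules.
import sys
import mock

sys.modules["oci_image"] = mock.MagicMock()

# From this point on, "import oci_image" anywhere (including inside the
# charm's dependencies) resolves to the MagicMock, so fetching image
# information cannot fail during unit tests.
import oci_image

image_info = oci_image.OCIImageResource().fetch()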
index 8cb8604..756a5e4 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import KeystoneCharm
@@ -33,14 +34,162 @@ class TestCharm(unittest.TestCase):
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(KeystoneCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "region_id": "str",
+            "keystone_db_password": "str",
+            "admin_username": "str",
+            "admin_password": "str",
+            "admin_project": "str",
+            "service_username": "str",
+            "service_password": "str",
+            "service_project": "str",
+            "user_domain_name": "str",
+            "project_domain_name": "str",
+            "token_expiration": 10,
+            "max_file_size": 1,
+            "site_url": "http://keystone.com",
+            "ldap_enabled": False,
+        }
+        self.harness.update_config(self.config)
+
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.assertTrue(
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mysql"]
+            )
+        )
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
         self.harness.charm.on.config_changed.emit()
 
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations"
+        self.initialize_mysql_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+    def initialize_mysql_relation(self):
+        relation_id = self.harness.add_relation("db", "mysql")
+        self.harness.add_relation_unit(relation_id, "mysql/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "mysql/0",
+            {
+                "host": "mysql",
+                "port": 3306,
+                "user": "mano",
+                "password": "manopw",
+                "root_password": "rootmanopw",
+            },
+        )
+
 
 if __name__ == "__main__":
     unittest.main()
+
+
+# class TestCharm(unittest.TestCase):
+#     """Prometheus Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
+#         self.harness = Harness(KeystoneCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+#         self.config = {
+#             "enable_ng_ro": True,
+#             "database_commonkey": "commonkey",
+#             "log_level": "INFO",
+#             "vim_database": "db_name",
+#             "ro_database": "ro_db_name",
+#             "openmano_tenant": "mano",
+#         }
+
+#     def test_config_changed_no_relations(
+#         self,
+#     ) -> NoReturn:
+#         """Test ingress resources without HTTP."""
+
+#         self.harness.charm.on.config_changed.emit()
+
+#         # Assertions
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+#         self.assertTrue(
+#             all(
+#                 relation in self.harness.charm.unit.status.message
+#                 for relation in ["mongodb", "kafka"]
+#             )
+#         )
+
+#         # Disable ng-ro
+#         self.harness.update_config({"enable_ng_ro": False})
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+#         self.assertTrue(
+#             all(
+#                 relation in self.harness.charm.unit.status.message
+#                 for relation in ["mysql"]
+#             )
+#         )
+
+#     def test_config_changed_non_leader(
+#         self,
+#     ) -> NoReturn:
+#         """Test ingress resources without HTTP."""
+#         self.harness.set_leader(is_leader=False)
+#         self.harness.charm.on.config_changed.emit()
+
+#         # Assertions
+#         self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+#     def test_with_relations_ng(
+#         self,
+#     ) -> NoReturn:
+#         "Test with relations (ng-ro)"
+
+#         # Initializing the kafka relation
+#         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         # Initializing the mongo relation
+#         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             mongodb_relation_id,
+#             "mongodb/0",
+#             {"connection_string": "mongodb://mongo:27017"},
+#         )
+
+#         self.harness.charm.on.config_changed.emit()
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+
+# if __name__ == "__main__":
+#     unittest.main()
index 781f74e..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = keystone
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release keystone.charm
   charmcraft build
-  unzip keystone.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-  -rrequirements.txt
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/|release/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/ release/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =  
-  .tox/*
-  tests/*
index d4f4041..2885df2 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-lcm.charm
+*.charm
 .coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/lcm/.jujuignore b/installers/charm/lcm/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index c20ac8d..d71fb69 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index eeed96b..4c823d1 100644 (file)
@@ -43,7 +43,6 @@ options:
   vca_cacert:
     type: string
     description: "The VCA cacert."
-    default: ""
   vca_apiproxy:
     type: string
     description: "The VCA api proxy (native charms)"
index 360af8a..c2d4985 100644 (file)
@@ -41,7 +41,10 @@ resources:
 requires:
   kafka:
     interface: kafka
+    limit: 1
   mongodb:
     interface: mongodb
+    limit: 1
   ro:
-    interface: osm-ro
+    interface: http
+    limit: 1
diff --git a/installers/charm/lcm/requirements-test.txt b/installers/charm/lcm/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a26601f..f10a199 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
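
Note: the runtime dependency is now a single pin of the shared ops-lib-charmed-osm library, and every charm touched by this change follows the same skeleton on top of it. The condensed sketch below is illustrative only; "ExampleCharm", the port number and the LOG_LEVEL variable are made up, and it uses only the CharmedOsmBase, builder and validator calls that already appear elsewhere in this diff.

#!/usr/bin/env python3
# Illustrative sketch of the common charm structure (not part of the commit).
from typing import NoReturn

from ops.main import main

from opslib.osm.charm import CharmedOsmBase, RelationsMissing
from opslib.osm.interfaces.kafka import KafkaClient
from opslib.osm.pod import ContainerV3Builder, PodSpecV3Builder
from opslib.osm.validator import ModelValidator, validator


class ExampleConfig(ModelValidator):
    log_level: str

    @validator("log_level")
    def validate_log_level(cls, v):
        if v not in {"INFO", "DEBUG"}:
            raise ValueError("value must be INFO or DEBUG")
        return v


class ExampleCharm(CharmedOsmBase):
    def __init__(self, *args) -> NoReturn:
        super().__init__(*args, oci_image="image")
        # Relation clients re-trigger pod configuration on changes.
        self.kafka_client = KafkaClient(self, "kafka")
        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)

    def build_pod_spec(self, image_info):
        config = ExampleConfig(**dict(self.config))
        if self.kafka_client.is_missing_data_in_unit():
            raise RelationsMissing(["kafka"])
        pod_spec_builder = PodSpecV3Builder()
        container_builder = ContainerV3Builder(self.app.name, image_info)
        container_builder.add_port(name=self.app.name, port=8080)
        container_builder.add_envs({"LOG_LEVEL": config.log_level})
        pod_spec_builder.add_container(container_builder.build())
        return pod_spec_builder.build()


if __name__ == "__main__":
    main(ExampleCharm)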
index 52b6964..270a547 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
+
 import logging
-from typing import Any, Dict, NoReturn
+from typing import Optional, NoReturn
 
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, StoredState
 from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from oci_image import OCIImageResource, OCIImageResourceError
 
-from pod_spec import make_pod_spec
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
-logger = logging.getLogger(__name__)
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
 
-LCM_PORT = 9999
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mongo import MongoClient
+from opslib.osm.interfaces.http import HttpClient
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
-
-    pass
 
+logger = logging.getLogger(__name__)
 
-class LcmEvents(CharmEvents):
-    """LCM Events"""
+PORT = 9999
 
-    configure_pod = EventSource(ConfigurePodEvent)
 
+class ConfigModel(ModelValidator):
+    vca_host: str
+    vca_port: int
+    vca_user: str
+    vca_password: str
+    vca_pubkey: str
+    vca_cacert: str
+    vca_cloud: str
+    vca_k8s_cloud: str
+    database_commonkey: str
+    log_level: str
+    vca_apiproxy: Optional[str]
 
-class LcmCharm(CharmBase):
-    """LCM Charm."""
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
 
-    state = StoredState()
-    on = LcmEvents()
 
+class LcmCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """LCM Charm constructor."""
-        super().__init__(*args)
+        super().__init__(*args, oci_image="image")
+
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
+
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
+
+        self.ro_client = HttpClient(self, "ro")
+        self.framework.observe(self.on["ro"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["ro"].relation_broken, self.configure_pod)
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+        if self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+        if self.ro_client.is_missing_data_in_app():
+            missing_relations.append("ro")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_envs(
+            {
+                # General configuration
+                "ALLOW_ANONYMOUS_LOGIN": "yes",
+                "OSMLCM_GLOBAL_LOGLEVEL": config.log_level,
+                # RO configuration
+                "OSMLCM_RO_HOST": self.ro_client.host,
+                "OSMLCM_RO_PORT": self.ro_client.port,
+                "OSMLCM_RO_TENANT": "osm",
+                # Kafka configuration
+                "OSMLCM_MESSAGE_DRIVER": "kafka",
+                "OSMLCM_MESSAGE_HOST": self.kafka_client.host,
+                "OSMLCM_MESSAGE_PORT": self.kafka_client.port,
+                # Database configuration
+                "OSMLCM_DATABASE_DRIVER": "mongo",
+                "OSMLCM_DATABASE_URI": self.mongodb_client.connection_string,
+                "OSMLCM_DATABASE_COMMONKEY": config.database_commonkey,
+                # Storage configuration
+                "OSMLCM_STORAGE_DRIVER": "mongo",
+                "OSMLCM_STORAGE_PATH": "/app/storage",
+                "OSMLCM_STORAGE_COLLECTION": "files",
+                "OSMLCM_STORAGE_URI": self.mongodb_client.connection_string,
+                # VCA configuration
+                "OSMLCM_VCA_HOST": config.vca_host,
+                "OSMLCM_VCA_PORT": config.vca_port,
+                "OSMLCM_VCA_USER": config.vca_user,
+                "OSMLCM_VCA_PUBKEY": config.vca_pubkey,
+                "OSMLCM_VCA_SECRET": config.vca_password,
+                "OSMLCM_VCA_CACERT": config.vca_cacert,
+                "OSMLCM_VCA_CLOUD": config.vca_cloud,
+                "OSMLCM_VCA_K8S_CLOUD": config.vca_k8s_cloud,
+            }
+        )
+        if config.vca_apiproxy:
+            container_builder.add_env("OSMLCM_VCA_APIPROXY", config.vca_apiproxy)
 
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        return pod_spec_builder.build()
 
-        # Message bus data initialization
-        self.state.set_default(message_host=None)
-        self.state.set_default(message_port=None)
 
-        # Database data initialization
-        self.state.set_default(database_uri=None)
+if __name__ == "__main__":
+    main(LcmCharm)
 
-        # RO data initialization
-        self.state.set_default(ro_host=None)
-        self.state.set_default(ro_port=None)
 
-        self.port = LCM_PORT
-        self.image = OCIImageResource(self, "image")
+# class ConfigurePodEvent(EventBase):
+#     """Configure Pod event"""
 
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        self.framework.observe(self.on.upgrade_charm, self.configure_pod)
+#     pass
 
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
 
-        # Registering required relation events
-        self.framework.observe(
-            self.on.kafka_relation_changed, self._on_kafka_relation_changed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
-        )
-        self.framework.observe(
-            self.on.ro_relation_changed, self._on_ro_relation_changed
-        )
-
-        # Registering required relation departed events
-        self.framework.observe(
-            self.on.kafka_relation_departed, self._on_kafka_relation_departed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
-        )
-        self.framework.observe(
-            self.on.ro_relation_departed, self._on_ro_relation_departed
-        )
+# class LcmEvents(CharmEvents):
+#     """LCM Events"""
+
+#     configure_pod = EventSource(ConfigurePodEvent)
+
 
-    def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        message_host = event.relation.data[event.unit].get("host")
-        message_port = event.relation.data[event.unit].get("port")
-
-        if (
-            message_host
-            and message_port
-            and (
-                self.state.message_host != message_host
-                or self.state.message_port != message_port
-            )
-        ):
-            self.state.message_host = message_host
-            self.state.message_port = message_port
-            self.on.configure_pod.emit()
-
-    def _on_kafka_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        self.state.message_host = None
-        self.state.message_port = None
-        self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the DB relation.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        database_uri = event.relation.data[event.unit].get("connection_string")
-
-        if database_uri and self.state.database_uri != database_uri:
-            self.state.database_uri = database_uri
-            self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from mongodb relation.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        self.state.database_uri = None
-        self.on.configure_pod.emit()
-
-    def _on_ro_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the RO relation.
-
-        Args:
-            event (EventBase): Keystone relation event.
-        """
-        ro_host = event.relation.data[event.unit].get("host")
-        ro_port = event.relation.data[event.unit].get("port")
-
-        if (
-            ro_host
-            and ro_port
-            and (self.state.ro_host != ro_host or self.state.ro_port != ro_port)
-        ):
-            self.state.ro_host = ro_host
-            self.state.ro_port = ro_port
-            self.on.configure_pod.emit()
-
-    def _on_ro_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from ro relation.
-
-        Args:
-            event (EventBase): Keystone relation event.
-        """
-        self.state.ro_host = None
-        self.state.ro_port = None
-        self.on.configure_pod.emit()
-
-    def _missing_relations(self) -> str:
-        """Checks if there missing relations.
-
-        Returns:
-            str: string with missing relations
-        """
-        data_status = {
-            "kafka": self.state.message_host,
-            "mongodb": self.state.database_uri,
-            "ro": self.state.ro_host,
-        }
-
-        missing_relations = [k for k, v in data_status.items() if not v]
-
-        return ", ".join(missing_relations)
-
-    @property
-    def relation_state(self) -> Dict[str, Any]:
-        """Collects relation state configuration for pod spec assembly.
-
-        Returns:
-            Dict[str, Any]: relation state information.
-        """
-        relation_state = {
-            "message_host": self.state.message_host,
-            "message_port": self.state.message_port,
-            "database_uri": self.state.database_uri,
-            "ro_host": self.state.ro_host,
-            "ro_port": self.state.ro_port,
-        }
-
-        return relation_state
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if missing := self._missing_relations():
-            self.unit.status = BlockedStatus(
-                "Waiting for {0} relation{1}".format(
-                    missing, "s" if "," in missing else ""
-                )
-            )
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                self.relation_state,
-                self.model.app.name,
-                self.port,
-            )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
-
-        self.unit.status = ActiveStatus("ready")
+# class LcmCharm(CharmBase):
+#     """LCM Charm."""
 
+#     state = StoredState()
+#     on = LcmEvents()
+
+#     def __init__(self, *args) -> NoReturn:
+#         """LCM Charm constructor."""
+#         super().__init__(*args)
+
+#         # Internal state initialization
+#         self.state.set_default(pod_spec=None)
 
-if __name__ == "__main__":
-    main(LcmCharm)
+#         # Message bus data initialization
+#         self.state.set_default(message_host=None)
+#         self.state.set_default(message_port=None)
+
+#         # Database data initialization
+#         self.state.set_default(database_uri=None)
+
+#         # RO data initialization
+#         self.state.set_default(ro_host=None)
+#         self.state.set_default(ro_port=None)
+
+#         self.port = LCM_PORT
+#         self.image = OCIImageResource(self, "image")
+
+#         # Registering regular events
+#         self.framework.observe(self.on.start, self.configure_pod)
+#         self.framework.observe(self.on.config_changed, self.configure_pod)
+#         self.framework.observe(self.on.upgrade_charm, self.configure_pod)
+
+#         # Registering custom internal events
+#         self.framework.observe(self.on.configure_pod, self.configure_pod)
+
+#         # Registering required relation events
+#         self.framework.observe(
+#             self.on.kafka_relation_changed, self._on_kafka_relation_changed
+#         )
+#         self.framework.observe(
+#             self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
+#         )
+#         self.framework.observe(
+#             self.on.ro_relation_changed, self._on_ro_relation_changed
+#         )
+
+#         # Registering required relation broken events
+#         self.framework.observe(
+#             self.on.kafka_relation_broken, self._on_kafka_relation_broken
+#         )
+#         self.framework.observe(
+#             self.on.mongodb_relation_broken, self._on_mongodb_relation_broken
+#         )
+#         self.framework.observe(
+#             self.on.ro_relation_broken, self._on_ro_relation_broken
+#         )
+
+#     def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
+#         """Reads information about the kafka relation.
+
+#         Args:
+#             event (EventBase): Kafka relation event.
+#         """
+#         message_host = event.relation.data[event.unit].get("host")
+#         message_port = event.relation.data[event.unit].get("port")
+
+#         if (
+#             message_host
+#             and message_port
+#             and (
+#                 self.state.message_host != message_host
+#                 or self.state.message_port != message_port
+#             )
+#         ):
+#             self.state.message_host = message_host
+#             self.state.message_port = message_port
+#             self.on.configure_pod.emit()
+
+#     def _on_kafka_relation_broken(self, event: EventBase) -> NoReturn:
+#         """Clears data from kafka relation.
+
+#         Args:
+#             event (EventBase): Kafka relation event.
+#         """
+#         self.state.message_host = None
+#         self.state.message_port = None
+#         self.on.configure_pod.emit()
+
+#     def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
+#         """Reads information about the DB relation.
+
+#         Args:
+#             event (EventBase): DB relation event.
+#         """
+#         database_uri = event.relation.data[event.unit].get("connection_string")
+
+#         if database_uri and self.state.database_uri != database_uri:
+#             self.state.database_uri = database_uri
+#             self.on.configure_pod.emit()
+
+#     def _on_mongodb_relation_broken(self, event: EventBase) -> NoReturn:
+#         """Clears data from mongodb relation.
+
+#         Args:
+#             event (EventBase): DB relation event.
+#         """
+#         self.state.database_uri = None
+#         self.on.configure_pod.emit()
+
+#     def _on_ro_relation_changed(self, event: EventBase) -> NoReturn:
+#         """Reads information about the RO relation.
+
+#         Args:
+#             event (EventBase): Keystone relation event.
+#         """
+#         ro_host = event.relation.data[event.unit].get("host")
+#         ro_port = event.relation.data[event.unit].get("port")
+
+#         if (
+#             ro_host
+#             and ro_port
+#             and (self.state.ro_host != ro_host or self.state.ro_port != ro_port)
+#         ):
+#             self.state.ro_host = ro_host
+#             self.state.ro_port = ro_port
+#             self.on.configure_pod.emit()
+
+#     def _on_ro_relation_broken(self, event: EventBase) -> NoReturn:
+#         """Clears data from ro relation.
+
+#         Args:
+#             event (EventBase): Keystone relation event.
+#         """
+#         self.state.ro_host = None
+#         self.state.ro_port = None
+#         self.on.configure_pod.emit()
+
+#     def _missing_relations(self) -> str:
+#         """Checks if there are missing relations.
+
+#         Returns:
+#             str: string with missing relations
+#         """
+#         data_status = {
+#             "kafka": self.state.message_host,
+#             "mongodb": self.state.database_uri,
+#             "ro": self.state.ro_host,
+#         }
+
+#         missing_relations = [k for k, v in data_status.items() if not v]
+
+#         return ", ".join(missing_relations)
+
+#     @property
+#     def relation_state(self) -> Dict[str, Any]:
+#         """Collects relation state configuration for pod spec assembly.
+
+#         Returns:
+#             Dict[str, Any]: relation state information.
+#         """
+#         relation_state = {
+#             "message_host": self.state.message_host,
+#             "message_port": self.state.message_port,
+#             "database_uri": self.state.database_uri,
+#             "ro_host": self.state.ro_host,
+#             "ro_port": self.state.ro_port,
+#         }
+
+#         return relation_state
+
+#     def configure_pod(self, event: EventBase) -> NoReturn:
+#         """Assemble the pod spec and apply it, if possible.
+
+#         Args:
+#             event (EventBase): Hook or Relation event that started the
+#                                function.
+#         """
+#         if missing := self._missing_relations():
+#             self.unit.status = BlockedStatus(
+#                 "Waiting for {0} relation{1}".format(
+#                     missing, "s" if "," in missing else ""
+#                 )
+#             )
+#             return
+
+#         if not self.unit.is_leader():
+#             self.unit.status = ActiveStatus("ready")
+#             return
+
+#         self.unit.status = MaintenanceStatus("Assembling pod spec")
+
+#         # Fetch image information
+#         try:
+#             self.unit.status = MaintenanceStatus("Fetching image information")
+#             image_info = self.image.fetch()
+#         except OCIImageResourceError:
+#             self.unit.status = BlockedStatus("Error fetching image information")
+#             return
+
+#         try:
+#             pod_spec = make_pod_spec(
+#                 image_info,
+#                 self.model.config,
+#                 self.relation_state,
+#                 self.model.app.name,
+#                 self.port,
+#             )
+#         except ValueError as exc:
+#             logger.exception("Config/Relation data validation error")
+#             self.unit.status = BlockedStatus(str(exc))
+#             return
+
+#         if self.state.pod_spec != pod_spec:
+#             self.model.pod.set_spec(pod_spec)
+#             self.state.pod_spec = pod_spec
+
+#         self.unit.status = ActiveStatus("ready")
+
+
+# if __name__ == "__main__":
+#     main(LcmCharm)
index dc21453..8709f4f 100644 (file)
@@ -36,10 +36,12 @@ def _validate_data(
         relation_data (Dict[str, Any]): relation data.
     """
     config_validators = {
-        "database_commonkey": lambda value, _: isinstance(value, str)
-        and len(value) > 1,
-        "log_level": lambda value, _: isinstance(value, str)
-        and value in ("INFO", "DEBUG"),
+        "database_commonkey": lambda value, _: (
+            isinstance(value, str) and len(value) > 1
+        ),
+        "log_level": lambda value, _: (
+            isinstance(value, str) and value in ("INFO", "DEBUG")
+        ),
         "vca_host": lambda value, _: isinstance(value, str) and len(value) > 1,
         "vca_port": lambda value, _: isinstance(value, int) and value > 0,
         "vca_user": lambda value, _: isinstance(value, str) and len(value) > 1,
index 25e2cd6..bff3cee 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-from ops.model import BlockedStatus
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import LcmCharm
 
 
 class TestCharm(unittest.TestCase):
-    """LCM Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(LcmCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "vca_host": "192.168.0.13",
+            "vca_port": 17070,
+            "vca_user": "admin",
+            "vca_password": "admin",
+            "vca_pubkey": "key",
+            "vca_cacert": "cacert",
+            "vca_cloud": "cloud",
+            "vca_k8s_cloud": "k8scloud",
+            "database_commonkey": "commonkey",
+            "log_level": "INFO",
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.harness.charm.on.config_changed.emit()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka", "ro"]
+            )
         )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("ro", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_start_with_relations(self) -> NoReturn:
-        """Test deployment without keystone."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "lcm",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "lcm",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMLCM_GLOBAL_LOGLEVEL": "INFO",
-                        "OSMLCM_RO_HOST": "ro",
-                        "OSMLCM_RO_PORT": 9090,
-                        "OSMLCM_RO_TENANT": "osm",
-                        "OSMLCM_MESSAGE_DRIVER": "kafka",
-                        "OSMLCM_MESSAGE_HOST": "kafka",
-                        "OSMLCM_MESSAGE_PORT": 9092,
-                        "OSMLCM_DATABASE_DRIVER": "mongo",
-                        "OSMLCM_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMLCM_DATABASE_COMMONKEY": "osm",
-                        "OSMLCM_STORAGE_DRIVER": "mongo",
-                        "OSMLCM_STORAGE_PATH": "/app/storage",
-                        "OSMLCM_STORAGE_COLLECTION": "files",
-                        "OSMLCM_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMLCM_VCA_HOST": "admin",
-                        "OSMLCM_VCA_PORT": 17070,
-                        "OSMLCM_VCA_USER": "admin",
-                        "OSMLCM_VCA_PUBKEY": "secret",
-                        "OSMLCM_VCA_SECRET": "secret",
-                        "OSMLCM_VCA_CACERT": "",
-                        "OSMLCM_VCA_CLOUD": "localhost",
-                        "OSMLCM_VCA_K8S_CLOUD": "k8scloud",
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
-
-        self.harness.charm.on.start.emit()
 
-        # Check if kafka datastore is initialized
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        # Check if mongodb datastore is initialized
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        # Check if RO datastore is initialized
-        self.assertIsNone(self.harness.charm.state.ro_host)
-        self.assertIsNone(self.harness.charm.state.ro_port)
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        self.initialize_ro_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        # Initializing the kafka relation
+    def initialize_kafka_relation(self):
         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
         self.harness.update_relation_data(
             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
         )
 
-        # Initializing the mongo relation
+    def initialize_mongo_relation(self):
         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
         self.harness.update_relation_data(
@@ -130,116 +104,231 @@ class TestCharm(unittest.TestCase):
             {"connection_string": "mongodb://mongo:27017"},
         )
 
-        # Initializing the RO relation
-        ro_relation_id = self.harness.add_relation("ro", "ro")
-        self.harness.add_relation_unit(ro_relation_id, "ro/0")
-        self.harness.update_relation_data(
-            ro_relation_id, "ro/0", {"host": "ro", "port": 9090}
-        )
-
-        # Checking if kafka data is stored
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Checking if mongodb data is stored
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Checking if RO data is stored
-        self.assertEqual(self.harness.charm.state.ro_host, "ro")
-        self.assertEqual(self.harness.charm.state.ro_port, 9090)
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_kafka_relation_unit_changed(self) -> NoReturn:
-        """Test to see if kafka relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertNotIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("ro", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mongodb relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
-        )
-
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("ro", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_ro_unit_relation_changed(self) -> NoReturn:
-        """Test to see if RO relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.ro_host)
-        self.assertIsNone(self.harness.charm.state.ro_port)
-
-        relation_id = self.harness.add_relation("ro", "ro")
-        self.harness.add_relation_unit(relation_id, "ro/0")
+    def initialize_ro_relation(self):
+        ro_relation_id = self.harness.add_relation("ro", "ro")
+        self.harness.add_relation_unit(ro_relation_id, "ro")
         self.harness.update_relation_data(
-            relation_id, "ro/0", {"host": "ro", "port": 9090}
+            ro_relation_id,
+            "ro",
+            {"host": "ro", "port": 9090},
         )
 
-        self.assertEqual(self.harness.charm.state.ro_host, "ro")
-        self.assertEqual(self.harness.charm.state.ro_port, 9090)
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+if __name__ == "__main__":
+    unittest.main()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertNotIn("ro", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+# class TestCharm(unittest.TestCase):
+#     """LCM Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.harness = Harness(LcmCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+
+#     def test_on_start_without_relations(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("ro", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_start_with_relations(self) -> NoReturn:
+#         """Test deployment without keystone."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "lcm",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "lcm",
+#                             "containerPort": 9999,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {
+#                         "ALLOW_ANONYMOUS_LOGIN": "yes",
+#                         "OSMLCM_GLOBAL_LOGLEVEL": "INFO",
+#                         "OSMLCM_RO_HOST": "ro",
+#                         "OSMLCM_RO_PORT": 9090,
+#                         "OSMLCM_RO_TENANT": "osm",
+#                         "OSMLCM_MESSAGE_DRIVER": "kafka",
+#                         "OSMLCM_MESSAGE_HOST": "kafka",
+#                         "OSMLCM_MESSAGE_PORT": 9092,
+#                         "OSMLCM_DATABASE_DRIVER": "mongo",
+#                         "OSMLCM_DATABASE_URI": "mongodb://mongo:27017",
+#                         "OSMLCM_DATABASE_COMMONKEY": "osm",
+#                         "OSMLCM_STORAGE_DRIVER": "mongo",
+#                         "OSMLCM_STORAGE_PATH": "/app/storage",
+#                         "OSMLCM_STORAGE_COLLECTION": "files",
+#                         "OSMLCM_STORAGE_URI": "mongodb://mongo:27017",
+#                         "OSMLCM_VCA_HOST": "admin",
+#                         "OSMLCM_VCA_PORT": 17070,
+#                         "OSMLCM_VCA_USER": "admin",
+#                         "OSMLCM_VCA_PUBKEY": "secret",
+#                         "OSMLCM_VCA_SECRET": "secret",
+#                         "OSMLCM_VCA_CACERT": "",
+#                         "OSMLCM_VCA_CLOUD": "localhost",
+#                         "OSMLCM_VCA_K8S_CLOUD": "k8scloud",
+#                     },
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Check if kafka datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         # Check if mongodb datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         # Check if RO datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.ro_host)
+#         self.assertIsNone(self.harness.charm.state.ro_port)
+
+#         # Initializing the kafka relation
+#         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         # Initializing the mongo relation
+#         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             mongodb_relation_id,
+#             "mongodb/0",
+#             {"connection_string": "mongodb://mongo:27017"},
+#         )
+
+#         # Initializing the RO relation
+#         ro_relation_id = self.harness.add_relation("ro", "ro")
+#         self.harness.add_relation_unit(ro_relation_id, "ro/0")
+#         self.harness.update_relation_data(
+#             ro_relation_id, "ro/0", {"host": "ro", "port": 9090}
+#         )
+
+#         # Checking if kafka data is stored
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Checking if mongodb data is stored
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Checking if RO data is stored
+#         self.assertEqual(self.harness.charm.state.ro_host, "ro")
+#         self.assertEqual(self.harness.charm.state.ro_port, 9090)
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_kafka_relation_unit_changed(self) -> NoReturn:
+#         """Test to see if kafka relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertNotIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("ro", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if mongodb relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("ro", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_ro_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if RO relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.ro_host)
+#         self.assertIsNone(self.harness.charm.state.ro_port)
+
+#         relation_id = self.harness.add_relation("ro", "ro")
+#         self.harness.add_relation_unit(relation_id, "ro/0")
+#         self.harness.update_relation_data(
+#             relation_id, "ro/0", {"host": "ro", "port": 9090}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.ro_host, "ro")
+#         self.assertEqual(self.harness.charm.state.ro_port, 9090)
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertNotIn("ro", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
 
 
-if __name__ == "__main__":
-    unittest.main()
+if __name__ == "__main__":
+    unittest.main()
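With the relation helpers above in place, further assertions are straightforward to add. As one illustrative sketch (not part of this change), a test could check that a pod spec is actually produced once all three relations provide data, reusing the same helpers and the Harness.get_pod_spec() call already used elsewhere in these tests:

    def test_pod_spec_set_with_relations(self) -> NoReturn:
        """Illustrative sketch: pod spec is set once kafka, mongodb and ro are related."""
        self.initialize_kafka_relation()
        self.initialize_mongo_relation()
        self.initialize_ro_relation()
        pod_spec, _ = self.harness.get_pod_spec()
        self.assertIsNotNone(pod_spec)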
index 068b4c5..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = lcm
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release lcm.charm
   charmcraft build
-  unzip lcm.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
diff --git a/installers/charm/local_osm_bundle.yaml b/installers/charm/local_osm_bundle.yaml
new file mode 100644 (file)
index 0000000..4b4f809
--- /dev/null
@@ -0,0 +1,188 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+description: Single instance OSM bundle
+bundle: kubernetes
+applications:
+  zookeeper-k8s:
+    charm: "cs:~charmed-osm/zookeeper-k8s"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      database: 100M
+    annotations:
+      gui-x: 0
+      gui-y: 550
+  mariadb-k8s:
+    charm: "cs:~charmed-osm/mariadb-k8s"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      database: 50M
+    options:
+      password: manopw
+      root_password: osm4u
+      user: mano
+    annotations:
+      gui-x: -250
+      gui-y: -200
+  kafka-k8s:
+    charm: "cs:~charmed-osm/kafka-k8s"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      database: 100M
+    annotations:
+      gui-x: 0
+      gui-y: 300
+  mongodb-k8s:
+    charm: "cs:~charmed-osm/mongodb-k8s"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      database: 50M
+    options:
+      replica-set: rs0
+      namespace: osm
+      enable-sidecar: true
+    annotations:
+      gui-x: 0
+      gui-y: 50
+  nbi:
+    charm: "./nbi/build"
+    scale: 1
+    series: kubernetes
+    options:
+      database_commonkey: osm
+      auth_backend: keystone
+    annotations:
+      gui-x: 0
+      gui-y: -200
+  ro:
+    charm: "./ro/build"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 300
+  ng-ui:
+    charm: "./ng-ui/build"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: 500
+      gui-y: 100
+  lcm:
+    charm: "./lcm/build"
+    scale: 1
+    series: kubernetes
+    options:
+      database_commonkey: osm
+    annotations:
+      gui-x: -250
+      gui-y: 50
+  mon:
+    charm: "./mon/build"
+    scale: 1
+    series: kubernetes
+    options:
+      database_commonkey: osm
+    annotations:
+      gui-x: 250
+      gui-y: 50
+  pol:
+    charm: "./pol/build"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 550
+  pla:
+    charm: "./pla/build"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: 500
+      gui-y: -200
+  prometheus:
+    charm: "./prometheus/build"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      data: 50M
+    options:
+      default-target: "mon:8000"
+    annotations:
+      gui-x: 250
+      gui-y: 300
+  grafana:
+    charm: "./grafana/build"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: 250
+      gui-y: 550
+  keystone:
+    charm: "./keystone/build"
+    scale: 1
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 550
+relations:
+  - - grafana:prometheus
+    - prometheus:prometheus
+  - - kafka-k8s:zookeeper
+    - zookeeper-k8s:zookeeper
+  - - keystone:db
+    - mariadb-k8s:mysql
+  - - lcm:kafka
+    - kafka-k8s:kafka
+  - - lcm:mongodb
+    - mongodb-k8s:mongo
+  - - ro:ro
+    - lcm:ro
+  - - ro:kafka
+    - kafka-k8s:kafka
+  - - ro:mongodb
+    - mongodb-k8s:mongo
+  - - pol:kafka
+    - kafka-k8s:kafka
+  - - pol:mongodb
+    - mongodb-k8s:mongo
+  - - mon:mongodb
+    - mongodb-k8s:mongo
+  - - mon:kafka
+    - kafka-k8s:kafka
+  - - pla:kafka
+    - kafka-k8s:kafka
+  - - pla:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:kafka
+    - kafka-k8s:kafka
+  - - nbi:prometheus
+    - prometheus:prometheus
+  - - nbi:keystone
+    - keystone:keystone
+  - - mon:prometheus
+    - prometheus:prometheus
+  - - ng-ui:nbi
+    - nbi:nbi
diff --git a/installers/charm/local_osm_ha_bundle.yaml b/installers/charm/local_osm_ha_bundle.yaml
new file mode 100644 (file)
index 0000000..0a08eaa
--- /dev/null
@@ -0,0 +1,194 @@
+# Copyright 2020 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+#     Unless required by applicable law or agreed to in writing, software
+#     distributed under the License is distributed on an "AS IS" BASIS,
+#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#     See the License for the specific language governing permissions and
+#     limitations under the License.
+description: A highly available OSM cluster.
+bundle: kubernetes
+applications:
+  zookeeper-k8s:
+    charm: "cs:~charmed-osm/zookeeper-k8s"
+    channel: "stable"
+    scale: 3
+    series: kubernetes
+    storage:
+      database: 100M
+    options:
+      zookeeper-units: 3
+    annotations:
+      gui-x: 0
+      gui-y: 550
+  mariadb-k8s:
+    charm: "cs:~charmed-osm/mariadb-k8s"
+    channel: "stable"
+    scale: 3
+    series: kubernetes
+    storage:
+      database: 300M
+    options:
+      password: manopw
+      root_password: osm4u
+      user: mano
+      ha-mode: true
+    annotations:
+      gui-x: -250
+      gui-y: -200
+  kafka-k8s:
+    charm: "cs:~charmed-osm/kafka-k8s"
+    channel: "stable"
+    scale: 3
+    series: kubernetes
+    storage:
+      database: 100M
+    options:
+      zookeeper-units: 3
+      kafka-units: 3
+    annotations:
+      gui-x: 0
+      gui-y: 300
+  mongodb-k8s:
+    charm: "cs:~charmed-osm/mongodb-k8s"
+    channel: "stable"
+    scale: 3
+    series: kubernetes
+    storage:
+      database: 50M
+    options:
+      replica-set: rs0
+      namespace: osm
+      enable-sidecar: true
+    annotations:
+      gui-x: 0
+      gui-y: 50
+  nbi:
+    charm: "./nbi/build"
+    scale: 3
+    series: kubernetes
+    options:
+      database_commonkey: osm
+      auth_backend: keystone
+    annotations:
+      gui-x: 0
+      gui-y: -200
+  ro:
+    charm: "./ro/build"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 300
+  ng-ui:
+    charm: "./ng-ui/build"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: 500
+      gui-y: 100
+  lcm:
+    charm: "./lcm/build"
+    scale: 3
+    series: kubernetes
+    options:
+      database_commonkey: osm
+    annotations:
+      gui-x: -250
+      gui-y: 50
+  mon:
+    charm: "./mon/build"
+    scale: 1
+    series: kubernetes
+    options:
+      database_commonkey: osm
+    annotations:
+      gui-x: 250
+      gui-y: 50
+  pol:
+    charm: "./pol/build"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 550
+  pla:
+    charm: "./pla/build"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: 500
+      gui-y: -200
+  prometheus:
+    charm: "./prometheus/build"
+    channel: "stable"
+    scale: 1
+    series: kubernetes
+    storage:
+      data: 50M
+    options:
+      default-target: "mon:8000"
+    annotations:
+      gui-x: 250
+      gui-y: 300
+  grafana:
+    charm: "./grafana/build"
+    channel: "stable"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: 250
+      gui-y: 550
+  keystone:
+    charm: "./keystone/build"
+    scale: 3
+    series: kubernetes
+    annotations:
+      gui-x: -250
+      gui-y: 550
+relations:
+  - - grafana:prometheus
+    - prometheus:prometheus
+  - - kafka-k8s:zookeeper
+    - zookeeper-k8s:zookeeper
+  - - keystone:db
+    - mariadb-k8s:mysql
+  - - lcm:kafka
+    - kafka-k8s:kafka
+  - - lcm:mongodb
+    - mongodb-k8s:mongo
+  - - ro:ro
+    - lcm:ro
+  - - ro:kafka
+    - kafka-k8s:kafka
+  - - ro:mongodb
+    - mongodb-k8s:mongo
+  - - pol:kafka
+    - kafka-k8s:kafka
+  - - pol:mongodb
+    - mongodb-k8s:mongo
+  - - mon:mongodb
+    - mongodb-k8s:mongo
+  - - mon:kafka
+    - kafka-k8s:kafka
+  - - pla:kafka
+    - kafka-k8s:kafka
+  - - pla:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:mongodb
+    - mongodb-k8s:mongo
+  - - nbi:kafka
+    - kafka-k8s:kafka
+  - - nbi:prometheus
+    - prometheus:prometheus
+  - - nbi:keystone
+    - keystone:keystone
+  - - mon:prometheus
+    - prometheus:prometheus
+  - - ng-ui:nbi
+    - nbi:nbi
index 0be86d6..2885df2 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-mon.charm
+*.charm
 .coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/mon/.jujuignore b/installers/charm/mon/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index c20ac8d..d71fb69 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index 9480ea7..93d77fe 100644 (file)
@@ -60,3 +60,15 @@ options:
     type: string
     description: "The VCA cacert."
     default: ""
+  grafana_url:
+    description: Grafana URL
+    type: string
+    default: http://grafana:3000
+  grafana_user:
+    description: Grafana user
+    type: string
+    default: admin
+  grafana_password:
+    description: Grafana password
+    type: string
+    default: admin
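These three options are consumed by the reworked MON charm later in this change, where build_pod_spec maps them straight to container environment variables; with the defaults above, the resulting entries would look like this (values shown for illustration only):

    grafana_env = {
        "OSMMON_GRAFANA_URL": "http://grafana:3000",
        "OSMMON_GRAFANA_USER": "admin",
        "OSMMON_GRAFANA_PASSWORD": "admin",
    }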
diff --git a/installers/charm/mon/requirements-test.txt b/installers/charm/mon/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a26601f..f10a199 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index f51213d..98c3297 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
+
 import logging
-from typing import Any, Dict, NoReturn
+from typing import NoReturn
 
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, StoredState
 from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from oci_image import OCIImageResource, OCIImageResourceError
-
-from pod_spec import make_pod_spec
 
-LOGGER = logging.getLogger(__name__)
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
-MON_PORT = 8000
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
 
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mongo import MongoClient
+from opslib.osm.interfaces.prometheus import PrometheusClient
 
-    pass
 
+logger = logging.getLogger(__name__)
 
-class MonEvents(CharmEvents):
-    """MON Events"""
+PORT = 8000
 
-    configure_pod = EventSource(ConfigurePodEvent)
 
+class ConfigModel(ModelValidator):
+    vca_host: str
+    vca_user: str
+    vca_password: str
+    vca_cacert: str
+    database_commonkey: str
+    log_level: str
+    openstack_default_granularity: int
+    global_request_timeout: int
+    collector_interval: int
+    evaluator_interval: int
+    grafana_url: str
+    grafana_user: str
+    grafana_password: str
 
-class MonCharm(CharmBase):
-    """MON Charm."""
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
 
-    state = StoredState()
-    on = MonEvents()
 
+class MonCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """MON Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        # Message bus data initialization
-        self.state.set_default(message_host=None)
-        self.state.set_default(message_port=None)
-
-        # Database data initialization
-        self.state.set_default(database_uri=None)
+        super().__init__(*args, oci_image="image")
 
-        # Prometheus data initialization
-        self.state.set_default(prometheus_host=None)
-        self.state.set_default(prometheus_port=None)
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
 
-        self.port = MON_PORT
-        self.image = OCIImageResource(self, "image")
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
 
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        self.framework.observe(self.on.upgrade_charm, self.configure_pod)
-
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
-
-        # Registering required relation events
+        self.prometheus_client = PrometheusClient(self, "prometheus")
         self.framework.observe(
-            self.on.kafka_relation_changed, self._on_kafka_relation_changed
+            self.on["prometheus"].relation_changed, self.configure_pod
         )
         self.framework.observe(
-            self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
-        )
-        self.framework.observe(
-            self.on.prometheus_relation_changed, self._on_prometheus_relation_changed
+            self.on["prometheus"].relation_broken, self.configure_pod
         )
 
-        # Registering required relation departed events
-        self.framework.observe(
-            self.on.kafka_relation_departed, self._on_kafka_relation_departed
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+        if self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+        if self.prometheus_client.is_missing_data_in_app():
+            missing_relations.append("prometheus")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_envs(
+            {
+                # General configuration
+                "ALLOW_ANONYMOUS_LOGIN": "yes",
+                "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": config.openstack_default_granularity,
+                "OSMMON_GLOBAL_REQUEST_TIMEOUT": config.global_request_timeout,
+                "OSMMON_GLOBAL_LOGLEVEL": config.log_level,
+                "OSMMON_COLLECTOR_INTERVAL": config.collector_interval,
+                "OSMMON_EVALUATOR_INTERVAL": config.evaluator_interval,
+                # Kafka configuration
+                "OSMMON_MESSAGE_DRIVER": "kafka",
+                "OSMMON_MESSAGE_HOST": self.kafka_client.host,
+                "OSMMON_MESSAGE_PORT": self.kafka_client.port,
+                # Database configuration
+                "OSMMON_DATABASE_DRIVER": "mongo",
+                "OSMMON_DATABASE_URI": self.mongodb_client.connection_string,
+                "OSMMON_DATABASE_COMMONKEY": config.database_commonkey,
+                # Prometheus configuration
+                "OSMMON_PROMETHEUS_URL": f"http://{self.prometheus_client.hostname}:{self.prometheus_client.port}",
+                # VCA configuration
+                "OSMMON_VCA_HOST": config.vca_host,
+                "OSMMON_VCA_USER": config.vca_user,
+                "OSMMON_VCA_SECRET": config.vca_password,
+                "OSMMON_VCA_CACERT": config.vca_cacert,
+                "OSMMON_GRAFANA_URL": config.grafana_url,
+                "OSMMON_GRAFANA_USER": config.grafana_user,
+                "OSMMON_GRAFANA_PASSWORD": config.grafana_password,
+            }
         )
-        self.framework.observe(
-            self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
-        )
-        self.framework.observe(
-            self.on.prometheus_relation_departed, self._on_prometheus_relation_departed
-        )
-
-    def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        message_host = event.relation.data[event.unit].get("host")
-        message_port = event.relation.data[event.unit].get("port")
-
-        if (
-            message_host
-            and message_port
-            and (
-                self.state.message_host != message_host
-                or self.state.message_port != message_port
-            )
-        ):
-            self.state.message_host = message_host
-            self.state.message_port = message_port
-            self.on.configure_pod.emit()
-
-    def _on_kafka_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clear kafka relation data.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        self.state.message_host = None
-        self.state.message_port = None
-        self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the DB relation.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        database_uri = event.relation.data[event.unit].get("connection_string")
-
-        if database_uri and self.state.database_uri != database_uri:
-            self.state.database_uri = database_uri
-            self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clear mongodb relation data.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        self.state.database_uri = None
-        self.on.configure_pod.emit()
-
-    def _on_prometheus_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the prometheus relation.
-
-        Args:
-            event (EventBase): Prometheus relation event.
-        """
-        prometheus_host = event.relation.data[event.unit].get("hostname")
-        prometheus_port = event.relation.data[event.unit].get("port")
-
-        if (
-            prometheus_host
-            and prometheus_port
-            and (
-                self.state.prometheus_host != prometheus_host
-                or self.state.prometheus_port != prometheus_port
-            )
-        ):
-            self.state.prometheus_host = prometheus_host
-            self.state.prometheus_port = prometheus_port
-            self.on.configure_pod.emit()
-
-    def _on_prometheus_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clear prometheus relation data.
-
-        Args:
-            event (EventBase): Prometheus relation event.
-        """
-        self.state.prometheus_host = None
-        self.state.prometheus_port = None
-        self.on.configure_pod.emit()
-
-    def _missing_relations(self) -> str:
-        """Checks if there missing relations.
-
-        Returns:
-            str: string with missing relations
-        """
-        data_status = {
-            "kafka": self.state.message_host,
-            "mongodb": self.state.database_uri,
-            "prometheus": self.state.prometheus_host,
-        }
-
-        missing_relations = [k for k, v in data_status.items() if not v]
-
-        return ", ".join(missing_relations)
-
-    @property
-    def relation_state(self) -> Dict[str, Any]:
-        """Collects relation state configuration for pod spec assembly.
-
-        Returns:
-            Dict[str, Any]: relation state information.
-        """
-        relation_state = {
-            "message_host": self.state.message_host,
-            "message_port": self.state.message_port,
-            "database_uri": self.state.database_uri,
-            "prometheus_host": self.state.prometheus_host,
-            "prometheus_port": self.state.prometheus_port,
-        }
-
-        return relation_state
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if missing := self._missing_relations():
-            self.unit.status = BlockedStatus(
-                "Waiting for {0} relation{1}".format(
-                    missing, "s" if "," in missing else ""
-                )
-            )
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                self.relation_state,
-                self.model.app.name,
-                self.port,
-            )
-        except ValueError as exc:
-            LOGGER.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
 
-        self.unit.status = ActiveStatus("ready")
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
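The new ConfigModel front-loads validation: a bad option is rejected before any pod spec is assembled. A hedged sketch of that behaviour, assuming opslib.osm.validator raises a ValueError-style error from a failed @validator (which the base charm would then surface as a blocked status):

    # Illustrative only: an unsupported log_level is rejected up front.
    try:
        ConfigModel(
            vca_host="vca",
            vca_user="admin",
            vca_password="secret",
            vca_cacert="",
            database_commonkey="osm",
            log_level="TRACE",  # not INFO/DEBUG, so validate_log_level rejects it
            openstack_default_granularity=300,
            global_request_timeout=10,
            collector_interval=30,
            evaluator_interval=30,
            grafana_url="http://grafana:3000",
            grafana_user="admin",
            grafana_password="admin",
        )
    except ValueError as err:
        print(err)  # message includes "value must be INFO or DEBUG"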
index 1c50565..dcadfc0 100644 (file)
@@ -36,15 +36,18 @@ def _validate_data(
         relation_data (Dict[str, Any]): relation data.
     """
     config_validators = {
-        "openstack_default_granularity": lambda value, _: isinstance(value, int)
-        and value > 0,
+        "openstack_default_granularity": lambda value, _: (
+            isinstance(value, int) and value > 0
+        ),
         "global_request_timeout": lambda value, _: isinstance(value, int) and value > 0,
-        "log_level": lambda value, _: isinstance(value, str)
-        and value in ("INFO", "DEBUG"),
+        "log_level": lambda value, _: (
+            isinstance(value, str) and value in ("INFO", "DEBUG")
+        ),
         "collector_interval": lambda value, _: isinstance(value, int) and value > 0,
         "evaluator_interval": lambda value, _: isinstance(value, int) and value > 0,
-        "database_commonkey": lambda value, _: isinstance(value, str)
-        and len(value) > 0,
+        "database_commonkey": lambda value, _: (
+            isinstance(value, str) and len(value) > 0
+        ),
         "vca_host": lambda value, _: isinstance(value, str) and len(value) > 0,
         "vca_user": lambda value, _: isinstance(value, str) and len(value) > 0,
         "vca_password": lambda value, _: isinstance(value, str) and len(value) > 0,
@@ -53,8 +56,9 @@ def _validate_data(
     relation_validators = {
         "message_host": lambda value, _: isinstance(value, str) and len(value) > 0,
         "message_port": lambda value, _: isinstance(value, int) and value > 0,
-        "database_uri": lambda value, _: isinstance(value, str)
-        and value.startswith("mongodb://"),
+        "database_uri": lambda value, _: (
+            isinstance(value, str) and value.startswith("mongodb://")
+        ),
         "prometheus_host": lambda value, _: isinstance(value, str) and len(value) > 0,
         "prometheus_port": lambda value, _: isinstance(value, int) and value > 0,
     }
index 010262f..6fcd6a6 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-from ops.model import BlockedStatus
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import MonCharm
 
 
 class TestCharm(unittest.TestCase):
-    """MON Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(MonCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "vca_host": "192.168.0.13",
+            "vca_user": "admin",
+            "vca_password": "admin",
+            "vca_cacert": "cacert",
+            "database_commonkey": "commonkey",
+            "log_level": "INFO",
+            "openstack_default_granularity": 10,
+            "global_request_timeout": 10,
+            "collector_interval": 30,
+            "evaluator_interval": 30,
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.harness.charm.on.config_changed.emit()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka", "prometheus"]
+            )
         )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_start_with_relations(self) -> NoReturn:
-        """Test deployment without keystone."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "mon",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "mon",
-                            "containerPort": 8000,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": 300,
-                        "OSMMON_GLOBAL_REQUEST_TIMEOUT": 10,
-                        "OSMMON_GLOBAL_LOGLEVEL": "INFO",
-                        "OSMMON_COLLECTOR_INTERVAL": 30,
-                        "OSMMON_EVALUATOR_INTERVAL": 30,
-                        "OSMMON_MESSAGE_DRIVER": "kafka",
-                        "OSMMON_MESSAGE_HOST": "kafka",
-                        "OSMMON_MESSAGE_PORT": 9092,
-                        "OSMMON_DATABASE_DRIVER": "mongo",
-                        "OSMMON_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMMON_DATABASE_COMMONKEY": "osm",
-                        "OSMMON_PROMETHEUS_URL": "http://prometheus:9090",
-                        "OSMMON_VCA_HOST": "admin",
-                        "OSMMON_VCA_USER": "admin",
-                        "OSMMON_VCA_SECRET": "secret",
-                        "OSMMON_VCA_CACERT": "",
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
-
-        self.harness.charm.on.start.emit()
 
-        # Check if kafka datastore is initialized
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        # Check if mongodb datastore is initialized
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        # Check if prometheus datastore is initialized
-        self.assertIsNone(self.harness.charm.state.prometheus_host)
-        self.assertIsNone(self.harness.charm.state.prometheus_port)
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        self.initialize_prometheus_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        # Initializing the kafka relation
+    def initialize_kafka_relation(self):
         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
         self.harness.update_relation_data(
             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
         )
 
-        # Initializing the mongo relation
+    def initialize_mongo_relation(self):
         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
         self.harness.update_relation_data(
@@ -124,118 +104,227 @@ class TestCharm(unittest.TestCase):
             {"connection_string": "mongodb://mongo:27017"},
         )
 
-        # Initializing the prometheus relation
+    def initialize_prometheus_relation(self):
         prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
         self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
         self.harness.update_relation_data(
             prometheus_relation_id,
-            "prometheus/0",
+            "prometheus",
             {"hostname": "prometheus", "port": 9090},
         )
 
-        # Checking if kafka data is stored
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Checking if mongodb data is stored
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Checking if prometheus data is stored
-        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
-        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_kafka_unit_relation_changed(self) -> NoReturn:
-        """Test to see if kafka relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertNotIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mongodb relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
-        )
-
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
-        """Test to see if prometheus relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.prometheus_host)
-        self.assertIsNone(self.harness.charm.state.prometheus_port)
 
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id, "prometheus/0", {"hostname": "prometheus", "port": 9090}
-        )
-
-        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
-        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+if __name__ == "__main__":
+    unittest.main()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertNotIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+# class TestCharm(unittest.TestCase):
+#     """MON Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.harness = Harness(MonCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+
+#     def test_on_start_without_relations(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("prometheus", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_start_with_relations(self) -> NoReturn:
+#         """Test deployment without keystone."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "mon",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "mon",
+#                             "containerPort": 8000,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {
+#                         "ALLOW_ANONYMOUS_LOGIN": "yes",
+#                         "OSMMON_OPENSTACK_DEFAULT_GRANULARITY": 300,
+#                         "OSMMON_GLOBAL_REQUEST_TIMEOUT": 10,
+#                         "OSMMON_GLOBAL_LOGLEVEL": "INFO",
+#                         "OSMMON_COLLECTOR_INTERVAL": 30,
+#                         "OSMMON_EVALUATOR_INTERVAL": 30,
+#                         "OSMMON_MESSAGE_DRIVER": "kafka",
+#                         "OSMMON_MESSAGE_HOST": "kafka",
+#                         "OSMMON_MESSAGE_PORT": 9092,
+#                         "OSMMON_DATABASE_DRIVER": "mongo",
+#                         "OSMMON_DATABASE_URI": "mongodb://mongo:27017",
+#                         "OSMMON_DATABASE_COMMONKEY": "osm",
+#                         "OSMMON_PROMETHEUS_URL": "http://prometheus:9090",
+#                         "OSMMON_VCA_HOST": "admin",
+#                         "OSMMON_VCA_USER": "admin",
+#                         "OSMMON_VCA_SECRET": "secret",
+#                         "OSMMON_VCA_CACERT": "",
+#                     },
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Check if kafka datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         # Check if mongodb datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         # Check if prometheus datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.prometheus_host)
+#         self.assertIsNone(self.harness.charm.state.prometheus_port)
+
+#         # Initializing the kafka relation
+#         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         # Initializing the mongo relation
+#         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             mongodb_relation_id,
+#             "mongodb/0",
+#             {"connection_string": "mongodb://mongo:27017"},
+#         )
+
+#         # Initializing the prometheus relation
+#         prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             prometheus_relation_id,
+#             "prometheus",
+#             {"hostname": "prometheus", "port": 9090},
+#         )
+
+#         # Checking if kafka data is stored
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Checking if mongodb data is stored
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Checking if prometheus data is stored
+#         self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
+#         self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_kafka_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if kafka relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertNotIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("prometheus", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if mongodb relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertIn("prometheus", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if prometheus relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.prometheus_host)
+#         self.assertIsNone(self.harness.charm.state.prometheus_port)
+
+#         relation_id = self.harness.add_relation("prometheus", "prometheus")
+#         self.harness.add_relation_unit(relation_id, "prometheus/0")
+#         self.harness.update_relation_data(
+#             relation_id, "prometheus", {"hostname": "prometheus", "port": 9090}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
+#         self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertNotIn("prometheus", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
 
 
-if __name__ == "__main__":
-    unittest.main()
+if __name__ == "__main__":
+    unittest.main()
index 777c18e..6f61916 100644 (file)
@@ -199,7 +199,8 @@ class TestPodSpec(unittest.TestCase):
                         "OSMMON_DATABASE_DRIVER": "mongo",
                         "OSMMON_DATABASE_URI": relation_state["database_uri"],
                         "OSMMON_DATABASE_COMMONKEY": config["database_commonkey"],
-                        "OSMMON_PROMETHEUS_URL": f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}",
+                        "OSMMON_PROMETHEUS_URL": 
+                            f"http://{relation_state['prometheus_host']}:{relation_state['prometheus_port']}",
                         "OSMMON_VCA_HOST": config["vca_host"],
                         "OSMMON_VCA_USER": config["vca_user"],
                         "OSMMON_VCA_SECRET": config["vca_password"],
index 7ddaf8d..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = mon
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release mon.charm
   charmcraft build
-  unzip mon.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/|release/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/ release/"
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =  
-  .tox/*
-  tests/*
index 372886b..5fc255e 100644 (file)
@@ -37,6 +37,7 @@ class TestCharm(unittest.TestCase):
         self.harness = Harness(MongodbExporterCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.harness.update_config(self.config)
 
     def test_on_start_without_relations(self) -> NoReturn:
         """Test installation without any relation."""
index 05bdb52..21a2062 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 venv
 .vscode
 build
+<<<<<<< HEAD
 nbi.charm
 .coverage*
+=======
+*.charm
+.coverage
+coverage.xml
+>>>>>>> WIP: Improve osm charms
 .stestr
-cover
\ No newline at end of file
+cover
+release
\ No newline at end of file
diff --git a/installers/charm/nbi/.jujuignore b/installers/charm/nbi/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+nbi.charm
+.coverage
+.stestr
+cover
index c20ac8d..d71fb69 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index fadfa46..098387f 100644 (file)
@@ -41,12 +41,16 @@ resources:
 requires:
   kafka:
     interface: kafka
+    limit: 1
   mongodb:
     interface: mongodb
+    limit: 1
   keystone:
     interface: keystone
+    limit: 1
   prometheus:
     interface: prometheus
+    limit: 1
 provides:
   nbi:
-    interface: osm-nbi
+    interface: http
diff --git a/installers/charm/nbi/requirements-test.txt b/installers/charm/nbi/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a26601f..f10a199 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 848b53d..9d62fe2 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
-import logging
-from typing import Any, Dict, NoReturn
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, StoredState
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from oci_image import OCIImageResource, OCIImageResourceError
-
-from pod_spec import make_pod_spec
-
-logger = logging.getLogger(__name__)
+# pylint: disable=E0213
 
-NBI_PORT = 9999
 
+import logging
+from typing import Optional, NoReturn
+from ipaddress import ip_network
+from urllib.parse import urlparse
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
+from ops.main import main
 
-    pass
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+    IngressResourceV3Builder,
+)
 
-class NbiEvents(CharmEvents):
-    """NBI Events"""
 
-    configure_pod = EventSource(ConfigurePodEvent)
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mongo import MongoClient
+from opslib.osm.interfaces.prometheus import PrometheusClient
+from opslib.osm.interfaces.keystone import KeystoneClient
+from opslib.osm.interfaces.http import HttpServer
 
-class NbiCharm(CharmBase):
-    """NBI Charm."""
 
-    state = StoredState()
-    on = NbiEvents()
+logger = logging.getLogger(__name__)
 
+PORT = 9999
+
+
+class ConfigModel(ModelValidator):
+    enable_test: bool
+    auth_backend: str
+    database_commonkey: str
+    log_level: str
+    max_file_size: int
+    site_url: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
+
+    @validator("auth_backend")
+    def validate_auth_backend(cls, v):
+        if v not in {"internal", "keystone"}:
+            raise ValueError("value must be 'internal' or 'keystone'")
+        return v
+
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
+
+    @validator("max_file_size")
+    def validate_max_file_size(cls, v):
+        if v < 0:
+            raise ValueError("value must be equal or greater than 0")
+        return v
+
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
+
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
+
+
+class NbiCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """NBI Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
+        super().__init__(*args, oci_image="image")
 
-        # Message bus data initialization
-        self.state.set_default(message_host=None)
-        self.state.set_default(message_port=None)
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
 
-        # Database data initialization
-        self.state.set_default(database_uri=None)
-
-        # Prometheus data initialization
-        self.state.set_default(prometheus_host=None)
-        self.state.set_default(prometheus_port=None)
-
-        # Keystone data initialization
-        self.state.set_default(keystone_host=None)
-        self.state.set_default(keystone_port=None)
-        self.state.set_default(keystone_user_domain_name=None)
-        self.state.set_default(keystone_project_domain_name=None)
-        self.state.set_default(keystone_username=None)
-        self.state.set_default(keystone_password=None)
-        self.state.set_default(keystone_service=None)
-
-        self.port = NBI_PORT
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        self.framework.observe(self.on.upgrade_charm, self.configure_pod)
-
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
-
-        # Registering required relation changed events
-        self.framework.observe(
-            self.on.kafka_relation_changed, self._on_kafka_relation_changed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
-        )
-        self.framework.observe(
-            self.on.keystone_relation_changed, self._on_keystone_relation_changed
-        )
-        self.framework.observe(
-            self.on.prometheus_relation_changed, self._on_prometheus_relation_changed
-        )
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
 
-        # Registering required relation departed events
-        self.framework.observe(
-            self.on.kafka_relation_departed, self._on_kafka_relation_departed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
-        )
+        self.prometheus_client = PrometheusClient(self, "prometheus")
         self.framework.observe(
-            self.on.keystone_relation_departed, self._on_keystone_relation_departed
+            self.on["prometheus"].relation_changed, self.configure_pod
         )
         self.framework.observe(
-            self.on.prometheus_relation_departed, self._on_prometheus_relation_departed
+            self.on["prometheus"].relation_broken, self.configure_pod
         )
 
-        # Registering provided relation events
-        self.framework.observe(self.on.nbi_relation_joined, self._publish_nbi_info)
-
-    def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        message_host = event.relation.data[event.unit].get("host")
-        message_port = event.relation.data[event.unit].get("port")
-
-        if (
-            message_host
-            and message_port
-            and (
-                self.state.message_host != message_host
-                or self.state.message_port != message_port
-            )
-        ):
-            self.state.message_host = message_host
-            self.state.message_port = int(message_port)
-            self.on.configure_pod.emit()
-
-    def _on_kafka_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        self.state.message_host = None
-        self.state.message_port = None
-        self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the DB relation.
+        self.keystone_client = KeystoneClient(self, "keystone")
+        self.framework.observe(self.on["keystone"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["keystone"].relation_broken, self.configure_pod)
 
-        Args:
-            event (EventBase): DB relation event.
-        """
-        database_uri = event.relation.data[event.unit].get("connection_string")
+        self.http_server = HttpServer(self, "nbi")
+        self.framework.observe(self.on["nbi"].relation_joined, self._publish_nbi_info)
 
-        if database_uri and self.state.database_uri != database_uri:
-            self.state.database_uri = database_uri
-            self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from mongodb relation.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        self.state.database_uri = None
-        self.on.configure_pod.emit()
-
-    def _on_keystone_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the keystone relation.
+    def _publish_nbi_info(self, event):
+        """Publishes NBI information.
 
         Args:
-            event (EventBase): Keystone relation event.
+            event (EventBase): NBI relation event.
         """
-        keystone_host = event.relation.data[event.unit].get("host")
-        keystone_port = event.relation.data[event.unit].get("port")
-        keystone_user_domain_name = event.relation.data[event.unit].get(
-            "user_domain_name"
+        if self.unit.is_leader():
+            self.http_server.publish_info(self.app.name, PORT)
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+        if self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+        if self.prometheus_client.is_missing_data_in_app():
+            missing_relations.append("prometheus")
+        if config.auth_backend == "keystone":
+            if self.keystone_client.is_missing_data_in_app():
+                missing_relations.append("keystone")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Init Container
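+        # The init container blocks the workload until kafka is reachable (nc -zvw1 loop)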
+        pod_spec_builder.add_init_container(
+            {
+                "name": "init-check",
+                "image": "alpine:latest",
+                "command": [
+                    "sh",
+                    "-c",
+                    f"until (nc -zvw1 {self.kafka_client.host} {self.kafka_client.port} ); do sleep 3; done; exit 0",
+                ],
+            }
         )
-        keystone_project_domain_name = event.relation.data[event.unit].get(
-            "project_domain_name"
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
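+        # TCP-socket probes on the NBI port: readiness after 5s, liveness after 45s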
+        container_builder.add_tcpsocket_readiness_probe(
+            PORT,
+            initial_delay_seconds=5,
+            timeout_seconds=5,
         )
-        keystone_username = event.relation.data[event.unit].get("username")
-        keystone_password = event.relation.data[event.unit].get("password")
-        keystone_service = event.relation.data[event.unit].get("service")
-
-        if (
-            keystone_host
-            and keystone_port
-            and keystone_user_domain_name
-            and keystone_project_domain_name
-            and keystone_username
-            and keystone_password
-            and keystone_service
-            and (
-                self.state.keystone_host != keystone_host
-                or self.state.keystone_port != keystone_port
-                or self.state.keystone_user_domain_name != keystone_user_domain_name
-                or self.state.keystone_project_domain_name
-                != keystone_project_domain_name
-                or self.state.keystone_username != keystone_username
-                or self.state.keystone_password != keystone_password
-                or self.state.keystone_service != keystone_service
-            )
-        ):
-            self.state.keystone_host = keystone_host
-            self.state.keystone_port = int(keystone_port)
-            self.state.keystone_user_domain_name = keystone_user_domain_name
-            self.state.keystone_project_domain_name = keystone_project_domain_name
-            self.state.keystone_username = keystone_username
-            self.state.keystone_password = keystone_password
-            self.state.keystone_service = keystone_service
-            self.on.configure_pod.emit()
-
-    def _on_keystone_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from keystone relation.
-
-        Args:
-            event (EventBase): Keystone relation event.
-        """
-        self.state.keystone_host = None
-        self.state.keystone_port = None
-        self.state.keystone_user_domain_name = None
-        self.state.keystone_project_domain_name = None
-        self.state.keystone_username = None
-        self.state.keystone_password = None
-        self.state.keystone_service = None
-        self.on.configure_pod.emit()
-
-    def _on_prometheus_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the prometheus relation.
-
-        Args:
-            event (EventBase): Prometheus relation event.
-        """
-        prometheus_host = event.relation.data[event.unit].get("hostname")
-        prometheus_port = event.relation.data[event.unit].get("port")
-
-        if (
-            prometheus_host
-            and prometheus_port
-            and (
-                self.state.prometheus_host != prometheus_host
-                or self.state.prometheus_port != prometheus_port
-            )
-        ):
-            self.state.prometheus_host = prometheus_host
-            self.state.prometheus_port = int(prometheus_port)
-            self.on.configure_pod.emit()
-
-    def _on_prometheus_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from prometheus relation.
-
-        Args:
-            event (EventBase): Prometheus relation event.
-        """
-        self.state.prometheus_host = None
-        self.state.prometheus_port = None
-        self.on.configure_pod.emit()
-
-    def _publish_nbi_info(self, event: EventBase) -> NoReturn:
-        """Publishes NBI information.
-
-        Args:
-            event (EventBase): NBI relation event.
-        """
-        rel_data = {
-            "host": self.model.app.name,
-            "port": str(NBI_PORT),
-        }
-        for k, v in rel_data.items():
-            event.relation.data[self.unit][k] = v
-
-    def _missing_relations(self) -> str:
-        """Checks if there missing relations.
-
-        Returns:
-            str: string with missing relations
-        """
-        data_status = {
-            "kafka": self.state.message_host,
-            "mongodb": self.state.database_uri,
-            "prometheus": self.state.prometheus_host,
-        }
-
-        if self.model.config["auth_backend"] == "keystone":
-            data_status["keystone"] = self.state.keystone_host
-
-        missing_relations = [k for k, v in data_status.items() if not v]
-
-        return ", ".join(missing_relations)
-
-    @property
-    def relation_state(self) -> Dict[str, Any]:
-        """Collects relation state configuration for pod spec assembly.
-
-        Returns:
-            Dict[str, Any]: relation state information.
-        """
-        relation_state = {
-            "message_host": self.state.message_host,
-            "message_port": self.state.message_port,
-            "database_uri": self.state.database_uri,
-            "prometheus_host": self.state.prometheus_host,
-            "prometheus_port": self.state.prometheus_port,
-        }
-
-        if self.model.config["auth_backend"] == "keystone":
-            relation_state.update(
+        container_builder.add_tcpsocket_liveness_probe(
+            PORT,
+            initial_delay_seconds=45,
+            timeout_seconds=10,
+        )
+        container_builder.add_envs(
+            {
+                # General configuration
+                "ALLOW_ANONYMOUS_LOGIN": "yes",
+                "OSMNBI_SERVER_ENABLE_TEST": config.enable_test,
+                "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
+                # Kafka configuration
+                "OSMNBI_MESSAGE_HOST": self.kafka_client.host,
+                "OSMNBI_MESSAGE_DRIVER": "kafka",
+                "OSMNBI_MESSAGE_PORT": self.kafka_client.port,
+                # Database configuration
+                "OSMNBI_DATABASE_DRIVER": "mongo",
+                "OSMNBI_DATABASE_URI": self.mongodb_client.connection_string,
+                "OSMNBI_DATABASE_COMMONKEY": config.database_commonkey,
+                # Storage configuration
+                "OSMNBI_STORAGE_DRIVER": "mongo",
+                "OSMNBI_STORAGE_PATH": "/app/storage",
+                "OSMNBI_STORAGE_COLLECTION": "files",
+                "OSMNBI_STORAGE_URI": self.mongodb_client.connection_string,
+                # Prometheus configuration
+                "OSMNBI_PROMETHEUS_HOST": self.prometheus_client.hostname,
+                "OSMNBI_PROMETHEUS_PORT": self.prometheus_client.port,
+                # Log configuration
+                "OSMNBI_LOG_LEVEL": config.log_level,
+            }
+        )
+        if config.auth_backend == "internal":
+            container_builder.add_env("OSMNBI_AUTHENTICATION_BACKEND", "internal")
+        elif config.auth_backend == "keystone":
+            container_builder.add_envs(
                 {
-                    "keystone_host": self.state.keystone_host,
-                    "keystone_port": self.state.keystone_port,
-                    "keystone_user_domain_name": self.state.keystone_user_domain_name,
-                    "keystone_project_domain_name": self.state.keystone_project_domain_name,
-                    "keystone_username": self.state.keystone_username,
-                    "keystone_password": self.state.keystone_password,
-                    "keystone_service": self.state.keystone_service,
+                    "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
+                    "OSMNBI_AUTHENTICATION_AUTH_URL": self.keystone_client.host,
+                    "OSMNBI_AUTHENTICATION_AUTH_PORT": self.keystone_client.port,
+                    "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": self.keystone_client.user_domain_name,
+                    "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": self.keystone_client.project_domain_name,
+                    "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": self.keystone_client.username,
+                    "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": self.keystone_client.password,
+                    "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": self.keystone_client.service,
                 }
             )
-
-        return relation_state
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if missing := self._missing_relations():
-            self.unit.status = BlockedStatus(
-                f"Waiting for {missing} relation{'s' if ',' in missing else ''}"
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        # Add ingress resources to pod spec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
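+            # max_file_size is expressed in megabytes; 0 maps to "0", which removes the body-size limit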
+            annotations = {
+                "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+                    str(config.max_file_size) + "m"
+                    if config.max_file_size > 0
+                    else config.max_file_size
+                ),
+                "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS",
+            }
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
             )
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                self.relation_state,
-                self.model.app.name,
-                self.port,
-            )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
 
-        self.unit.status = ActiveStatus("ready")
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
+
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+        logger.debug(pod_spec_builder.build())
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
index b6291d4..b8f5904 100644 (file)
@@ -98,12 +98,15 @@ def _validate_data(
     """
     config_validators = {
         "enable_test": lambda value, _: isinstance(value, bool),
-        "database_commonkey": lambda value, _: isinstance(value, str)
-        and len(value) > 1,
-        "log_level": lambda value, _: isinstance(value, str)
-        and value in ("INFO", "DEBUG"),
-        "auth_backend": lambda value, _: isinstance(value, str)
-        and (value == "internal" or value == "keystone"),
+        "database_commonkey": lambda value, _: (
+            isinstance(value, str) and len(value) > 1
+        ),
+        "log_level": lambda value, _: (
+            isinstance(value, str) and value in ("INFO", "DEBUG")
+        ),
+        "auth_backend": lambda value, _: (
+            isinstance(value, str) and (value == "internal" or value == "keystone")
+        ),
         "site_url": lambda value, _: isinstance(value, str)
         if value is not None
         else True,
@@ -118,8 +121,9 @@ def _validate_data(
     relation_validators = {
         "message_host": lambda value, _: isinstance(value, str),
         "message_port": lambda value, _: isinstance(value, int) and value > 0,
-        "database_uri": lambda value, _: isinstance(value, str)
-        and value.startswith("mongodb://"),
+        "database_uri": lambda value, _: (
+            isinstance(value, str) and value.startswith("mongodb://")
+        ),
         "prometheus_host": lambda value, _: isinstance(value, str),
         "prometheus_port": lambda value, _: isinstance(value, int) and value > 0,
         "keystone_host": lambda value, _: _validate_keystone_config(
index 657e89b..be24aa6 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-from ops.model import BlockedStatus
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import NbiCharm
 
 
 class TestCharm(unittest.TestCase):
-    """NBI Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(NbiCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "enable_test": False,
+            "auth_backend": "internal",
+            "database_commonkey": "key",
+            "log_level": "INFO",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+            "site_url": "https://nbi.192.168.100.100.xip.io",
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertNotIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_start_without_relations_with_keystone(self) -> NoReturn:
-        """Test installation without any relation and keystone enabled."""
-        self.harness.update_config({"auth_backend": "keystone"})
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        self.harness.charm.on.start.emit()
+        self.harness.charm.on.config_changed.emit()
 
-        # Verifying status
+        # Assertions
         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_start_with_relations(self) -> NoReturn:
-        """Test deployment without keystone."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "internal",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Check if kafka datastore is initialized
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        # Check if mongodb datastore is initialized
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        # Check if prometheus datastore is initialized
-        self.assertIsNone(self.harness.charm.state.prometheus_host)
-        self.assertIsNone(self.harness.charm.state.prometheus_port)
-
-        # Initializing the kafka relation
-        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        # Initializing the mongo relation
-        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            mongodb_relation_id,
-            "mongodb/0",
-            {"connection_string": "mongodb://mongo:27017"},
-        )
-
-        # Initializing the prometheus relation
-        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            prometheus_relation_id,
-            "prometheus/0",
-            {"hostname": "prometheus", "port": 9090},
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka", "prometheus"]
+            )
         )
 
-        # Checking if kafka data is stored
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Checking if mongodb data is stored
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Checking if prometheus data is stored
-        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
-        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
-
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations_internal(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        self.initialize_prometheus_relation()
         # Verifying status
         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_start_with_relations_with_keystone(self) -> NoReturn:
-        """Test deployment with keystone."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "keystone",
-                        "OSMNBI_AUTHENTICATION_AUTH_URL": "keystone",
-                        "OSMNBI_AUTHENTICATION_AUTH_PORT": 5000,
-                        "OSMNBI_AUTHENTICATION_USER_DOMAIN_NAME": "default",
-                        "OSMNBI_AUTHENTICATION_PROJECT_DOMAIN_NAME": "default",
-                        "OSMNBI_AUTHENTICATION_SERVICE_USERNAME": "nbi",
-                        "OSMNBI_AUTHENTICATION_SERVICE_PASSWORD": "nbi",
-                        "OSMNBI_AUTHENTICATION_SERVICE_PROJECT": "service",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [],
-            },
-        }
-
+    def test_with_relations_keystone_missing(
+        self,
+    ) -> NoReturn:
+        "Test with relations (keystone)"
         self.harness.update_config({"auth_backend": "keystone"})
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        self.initialize_prometheus_relation()
+        # Verifying status
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.assertTrue("keystone" in self.harness.charm.unit.status.message)
 
-        self.harness.charm.on.start.emit()
-
-        # Check if kafka datastore is initialized
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        # Check if mongodb datastore is initialized
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        # Check if prometheus datastore is initialized
-        self.assertIsNone(self.harness.charm.state.prometheus_host)
-        self.assertIsNone(self.harness.charm.state.prometheus_port)
-
-        # Check if keystone datastore is initialized
-        self.assertIsNone(self.harness.charm.state.keystone_host)
-        self.assertIsNone(self.harness.charm.state.keystone_port)
-        self.assertIsNone(self.harness.charm.state.keystone_user_domain_name)
-        self.assertIsNone(self.harness.charm.state.keystone_project_domain_name)
-        self.assertIsNone(self.harness.charm.state.keystone_username)
-        self.assertIsNone(self.harness.charm.state.keystone_password)
-        self.assertIsNone(self.harness.charm.state.keystone_service)
+    def test_with_relations_keystone(
+        self,
+    ) -> NoReturn:
+        "Test with relations (keystone)"
+        self.harness.update_config({"auth_backend": "keystone"})
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        self.initialize_prometheus_relation()
+        self.initialize_keystone_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        # Initializing the kafka relation
+    def initialize_kafka_relation(self):
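+        # Helper: simulate a kafka unit joining the relation and publishing
+        # its host/port, as the real kafka charm is expected to do.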
         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
         self.harness.update_relation_data(
             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
         )
 
-        # Initializing the mongodb relation
+    def initialize_mongo_relation(self):
         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
         self.harness.update_relation_data(
@@ -263,608 +126,122 @@ class TestCharm(unittest.TestCase):
             {"connection_string": "mongodb://mongo:27017"},
         )
 
-        # Initializing the prometheus relation
-        promethues_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(promethues_relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            promethues_relation_id,
-            "prometheus/0",
-            {"hostname": "prometheus", "port": 9090},
-        )
-
-        # Initializing the keystone relation
+    def initialize_keystone_relation(self):
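+        # Helper: the credentials are written to the application databag
+        # ("keystone" rather than "keystone/0"), which appears to be where
+        # this charm reads them from.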
         keystone_relation_id = self.harness.add_relation("keystone", "keystone")
         self.harness.add_relation_unit(keystone_relation_id, "keystone/0")
         self.harness.update_relation_data(
             keystone_relation_id,
-            "keystone/0",
+            "keystone",
             {
-                "host": "keystone",
+                "host": "host",
                 "port": 5000,
-                "user_domain_name": "default",
-                "project_domain_name": "default",
-                "username": "nbi",
-                "password": "nbi",
-                "service": "service",
-            },
-        )
-
-        # Checking if kafka data is stored
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Checking if mongodb data is stored
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Checking if prometheus data is stored
-        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
-        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
-
-        # Checking if keystone data is stored
-        self.assertEqual(self.harness.charm.state.keystone_host, "keystone")
-        self.assertEqual(self.harness.charm.state.keystone_port, 5000)
-        self.assertEqual(self.harness.charm.state.keystone_user_domain_name, "default")
-        self.assertEqual(
-            self.harness.charm.state.keystone_project_domain_name, "default"
-        )
-        self.assertEqual(self.harness.charm.state.keystone_username, "nbi")
-        self.assertEqual(self.harness.charm.state.keystone_password, "nbi")
-        self.assertEqual(self.harness.charm.state.keystone_service, "service")
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_without_http(self) -> NoReturn:
-        """Test ingress resources without HTTP."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "internal",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the kafka relation
-        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        # Initializing the mongodb relation
-        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            mongodb_relation_id,
-            "mongodb/0",
-            {"connection_string": "mongodb://mongo:27017"},
-        )
-
-        # Initializing the prometheus relation
-        promethues_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(promethues_relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            promethues_relation_id,
-            "prometheus/0",
-            {"hostname": "prometheus", "port": 9090},
-        )
-
-        self.harness.update_config({"site_url": "nbi"})
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_http(self) -> NoReturn:
-        """Test ingress resources with HTTP."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "internal",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "nbi-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-                            "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "nbi",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "nbi",
-                                                    "servicePort": 9999,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ]
-                        },
-                    }
-                ],
+                "user_domain_name": "ud",
+                "project_domain_name": "pd",
+                "username": "u",
+                "password": "p",
+                "service": "s",
+                "keystone_db_password": "something",
+                "region_id": "something",
+                "admin_username": "something",
+                "admin_password": "something",
+                "admin_project_name": "something",
             },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the kafka relation
-        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        # Initializing the mongodb relation
-        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            mongodb_relation_id,
-            "mongodb/0",
-            {"connection_string": "mongodb://mongo:27017"},
         )
 
-        # Initializing the prometheus relation
-        promethues_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(promethues_relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            promethues_relation_id,
-            "prometheus/0",
-            {"hostname": "prometheus", "port": 9090},
-        )
-
-        self.harness.update_config({"site_url": "http://nbi"})
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_https(self) -> NoReturn:
-        """Test ingress resources with HTTPS."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "internal",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "nbi-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "nbi",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "nbi",
-                                                    "servicePort": 9999,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [{"hosts": ["nbi"], "secretName": "nbi"}],
-                        },
-                    }
-                ],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the kafka relation
-        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        # Initializing the mongodb relation
-        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            mongodb_relation_id,
-            "mongodb/0",
-            {"connection_string": "mongodb://mongo:27017"},
-        )
-
-        # Initializing the prometheus relation
-        promethues_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(promethues_relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            promethues_relation_id,
-            "prometheus/0",
-            {"hostname": "prometheus", "port": 9090},
-        )
-
-        self.harness.update_config(
-            {"site_url": "https://nbi", "tls_secret_name": "nbi"}
-        )
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-        """Test ingress resources with HTTPS and ingress whitelist."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "nbi",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "nbi",
-                            "containerPort": 9999,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMNBI_SERVER_ENABLE_TEST": False,
-                        "OSMNBI_STATIC_DIR": "/app/osm_nbi/html_public",
-                        "OSMNBI_MESSAGE_HOST": "kafka",
-                        "OSMNBI_MESSAGE_DRIVER": "kafka",
-                        "OSMNBI_MESSAGE_PORT": 9092,
-                        "OSMNBI_DATABASE_DRIVER": "mongo",
-                        "OSMNBI_DATABASE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_DATABASE_COMMONKEY": "osm",
-                        "OSMNBI_STORAGE_DRIVER": "mongo",
-                        "OSMNBI_STORAGE_PATH": "/app/storage",
-                        "OSMNBI_STORAGE_COLLECTION": "files",
-                        "OSMNBI_STORAGE_URI": "mongodb://mongo:27017",
-                        "OSMNBI_PROMETHEUS_HOST": "prometheus",
-                        "OSMNBI_PROMETHEUS_PORT": 9090,
-                        "OSMNBI_LOG_LEVEL": "INFO",
-                        "OSMNBI_AUTHENTICATION_BACKEND": "internal",
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "nbi-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS",
-                            "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "nbi",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "nbi",
-                                                    "servicePort": 9999,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [{"hosts": ["nbi"], "secretName": "nbi"}],
-                        },
-                    }
-                ],
-            },
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the kafka relation
-        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        # Initializing the mongodb relation
-        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            mongodb_relation_id,
-            "mongodb/0",
-            {"connection_string": "mongodb://mongo:27017"},
-        )
-
-        # Initializing the prometheus relation
-        promethues_relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(promethues_relation_id, "prometheus/0")
+    def initialize_prometheus_relation(self):
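+        # Helper: as with keystone, the data goes into the application
+        # databag ("prometheus") rather than a unit databag.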
+        prometheus_relation_id = self.harness.add_relation("prometheus", "prometheus")
+        self.harness.add_relation_unit(prometheus_relation_id, "prometheus/0")
         self.harness.update_relation_data(
-            promethues_relation_id,
-            "prometheus/0",
+            prometheus_relation_id,
+            "prometheus",
             {"hostname": "prometheus", "port": 9090},
         )
 
-        self.harness.update_config(
-            {
-                "site_url": "https://nbi",
-                "tls_secret_name": "nbi",
-                "ingress_whitelist_source_range": "0.0.0.0/0",
-            }
-        )
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_kafka_unit_relation_changed(self) -> NoReturn:
-        """Test to see if kafka relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertNotIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertNotIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mongodb relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
-        )
-
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertNotIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_prometheus_unit_relation_changed(self) -> NoReturn:
-        """Test to see if prometheus relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.prometheus_host)
-        self.assertIsNone(self.harness.charm.state.prometheus_port)
-
-        relation_id = self.harness.add_relation("prometheus", "prometheus")
-        self.harness.add_relation_unit(relation_id, "prometheus/0")
-        self.harness.update_relation_data(
-            relation_id, "prometheus/0", {"hostname": "prometheus", "port": 9090}
-        )
-
-        self.assertEqual(self.harness.charm.state.prometheus_host, "prometheus")
-        self.assertEqual(self.harness.charm.state.prometheus_port, 9090)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertNotIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertNotIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_keystone_unit_relation_changed(self) -> NoReturn:
-        """Test to see if keystone relation is updated."""
-        self.harness.update_config({"auth_backend": "keystone"})
-
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.keystone_host)
-        self.assertIsNone(self.harness.charm.state.keystone_port)
-        self.assertIsNone(self.harness.charm.state.keystone_user_domain_name)
-        self.assertIsNone(self.harness.charm.state.keystone_project_domain_name)
-        self.assertIsNone(self.harness.charm.state.keystone_username)
-        self.assertIsNone(self.harness.charm.state.keystone_password)
-        self.assertIsNone(self.harness.charm.state.keystone_service)
-
-        relation_id = self.harness.add_relation("keystone", "keystone")
-        self.harness.add_relation_unit(relation_id, "keystone/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "keystone/0",
-            {
-                "host": "keystone",
-                "port": 5000,
-                "user_domain_name": "default",
-                "project_domain_name": "default",
-                "username": "nbi",
-                "password": "nbi",
-                "service": "service",
-            },
-        )
-
-        self.assertEqual(self.harness.charm.state.keystone_host, "keystone")
-        self.assertEqual(self.harness.charm.state.keystone_port, 5000)
-        self.assertEqual(self.harness.charm.state.keystone_user_domain_name, "default")
-        self.assertEqual(
-            self.harness.charm.state.keystone_project_domain_name, "default"
-        )
-        self.assertEqual(self.harness.charm.state.keystone_username, "nbi")
-        self.assertEqual(self.harness.charm.state.keystone_password, "nbi")
-        self.assertEqual(self.harness.charm.state.keystone_service, "service")
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertIn("prometheus", self.harness.charm.unit.status.message)
-        self.assertNotIn("keystone", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_publish_nbi_info(self) -> NoReturn:
-        """Test to see if nbi relation is updated."""
-        expected_result = {
-            "host": "nbi",
-            "port": "9999",
-        }
-
-        self.harness.charm.on.start.emit()
-
-        relation_id = self.harness.add_relation("nbi", "ng-ui")
-        self.harness.add_relation_unit(relation_id, "ng-ui/0")
-        relation_data = self.harness.get_relation_data(relation_id, "nbi/0")
-
-        self.assertDictEqual(expected_result, relation_data)
-
 
 if __name__ == "__main__":
     unittest.main()
index 5989669..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = nbi
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
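+# Runs the unit tests through nose2 with coverage and produces report, HTML and XML output.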
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
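+# Builds the charm with charmcraft and copies the build output to ./release.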
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release nbi.charm
   charmcraft build
-  unzip nbi.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
index 9c7c049..ecfb4dc 100644 (file)
@@ -1,19 +1,37 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
 .vscode
 build
+<<<<<<< HEAD
 ng-ui.charm
 .stestr
 .coverage*
-cover/
\ No newline at end of file
+cover/
+=======
+*.charm
+.coverage
+coverage.xml
+.stestr
+cover
+release
+>>>>>>> WIP: Improve osm charms
diff --git a/installers/charm/ng-ui/.jujuignore b/installers/charm/ng-ui/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index ab52c60..d71fb69 100644 (file)
@@ -1,16 +1,24 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
 ---
 extends: default
 
@@ -20,4 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
-  mod
+  cover/
+  build/
+  venv
+  release/
index 7765b26..3ff7a97 100644 (file)
@@ -24,7 +24,7 @@ deployment:
   service: cluster
 requires:
   nbi:
-    interface: osm-nbi
+    interface: http
 resources:
   image:
     type: oci-image
diff --git a/installers/charm/ng-ui/requirements-test.txt b/installers/charm/ng-ui/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a178e33..2eaba28 100644 (file)
@@ -1,16 +1,23 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-ops
-pydantic
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+pydantic  # TODO: remove it
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 944d8ce..4d2bb85 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
-import logging
-from typing import Any, Dict, NoReturn
-from pydantic import ValidationError
-
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, StoredState
-from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from oci_image import OCIImageResource, OCIImageResourceError
-
-from pod_spec import make_pod_spec
-
-logger = logging.getLogger(__name__)
+# pylint: disable=E0213
 
-NGUI_PORT = 80
 
+import logging
+from typing import Optional, NoReturn
+from ipaddress import ip_network
+from urllib.parse import urlparse
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
+from ops.main import main
 
-    pass
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+    FilesV3Builder,
+    IngressResourceV3Builder,
+)
 
-class NgUiEvents(CharmEvents):
-    """NGUI Events"""
 
-    configure_pod = EventSource(ConfigurePodEvent)
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
+from opslib.osm.interfaces.http import HttpClient
+from string import Template
+from pathlib import Path
 
-class NgUiCharm(CharmBase):
-    """NGUI Charm."""
+logger = logging.getLogger(__name__)
 
-    state = StoredState()
-    on = NgUiEvents()
 
+class ConfigModel(ModelValidator):
+    port: int
+    server_name: str
+    max_file_size: int
+    site_url: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
+
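+    # The validators below reject invalid config values up front; presumably
+    # the base charm turns the resulting errors into a clear status message.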
+    @validator("port")
+    def validate_port(cls, v):
+        if v <= 0:
+            raise ValueError("value must be greater than 0")
+        return v
+
+    @validator("max_file_size")
+    def validate_max_file_size(cls, v):
+        if v < 0:
+            raise ValueError("value must be equal or greater than 0")
+        return v
+
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
+
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
+
+
+class NgUiCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """NGUI Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        # North bound interface initialization
-        self.state.set_default(nbi_host=None)
-        self.state.set_default(nbi_port=None)
-
-        self.http_port = NGUI_PORT
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        # self.framework.observe(self.on.upgrade_charm, self.configure_pod)
-
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
-
-        # Registering required relation changed events
-        self.framework.observe(
-            self.on.nbi_relation_changed, self._on_nbi_relation_changed
+        super().__init__(*args, oci_image="image")
+
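+        # The NBI endpoint is consumed over a plain http interface; any change
+        # in that relation triggers a pod spec rebuild via configure_pod.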
+        self.nbi_client = HttpClient(self, "nbi")
+        self.framework.observe(self.on["nbi"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["nbi"].relation_broken, self.configure_pod)
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.nbi_client.is_missing_data_in_app():
+            missing_relations.append("nbi")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def _build_files(self, config: ConfigModel):
+        files_builder = FilesV3Builder()
+        files_builder.add_file(
+            "default",
+            Template(Path("files/default").read_text()).substitute(
+                port=config.port,
+                server_name=config.server_name,
+                max_file_size=config.max_file_size,
+                nbi_host=self.nbi_client.host,
+                nbi_port=self.nbi_client.port,
+            ),
         )
-
-        # Registering required relation departed events
-        self.framework.observe(
-            self.on.nbi_relation_departed, self._on_nbi_relation_departed
+        return files_builder.build()
+
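_build_files renders the nginx site definition with string.Template from the standard library. The real files/default template ships with the charm and is not part of this diff, so the template string below is a made-up stand-in that only demonstrates the substitution step:

from string import Template

# Hypothetical stand-in for files/default; the real template is bundled with the charm.
template = Template("server { listen $port; server_name $server_name; }")
print(template.substitute(port=80, server_name="localhost"))
# server { listen 80; server_name localhost; }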
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=config.port)
+        container_builder.add_tcpsocket_readiness_probe(
+            config.port,
+            initial_delay_seconds=45,
+            timeout_seconds=5,
         )
-
-    def _on_nbi_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the nbi relation.
-
-        Args:
-            event (EventBase): NBI relation event.
-        """
-        if event.unit not in event.relation.data:
-            return
-        relation_data = event.relation.data[event.unit]
-        nbi_host = relation_data.get("host")
-        nbi_port = relation_data.get("port")
-
-        if (
-            nbi_host
-            and nbi_port
-            and (self.state.nbi_host != nbi_host or self.state.nbi_port != nbi_port)
-        ):
-            self.state.nbi_host = nbi_host
-            self.state.nbi_port = nbi_port
-            self.on.configure_pod.emit()
-
-    def _on_nbi_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clears data from nbi relation.
-
-        Args:
-            event (EventBase): NBI relation event.
-        """
-        self.state.nbi_host = None
-        self.state.nbi_port = None
-        self.on.configure_pod.emit()
-
-    def _missing_relations(self) -> str:
-        """Checks if there missing relations.
-
-        Returns:
-            str: string with missing relations
-        """
-        data_status = {
-            "nbi": self.state.nbi_host,
-        }
-
-        missing_relations = [k for k, v in data_status.items() if not v]
-
-        return ", ".join(missing_relations)
-
-    @property
-    def relation_state(self) -> Dict[str, Any]:
-        """Collects relation state configuration for pod spec assembly.
-
-        Returns:
-            Dict[str, Any]: relation state information.
-        """
-        relation_state = {
-            "nbi_host": self.state.nbi_host,
-            "nbi_port": self.state.nbi_port,
-        }
-        return relation_state
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if missing := self._missing_relations():
-            self.unit.status = BlockedStatus(
-                f"Waiting for {missing} relation{'s' if ',' in missing else ''}"
-            )
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.config,
-                self.relation_state,
-                self.model.app.name,
+        container_builder.add_tcpsocket_liveness_probe(
+            config.port,
+            initial_delay_seconds=45,
+            timeout_seconds=15,
+        )
+        container_builder.add_volume_config(
+            "configuration",
+            "/etc/nginx/sites-available/",
+            self._build_files(config),
+        )
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        # Add ingress resources to pod spec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
+            annotations = {
+                "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+                    str(config.max_file_size) + "m"
+                    if config.max_file_size > 0
+                    else config.max_file_size
+                ),
+            }
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
             )
-        except ValidationError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
 
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
 
-        self.unit.status = ActiveStatus("ready")
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(
+                parsed.hostname, self.app.name, config.port
+            )
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
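The ingress handling in build_pod_spec boils down to a small set of annotation rules. The following standalone sketch restates them as a pure function (the function name and defaults are illustrative, not part of the charm) so the proxy-body-size, whitelist and ssl-redirect branches are easy to check in isolation:

from urllib.parse import urlparse


def ingress_annotations(site_url: str, max_file_size: int, whitelist: str = "") -> dict:
    # Same rules as build_pod_spec: body size in megabytes when > 0, an optional
    # source whitelist, and the https redirect disabled only for plain-http URLs.
    annotations = {
        "nginx.ingress.kubernetes.io/proxy-body-size": (
            f"{max_file_size}m" if max_file_size > 0 else str(max_file_size)
        ),
    }
    if whitelist:
        annotations["nginx.ingress.kubernetes.io/whitelist-source-range"] = whitelist
    if urlparse(site_url).scheme != "https":
        annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
    return annotations


print(ingress_annotations("https://ui.example.com", 25))
# {'nginx.ingress.kubernetes.io/proxy-body-size': '25m'}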
index 1687756..95d5f72 100644 (file)
@@ -20,6 +20,9 @@
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213,E0611
+
+
 import logging
 from pydantic import (
     BaseModel,
index 6004c6d..d0d973a 100644 (file)
 
 """Init mocking for unit tests."""
 
-import sys
-import mock
+import sys
+import mock
 
-sys.path.append("src")
+sys.path.append("src")
 
-oci_image = mock.MagicMock()
-sys.modules["oci_image"] = oci_image
+oci_image = mock.MagicMock()
+sys.modules["oci_image"] = oci_image
index 1cde2df..d9a4d3e 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import NgUiCharm
 
 
 class TestCharm(unittest.TestCase):
-    """PLA Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(NgUiCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "server_name": "localhost",
+            "port": 80,
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+            "site_url": "https://ui.192.168.100.100.xip.io",
+        }
+        self.harness.update_config(self.config)
+
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+
+        self.harness.charm.on.config_changed.emit()
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.assertTrue(
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["nbi"]
+            )
+        )
+
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
         self.harness.charm.on.config_changed.emit()
 
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_nbi_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+    def initialize_nbi_relation(self):
+        http_relation_id = self.harness.add_relation("nbi", "nbi")
+        self.harness.add_relation_unit(http_relation_id, "nbi")
+        self.harness.update_relation_data(
+            http_relation_id,
+            "nbi",
+            {"host": "nbi", "port": 9999},
+        )
+
 
 if __name__ == "__main__":
     unittest.main()
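These tests drive the charm through ops.testing.Harness. Note that initialize_nbi_relation writes the host and port into the application databag of the remote "nbi" app rather than a unit databag, because HttpClient checks is_missing_data_in_app(). A small self-contained sketch of the two databags, using a throwaway charm and metadata that exist only for this illustration:

from ops.charm import CharmBase
from ops.testing import Harness


class DummyCharm(CharmBase):
    """Throwaway charm; just enough for Harness to manage relation data."""


harness = Harness(
    DummyCharm,
    meta="""
name: dummy
requires:
  nbi:
    interface: http
""",
)
harness.begin()
rel_id = harness.add_relation("nbi", "nbi")
harness.add_relation_unit(rel_id, "nbi/0")
# Unit databag -- what a client using is_missing_data_in_unit() would read:
harness.update_relation_data(rel_id, "nbi/0", {"host": "nbi", "port": "9999"})
# Application databag -- what HttpClient.is_missing_data_in_app() reads in the test above:
harness.update_relation_data(rel_id, "nbi", {"host": "nbi", "port": "9999"})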
index e60f0b8..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = ng-ui
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release ng-ui.charm
   charmcraft build
-  unzip ng-ui.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-  -rrequirements.txt
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/|release/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/ release/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =  
-  .tox/*
-  tests/*
index dad4b21..493739e 100644 (file)
@@ -1,19 +1,30 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
 .vscode
 build
-pla.charm
+*.charm
+.coverage
+coverage.xml
 .stestr
-.coverage*
-cover/
\ No newline at end of file
+cover
+release
diff --git a/installers/charm/pla/.jujuignore b/installers/charm/pla/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index ab52c60..d71fb69 100644 (file)
@@ -1,16 +1,24 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
 ---
 extends: default
 
@@ -20,4 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
-  mod
+  cover/
+  build/
+  venv
+  release/
index 57c5df0..ae90304 100644 (file)
 # limitations under the License.
 
 options:
-  image:
-    description: Docker image name
-    type: string
-    default: opensourcemano/pla:latest
-  image_username:
-    description: Docker repository username
-    type: string
-    default: ""
-  image_password:
-    description: Docker repository password
-    type: string
-    default: ""
-  port:
-    description: Port number
-    type: int
-    default: 8080 # Fake port
   log_level:
     description: Log level
     type: string
     default: INFO
-  database:
-    description: Database name
-    type: string
-    default: pla
-  database_common_key:
+  database_commonkey:
     description: Common Key for Mongo database
     type: string
     default: osm
index 90c9d40..b9a344b 100644 (file)
@@ -22,8 +22,13 @@ min-juju-version: 2.7.0
 deployment:
   type: stateless
   service: cluster
+resources:
+  image:
+    type: oci-image
+    description: OSM docker image for PLA
+    upstream-source: "opensourcemano/pla:latest"
 requires:
   kafka:
     interface: kafka
-  mongo:
+  mongodb:
     interface: mongodb
diff --git a/installers/charm/pla/requirements-test.txt b/installers/charm/pla/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index 10ecdcd..f10a199 100644 (file)
@@ -1,14 +1,22 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#     Unless required by applicable law or agreed to in writing, software
-#     distributed under the License is distributed on an "AS IS" BASIS,
-#     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#     See the License for the specific language governing permissions and
-#     limitations under the License.
-ops
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 6847580..4f463bf 100755 (executable)
 #!/usr/bin/env python3
-#   Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
 #
-#       http://www.apache.org/licenses/LICENSE-2.0
+#         http://www.apache.org/licenses/LICENSE-2.0
 #
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+# pylint: disable=E0213
+
 
 import logging
+from typing import NoReturn
 
-from ops.charm import CharmBase
-from ops.framework import StoredState
 from ops.main import main
-from ops.model import (
-    ActiveStatus,
-    MaintenanceStatus,
-    WaitingStatus,
+
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
+
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
 )
-from typing import NoReturn
+
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
+
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mongo import MongoClient
+
 
 logger = logging.getLogger(__name__)
 
+PORT = 9999
+
+
+class ConfigModel(ModelValidator):
+    database_commonkey: str
+    log_level: str
 
-class PLACharm(CharmBase):
-    state = StoredState()
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
 
+
+class PlaCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        super().__init__(*args)
-        self.state.set_default(spec=None)
-        self.state.set_default(kafka_host=None)
-        self.state.set_default(kafka_port=None)
-        self.state.set_default(mongodb_uri=None)
-
-        # Observe Charm related events
-        self.framework.observe(self.on.config_changed, self.on_config_changed)
-        self.framework.observe(self.on.start, self.on_start)
-        self.framework.observe(self.on.upgrade_charm, self.on_upgrade_charm)
-
-        # Relations
-        self.framework.observe(
-            self.on.kafka_relation_changed, self.on_kafka_relation_changed
-        )
-        self.framework.observe(
-            self.on.mongo_relation_changed, self.on_mongo_relation_changed
+        super().__init__(*args, oci_image="image")
+
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
+
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+        if self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_envs(
+            {
+                # General configuration
+                "ALLOW_ANONYMOUS_LOGIN": "yes",
+                "OSMPLA_GLOBAL_LOG_LEVEL": config.log_level,
+                # Kafka configuration
+                "OSMPLA_MESSAGE_DRIVER": "kafka",
+                "OSMPLA_MESSAGE_HOST": self.kafka_client.host,
+                "OSMPLA_MESSAGE_PORT": self.kafka_client.port,
+                # Database configuration
+                "OSMPLA_DATABASE_DRIVER": "mongo",
+                "OSMPLA_DATABASE_URI": self.mongodb_client.connection_string,
+                "OSMPLA_DATABASE_COMMONKEY": config.database_commonkey,
+            }
         )
 
-    def _apply_spec(self):
-        # Only apply the spec if this unit is a leader.
-        unit = self.model.unit
-        if not unit.is_leader():
-            unit.status = ActiveStatus("ready")
-            return
-        if not self.state.kafka_host or not self.state.kafka_port:
-            unit.status = WaitingStatus("Waiting for Kafka")
-            return
-        if not self.state.mongodb_uri:
-            unit.status = WaitingStatus("Waiting for MongoDB")
-            return
-
-        unit.status = MaintenanceStatus("Applying new pod spec")
-
-        new_spec = self.make_pod_spec()
-        if new_spec == self.state.spec:
-            unit.status = ActiveStatus("ready")
-            return
-        self.framework.model.pod.set_spec(new_spec)
-        self.state.spec = new_spec
-        unit.status = ActiveStatus("ready")
-
-    def make_pod_spec(self):
-        config = self.framework.model.config
-
-        ports = [
-            {
-                "name": "port",
-                "containerPort": config["port"],
-                "protocol": "TCP",
-            },
-        ]
-
-        config_spec = {
-            "OSMPLA_MESSAGE_DRIVER": "kafka",
-            "OSMPLA_MESSAGE_HOST": self.state.kafka_host,
-            "OSMPLA_MESSAGE_PORT": self.state.kafka_port,
-            "OSMPLA_DATABASE_DRIVER": "mongo",
-            "OSMPLA_DATABASE_URI": self.state.mongodb_uri,
-            "OSMPLA_GLOBAL_LOG_LEVEL": config["log_level"],
-            "OSMPLA_DATABASE_COMMONKEY": config["database_common_key"],
-        }
-
-        spec = {
-            "version": 2,
-            "containers": [
-                {
-                    "name": self.framework.model.app.name,
-                    "imageDetails": {
-                        "imagePath": config["image"],
-                        "username": config["image_username"],
-                        "password": config["image_password"],
-                    },
-                    "ports": ports,
-                    "config": config_spec,
-                }
-            ],
-        }
-
-        return spec
-
-    def on_config_changed(self, event):
-        """Handle changes in configuration"""
-        self._apply_spec()
-
-    def on_start(self, event):
-        """Called when the charm is being installed"""
-        self._apply_spec()
-
-    def on_upgrade_charm(self, event):
-        """Upgrade the charm."""
-        unit = self.model.unit
-        unit.status = MaintenanceStatus("Upgrading charm")
-        self._apply_spec()
-
-    def on_kafka_relation_changed(self, event):
-        kafka_host = event.relation.data[event.unit].get("host")
-        kafka_port = event.relation.data[event.unit].get("port")
-        if kafka_host and self.state.kafka_host != kafka_host:
-            self.state.kafka_host = kafka_host
-        if kafka_port and self.state.kafka_port != kafka_port:
-            self.state.kafka_port = kafka_port
-        self._apply_spec()
-
-    def on_mongo_relation_changed(self, event):
-        mongodb_uri = event.relation.data[event.unit].get("connection_string")
-        if mongodb_uri and self.state.mongodb_uri != mongodb_uri:
-            self.state.mongodb_uri = mongodb_uri
-        self._apply_spec()
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
-    main(PLACharm)
+    main(PlaCharm)
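With the default database_commonkey ("osm") and the relation data used by the unit tests below (kafka at kafka:9092, MongoDB at mongodb://mongo:27017), the environment block assembled by build_pod_spec would come out roughly as follows; the concrete values are illustrative, only the variable names come from the charm:

example_envs = {
    "ALLOW_ANONYMOUS_LOGIN": "yes",
    "OSMPLA_GLOBAL_LOG_LEVEL": "INFO",
    "OSMPLA_MESSAGE_DRIVER": "kafka",
    "OSMPLA_MESSAGE_HOST": "kafka",
    "OSMPLA_MESSAGE_PORT": 9092,
    "OSMPLA_DATABASE_DRIVER": "mongo",
    "OSMPLA_DATABASE_URI": "mongodb://mongo:27017",
    "OSMPLA_DATABASE_COMMONKEY": "osm",
}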
index 6004c6d..d0d973a 100644 (file)
 
 """Init mocking for unit tests."""
 
-import sys
-import mock
+import sys
+import mock
 
-sys.path.append("src")
+sys.path.append("src")
 
-oci_image = mock.MagicMock()
-sys.modules["oci_image"] = oci_image
+oci_image = mock.MagicMock()
+sys.modules["oci_image"] = oci_image
index dbc7be3..b1a7820 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
-from charm import PLACharm
+from charm import PlaCharm
 
 
 class TestCharm(unittest.TestCase):
-    """PLA Charm unit tests."""
+    """Pla Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
-        self.harness = Harness(PLACharm)
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
+        self.harness = Harness(PlaCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "log_level": "INFO",
+        }
+        self.harness.update_config(self.config)
+
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.assertTrue(
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka"]
+            )
+        )
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
         self.harness.charm.on.config_changed.emit()
 
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+    def initialize_kafka_relation(self):
+        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+        )
+
+    def initialize_mongo_relation(self):
+        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+        self.harness.update_relation_data(
+            mongodb_relation_id,
+            "mongodb/0",
+            {"connection_string": "mongodb://mongo:27017"},
+        )
+
 
 if __name__ == "__main__":
     unittest.main()
index 32d78d3..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = pla
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release pla.charm
   charmcraft build
-  unzip pla.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/|release/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/ release/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =  
-  .tox/*
-  tests/*
index 4ff0fa3..2885df2 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-pol.charm
+*.charm
 .coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/pol/.jujuignore b/installers/charm/pol/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index c20ac8d..d71fb69 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
diff --git a/installers/charm/pol/requirements-test.txt b/installers/charm/pol/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a26601f..f10a199 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 6d15c57..ce1a25e 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
+
 import logging
-from typing import Any, Dict, NoReturn
+from typing import NoReturn
 
-from ops.charm import CharmBase, CharmEvents
-from ops.framework import EventBase, EventSource, StoredState
 from ops.main import main
-from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus
-from oci_image import OCIImageResource, OCIImageResourceError
 
-from pod_spec import make_pod_spec
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
-logger = logging.getLogger(__name__)
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
 
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
-class ConfigurePodEvent(EventBase):
-    """Configure Pod event"""
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mongo import MongoClient
 
-    pass
 
+logger = logging.getLogger(__name__)
 
-class PolEvents(CharmEvents):
-    """POL Events"""
+PORT = 9999
 
-    configure_pod = EventSource(ConfigurePodEvent)
 
+class ConfigModel(ModelValidator):
+    log_level: str
 
-class PolCharm(CharmBase):
-    """POL Charm."""
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
 
-    state = StoredState()
-    on = PolEvents()
 
+class PolCharm(CharmedOsmBase):
     def __init__(self, *args) -> NoReturn:
-        """POL Charm constructor."""
-        super().__init__(*args)
-
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
-
-        # Message bus data initialization
-        self.state.set_default(message_host=None)
-        self.state.set_default(message_port=None)
-
-        # Database data initialization
-        self.state.set_default(database_uri=None)
-
-        self.image = OCIImageResource(self, "image")
-
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
-        self.framework.observe(self.on.upgrade_charm, self.configure_pod)
-
-        # Registering custom internal events
-        self.framework.observe(self.on.configure_pod, self.configure_pod)
-
-        # Registering required relation events
-        self.framework.observe(
-            self.on.kafka_relation_changed, self._on_kafka_relation_changed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_changed, self._on_mongodb_relation_changed
-        )
-
-        # Registering required relation departed events
-        self.framework.observe(
-            self.on.kafka_relation_departed, self._on_kafka_relation_departed
-        )
-        self.framework.observe(
-            self.on.mongodb_relation_departed, self._on_mongodb_relation_departed
+        super().__init__(*args, oci_image="image")
+
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
+
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
+
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
+
+        if self.kafka_client.is_missing_data_in_unit():
+            missing_relations.append("kafka")
+        if self.mongodb_client.is_missing_data_in_unit():
+            missing_relations.append("mongodb")
+
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_envs(
+            {
+                # General configuration
+                "ALLOW_ANONYMOUS_LOGIN": "yes",
+                "OSMPOL_GLOBAL_LOGLEVEL": config.log_level,
+                # Kafka configuration
+                "OSMPOL_MESSAGE_DRIVER": "kafka",
+                "OSMPOL_MESSAGE_HOST": self.kafka_client.host,
+                "OSMPOL_MESSAGE_PORT": self.kafka_client.port,
+                # Database configuration
+                "OSMPOL_DATABASE_DRIVER": "mongo",
+                "OSMPOL_DATABASE_URI": self.mongodb_client.connection_string,
+            }
         )
 
-    def _on_kafka_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the kafka relation.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        message_host = event.relation.data[event.unit].get("host")
-        message_port = event.relation.data[event.unit].get("port")
-
-        if (
-            message_host
-            and message_port
-            and (
-                self.state.message_host != message_host
-                or self.state.message_port != message_port
-            )
-        ):
-            self.state.message_host = message_host
-            self.state.message_port = message_port
-            self.on.configure_pod.emit()
-
-    def _on_kafka_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clear kafka relation data.
-
-        Args:
-            event (EventBase): Kafka relation event.
-        """
-        self.state.message_host = None
-        self.state.message_port = None
-        self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_changed(self, event: EventBase) -> NoReturn:
-        """Reads information about the DB relation.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        database_uri = event.relation.data[event.unit].get("connection_string")
-
-        if database_uri and self.state.database_uri != database_uri:
-            self.state.database_uri = database_uri
-            self.on.configure_pod.emit()
-
-    def _on_mongodb_relation_departed(self, event: EventBase) -> NoReturn:
-        """Clear mongodb relation data.
-
-        Args:
-            event (EventBase): DB relation event.
-        """
-        self.state.database_uri = None
-        self.on.configure_pod.emit()
-
-    def _missing_relations(self) -> str:
-        """Checks if there missing relations.
-
-        Returns:
-            str: string with missing relations
-        """
-        data_status = {
-            "kafka": self.state.message_host,
-            "mongodb": self.state.database_uri,
-        }
-
-        missing_relations = [k for k, v in data_status.items() if not v]
-
-        return ", ".join(missing_relations)
-
-    @property
-    def relation_state(self) -> Dict[str, Any]:
-        """Collects relation state configuration for pod spec assembly.
-
-        Returns:
-            Dict[str, Any]: relation state information.
-        """
-        relation_state = {
-            "message_host": self.state.message_host,
-            "message_port": self.state.message_port,
-            "database_uri": self.state.database_uri,
-        }
-
-        return relation_state
-
-    def configure_pod(self, event: EventBase) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if missing := self._missing_relations():
-            self.unit.status = BlockedStatus(
-                "Waiting for {0} relation{1}".format(
-                    missing, "s" if "," in missing else ""
-                )
-            )
-            return
-
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                self.relation_state,
-                self.model.app.name,
-            )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
-
-        self.unit.status = ActiveStatus("ready")
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
index ec7f2d9..5ad4217 100644 (file)
@@ -36,14 +36,16 @@ def _validate_data(
         relation_data (Dict[str, Any]): relation data.
     """
     config_validators = {
-        "log_level": lambda value, _: isinstance(value, str)
-        and value in ("INFO", "DEBUG"),
+        "log_level": lambda value, _: (
+            isinstance(value, str) and value in ("INFO", "DEBUG")
+        ),
     }
     relation_validators = {
         "message_host": lambda value, _: isinstance(value, str) and len(value) > 0,
         "message_port": lambda value, _: isinstance(value, int) and value > 0,
-        "database_uri": lambda value, _: isinstance(value, str)
-        and value.startswith("mongodb://"),
+        "database_uri": lambda value, _: (
+            isinstance(value, str) and value.startswith("mongodb://")
+        ),
     }
     problems = []
 
index a2e81cc..2214397 100644 (file)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-from ops.model import BlockedStatus
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import PolCharm
 
 
 class TestCharm(unittest.TestCase):
-    """POL Charm unit tests."""
+    """Pol Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(PolCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
+        self.config = {
+            "log_level": "INFO",
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.harness.charm.on.config_changed.emit()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka"]
+            )
         )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_on_start_with_relations(self) -> NoReturn:
-        """Test deployment without keystone."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "pol",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "pol",
-                            "containerPort": 80,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "ALLOW_ANONYMOUS_LOGIN": "yes",
-                        "OSMPOL_GLOBAL_LOGLEVEL": "INFO",
-                        "OSMPOL_MESSAGE_HOST": "kafka",
-                        "OSMPOL_MESSAGE_DRIVER": "kafka",
-                        "OSMPOL_MESSAGE_PORT": 9092,
-                        "OSMPOL_DATABASE_DRIVER": "mongo",
-                        "OSMPOL_DATABASE_URI": "mongodb://mongo:27017",
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
 
-        self.harness.charm.on.start.emit()
-
-        # Check if kafka datastore is initialized
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        # Check if mongodb datastore is initialized
-        self.assertIsNone(self.harness.charm.state.database_uri)
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
+
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
+
+    def test_with_relations(
+        self,
+    ) -> NoReturn:
+        "Test with relations (internal)"
+        self.initialize_kafka_relation()
+        self.initialize_mongo_relation()
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        # Initializing the kafka relation
+    def initialize_kafka_relation(self):
         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
         self.harness.update_relation_data(
             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
         )
 
-        # Initializing the mongo relation
+    def initialize_mongo_relation(self):
         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
         self.harness.update_relation_data(
@@ -109,74 +94,159 @@ class TestCharm(unittest.TestCase):
             {"connection_string": "mongodb://mongo:27017"},
         )
 
-        # Checking if kafka data is stored
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Checking if mongodb data is stored
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_kafka_unit_relation_changed(self) -> NoReturn:
-        """Test to see if kafka relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.message_host)
-        self.assertIsNone(self.harness.charm.state.message_port)
-
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            relation_id, "kafka/0", {"host": "kafka", "port": 9092}
-        )
-
-        self.assertEqual(self.harness.charm.state.message_host, "kafka")
-        self.assertEqual(self.harness.charm.state.message_port, 9092)
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertNotIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-    def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mongodb relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        self.assertIsNone(self.harness.charm.state.database_uri)
-
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
-        )
-
-        self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
 
 if __name__ == "__main__":
     unittest.main()
+
+
+# class TestCharm(unittest.TestCase):
+#     """POL Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.harness = Harness(PolCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+
+#     def test_on_start_without_relations(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_start_with_relations(self) -> NoReturn:
+#         """Test deployment without keystone."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "pol",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "pol",
+#                             "containerPort": 80,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {
+#                         "ALLOW_ANONYMOUS_LOGIN": "yes",
+#                         "OSMPOL_GLOBAL_LOGLEVEL": "INFO",
+#                         "OSMPOL_MESSAGE_HOST": "kafka",
+#                         "OSMPOL_MESSAGE_DRIVER": "kafka",
+#                         "OSMPOL_MESSAGE_PORT": 9092,
+#                         "OSMPOL_DATABASE_DRIVER": "mongo",
+#                         "OSMPOL_DATABASE_URI": "mongodb://mongo:27017",
+#                     },
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Check if kafka datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         # Check if mongodb datastore is initialized
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         # Initializing the kafka relation
+#         kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         # Initializing the mongo relation
+#         mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             mongodb_relation_id,
+#             "mongodb/0",
+#             {"connection_string": "mongodb://mongo:27017"},
+#         )
+
+#         # Checking if kafka data is stored
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Checking if mongodb data is stored
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_kafka_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if kafka relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.message_host)
+#         self.assertIsNone(self.harness.charm.state.message_port)
+
+#         relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             relation_id, "kafka/0", {"host": "kafka", "port": 9092}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.message_host, "kafka")
+#         self.assertEqual(self.harness.charm.state.message_port, 9092)
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertNotIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+#     def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if mongodb relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         self.assertIsNone(self.harness.charm.state.database_uri)
+
+#         relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             relation_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
+#         )
+
+#         self.assertEqual(self.harness.charm.state.database_uri, "mongodb://mongo:27017")
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertNotIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+
+# if __name__ == "__main__":
+#     unittest.main()
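
The pattern above is shared by all of the reworked charms: config-changed leaves the unit in BlockedStatus until every required relation provides data, and the initialize_* helpers push that data through the Harness. A minimal end-to-end sketch, assuming only the PolCharm class and the relation data already used in this file:

    import unittest
    from ops.model import BlockedStatus
    from ops.testing import Harness

    from charm import PolCharm


    class TestBlockedUntilRelations(unittest.TestCase):
        def test_unblocks_when_relations_arrive(self):
            harness = Harness(PolCharm)
            harness.set_leader(is_leader=True)
            harness.begin()
            # With no relations, the charm reports which ones are missing.
            harness.charm.on.config_changed.emit()
            self.assertIsInstance(harness.charm.unit.status, BlockedStatus)
            # Provide the kafka relation data the charm waits for.
            kafka_id = harness.add_relation("kafka", "kafka")
            harness.add_relation_unit(kafka_id, "kafka/0")
            harness.update_relation_data(
                kafka_id, "kafka/0", {"host": "kafka", "port": 9092}
            )
            # Provide the mongodb relation data.
            mongo_id = harness.add_relation("mongodb", "mongodb")
            harness.add_relation_unit(mongo_id, "mongodb/0")
            harness.update_relation_data(
                mongo_id, "mongodb/0", {"connection_string": "mongodb://mongo:27017"}
            )
            # With both relations satisfied the unit is no longer blocked.
            self.assertNotIsInstance(harness.charm.unit.status, BlockedStatus)
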
index ae324f8..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = pol
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release pol.charm
   charmcraft build
-  unzip pol.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
index bf04eb4..2885df2 100644 (file)
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-prometheus.charm
+*.charm
 .coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/prometheus/.jujuignore b/installers/charm/prometheus/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+prometheus.charm
+.coverage
+.stestr
+cover
index f3ecd3a..d71fb69 100644 (file)
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index baa04cd..d953de3 100644 (file)
 ##
 
 options:
-  web_subpath:
+  web-subpath:
     description: Subpath for accessing Prometheus
     type: string
     default: /
-  default_target:
+  default-target:
     description: Default target to be added in Prometheus
     type: string
     default: ""
index 960904b..f021418 100644 (file)
@@ -41,3 +41,7 @@ resources:
 provides:
   prometheus:
     interface: prometheus
+storage:
+  data:
+    type: filesystem
+    location: /prometheus
diff --git a/installers/charm/prometheus/requirements-test.txt b/installers/charm/prometheus/requirements-test.txt
new file mode 100644 (file)
index 0000000..d7585f3
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index 884cf9f..f10a199 100644 (file)
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 4371d47..3d72cac 100755 (executable)
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
 import logging
-from typing import Dict, List, NoReturn
+from typing import Optional, NoReturn
+from ipaddress import ip_network
 
-from ops.charm import CharmBase
-from ops.framework import EventBase, StoredState
+from ops.framework import EventBase
 from ops.main import main
-from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit
-from oci_image import OCIImageResource, OCIImageResourceError
 
-from pod_spec import make_pod_spec
+from opslib.osm.charm import CharmedOsmBase
+
+from opslib.osm.pod import (
+    IngressResourceV3Builder,
+    FilesV3Builder,
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
+
+
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
+
+from opslib.osm.interfaces.prometheus import PrometheusServer
+from urllib.parse import urlparse
 
 logger = logging.getLogger(__name__)
 
-PROMETHEUS_PORT = 9090
+PORT = 9090
 
 
-class RelationsMissing(Exception):
-    def __init__(self, missing_relations: List):
-        self.message = ""
-        if missing_relations and isinstance(missing_relations, list):
-            self.message += f'Waiting for {", ".join(missing_relations)} relation'
-            if "," in self.message:
-                self.message += "s"
+class ConfigModel(ModelValidator):
+    web_subpath: str
+    default_target: str
+    max_file_size: int
+    site_url: Optional[str]
+    ingress_whitelist_source_range: Optional[str]
+    tls_secret_name: Optional[str]
+    enable_web_admin_api: bool
 
+    @validator("web_subpath")
+    def validate_web_subpath(cls, v):
+        if len(v) < 1:
+            raise ValueError("web-subpath must be a non-empty string")
+        return v
 
-class RelationDefinition:
-    def __init__(self, relation_name: str, keys: List, source_type):
-        if source_type != Application and source_type != Unit:
-            raise TypeError(
-                "source_type should be ops.model.Application or ops.model.Unit"
-            )
-        self.relation_name = relation_name
-        self.keys = keys
-        self.source_type = source_type
-
-
-def check_missing_relation_data(
-    data: Dict,
-    expected_relations_data: List[RelationDefinition],
-):
-    missing_relations = []
-    for relation_data in expected_relations_data:
-        if not all(
-            f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys
-        ):
-            missing_relations.append(relation_data.relation_name)
-    if missing_relations:
-        raise RelationsMissing(missing_relations)
-
-
-def get_relation_data(
-    charm: CharmBase,
-    relation_data: RelationDefinition,
-) -> Dict:
-    data = {}
-    relation = charm.model.get_relation(relation_data.relation_name)
-    if relation:
-        self_app_unit = (
-            charm.app if relation_data.source_type == Application else charm.unit
-        )
-        expected_type = relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
-
-
-class PrometheusCharm(CharmBase):
-    """Prometheus Charm."""
+    @validator("max_file_size")
+    def validate_max_file_size(cls, v):
+        if v < 0:
+            raise ValueError("value must be equal or greater than 0")
+        return v
 
-    state = StoredState()
+    @validator("site_url")
+    def validate_site_url(cls, v):
+        if v:
+            parsed = urlparse(v)
+            if not parsed.scheme.startswith("http"):
+                raise ValueError("value must start with http")
+        return v
 
-    def __init__(self, *args) -> NoReturn:
-        """Prometheus Charm constructor."""
-        super().__init__(*args)
+    @validator("ingress_whitelist_source_range")
+    def validate_ingress_whitelist_source_range(cls, v):
+        if v:
+            ip_network(v)
+        return v
 
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
 
-        self.port = PROMETHEUS_PORT
-        self.image = OCIImageResource(self, "image")
+class PrometheusCharm(CharmedOsmBase):
 
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
+    """Prometheus Charm."""
+
+    def __init__(self, *args) -> NoReturn:
+        """Prometheus Charm constructor."""
+        super().__init__(*args, oci_image="image")
 
         # Registering provided relation events
+        self.prometheus = PrometheusServer(self, "prometheus")
         self.framework.observe(
-            self.on.prometheus_relation_joined, self._publish_prometheus_info
+            self.on.prometheus_relation_joined,  # pylint: disable=E1101
+            self._publish_prometheus_info,
         )
 
     def _publish_prometheus_info(self, event: EventBase) -> NoReturn:
-        """Publishes Prometheus information.
-
-        Args:
-            event (EventBase): Prometheus relation event.
-        """
-        if self.unit.is_leader():
-            rel_data = {
-                "host": self.model.app.name,
-                "port": str(PROMETHEUS_PORT),
+        self.prometheus.publish_info(self.app.name, PORT)
+
+    def _build_files(self, config: ConfigModel):
+        files_builder = FilesV3Builder()
+        files_builder.add_file(
+            "prometheus.yml",
+            (
+                "global:\n"
+                "  scrape_interval: 15s\n"
+                "  evaluation_interval: 15s\n"
+                "alerting:\n"
+                "  alertmanagers:\n"
+                "    - static_configs:\n"
+                "        - targets:\n"
+                "rule_files:\n"
+                "scrape_configs:\n"
+                "  - job_name: 'prometheus'\n"
+                "    static_configs:\n"
+                f"      - targets: [{config.default_target}]\n"
+            ),
+        )
+        return files_builder.build()
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_http_readiness_probe(
+            "/-/ready",
+            PORT,
+            initial_delay_seconds=10,
+            timeout_seconds=30,
+        )
+        container_builder.add_http_liveness_probe(
+            "/-/healthy",
+            PORT,
+            initial_delay_seconds=30,
+            period_seconds=30,
+        )
+        command = [
+            "/bin/prometheus",
+            "--config.file=/etc/prometheus/prometheus.yml",
+            "--storage.tsdb.path=/prometheus",
+            "--web.console.libraries=/usr/share/prometheus/console_libraries",
+            "--web.console.templates=/usr/share/prometheus/consoles",
+            f"--web.route-prefix={config.web_subpath}",
+            f"--web.external-url=http://localhost:{PORT}{config.web_subpath}",
+        ]
+        if config.enable_web_admin_api:
+            command.append("--web.enable-admin-api")
+        container_builder.add_command(command)
+        container_builder.add_volume_config(
+            "config", "/etc/prometheus", self._build_files(config)
+        )
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        # Add ingress resources to pod spec if site url exists
+        if config.site_url:
+            parsed = urlparse(config.site_url)
+            annotations = {
+                "nginx.ingress.kubernetes.io/proxy-body-size": "{}".format(
+                    str(config.max_file_size) + "m"
+                    if config.max_file_size > 0
+                    else config.max_file_size
+                ),
             }
-            for k, v in rel_data.items():
-                event.relation.data[self.app][k] = v
-
-    @property
-    def relations_requirements(self):
-        return []
-
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
-
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
-
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        relation_state = None
-        try:
-            relation_state = self.get_relation_state()
-        except RelationsMissing as exc:
-            logger.exception("Relation missing error")
-            self.unit.status = BlockedStatus(exc.message)
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                relation_state,
-                self.model.app.name,
-                self.port,
+            ingress_resource_builder = IngressResourceV3Builder(
+                f"{self.app.name}-ingress", annotations
             )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
 
-        self.unit.status = ActiveStatus("ready")
+            if config.ingress_whitelist_source_range:
+                annotations[
+                    "nginx.ingress.kubernetes.io/whitelist-source-range"
+                ] = config.ingress_whitelist_source_range
+
+            if parsed.scheme == "https":
+                ingress_resource_builder.add_tls(
+                    [parsed.hostname], config.tls_secret_name
+                )
+            else:
+                annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+            ingress_resource_builder.add_rule(parsed.hostname, self.app.name, PORT)
+            ingress_resource = ingress_resource_builder.build()
+            pod_spec_builder.add_ingress_resource(ingress_resource)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
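
The ConfigModel/validator pattern replaces the ad-hoc checks the old pod_spec module performed. A self-contained sketch of the rules the validators above enforce (plain Python, not the opslib code; the function name is illustrative only):

    from ipaddress import ip_network
    from urllib.parse import urlparse


    def check_prometheus_config(
        web_subpath: str,
        max_file_size: int,
        site_url: str = None,
        ingress_whitelist_source_range: str = None,
    ) -> None:
        """Raise ValueError for any value the charm validators would reject."""
        if len(web_subpath) < 1:
            raise ValueError("web-subpath must be a non-empty string")
        if max_file_size < 0:
            raise ValueError("value must be equal or greater than 0")
        if site_url and not urlparse(site_url).scheme.startswith("http"):
            raise ValueError("value must start with http")
        if ingress_whitelist_source_range:
            ip_network(ingress_whitelist_source_range)  # ValueError on a bad CIDR


    # The charm defaults pass; an empty web_subpath or a negative size would not.
    check_prometheus_config("/", 0, "https://prometheus.192.168.100.100.xip.io", "0.0.0.0/0")

When a rule fails, the ValueError is presumably surfaced by CharmedOsmBase as a blocked status instead of a pod spec update, mirroring what the old configure_pod handler did.
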
index 87d7bc5..b1848bd 100644 (file)
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-
-from ops.model import BlockedStatus
+from ops.model import ActiveStatus
 from ops.testing import Harness
 
 from charm import PrometheusCharm
@@ -34,445 +34,63 @@ class TestCharm(unittest.TestCase):
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(PrometheusCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
-
-    def test_ingress_resources_without_http(self) -> NoReturn:
-        """Test ingress resources without HTTP."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "prometheus",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "prometheus",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "config",
-                            "mountPath": "/etc/prometheus",
-                            "files": [
-                                {
-                                    "path": "prometheus.yml",
-                                    "content": (
-                                        "global:\n"
-                                        "  scrape_interval: 15s\n"
-                                        "  evaluation_interval: 15s\n"
-                                        "alerting:\n"
-                                        "  alertmanagers:\n"
-                                        "    - static_configs:\n"
-                                        "        - targets:\n"
-                                        "rule_files:\n"
-                                        "scrape_configs:\n"
-                                        "  - job_name: 'prometheus'\n"
-                                        "    static_configs:\n"
-                                        "      - targets: [{}]\n".format("")
-                                    ),
-                                }
-                            ],
-                        }
-                    ],
-                    "command": [
-                        "/bin/prometheus",
-                        "--config.file=/etc/prometheus/prometheus.yml",
-                        "--storage.tsdb.path=/prometheus",
-                        "--web.console.libraries=/usr/share/prometheus/console_libraries",
-                        "--web.console.templates=/usr/share/prometheus/consoles",
-                        "--web.route-prefix={}".format("/"),
-                        "--web.external-url=http://localhost:{}{}".format(9090, "/"),
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/-/ready",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 10,
-                            "timeoutSeconds": 30,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/-/healthy",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 30,
-                            "periodSeconds": 30,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_http(self) -> NoReturn:
-        """Test ingress resources with HTTP."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "prometheus",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "prometheus",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "config",
-                            "mountPath": "/etc/prometheus",
-                            "files": [
-                                {
-                                    "path": "prometheus.yml",
-                                    "content": (
-                                        "global:\n"
-                                        "  scrape_interval: 15s\n"
-                                        "  evaluation_interval: 15s\n"
-                                        "alerting:\n"
-                                        "  alertmanagers:\n"
-                                        "    - static_configs:\n"
-                                        "        - targets:\n"
-                                        "rule_files:\n"
-                                        "scrape_configs:\n"
-                                        "  - job_name: 'prometheus'\n"
-                                        "    static_configs:\n"
-                                        "      - targets: [{}]\n".format("")
-                                    ),
-                                }
-                            ],
-                        }
-                    ],
-                    "command": [
-                        "/bin/prometheus",
-                        "--config.file=/etc/prometheus/prometheus.yml",
-                        "--storage.tsdb.path=/prometheus",
-                        "--web.console.libraries=/usr/share/prometheus/console_libraries",
-                        "--web.console.templates=/usr/share/prometheus/consoles",
-                        "--web.route-prefix={}".format("/"),
-                        "--web.external-url=http://localhost:{}{}".format(9090, "/"),
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/-/ready",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 10,
-                            "timeoutSeconds": 30,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/-/healthy",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 30,
-                            "periodSeconds": 30,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "prometheus-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/ssl-redirect": "false",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "prometheus",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "prometheus",
-                                                    "servicePort": 9090,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ]
-                        },
-                    }
-                ],
-            },
+        self.config = {
+            "web-subpath": "/",
+            "default-target": "",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+            "site_url": "https://prometheus.192.168.100.100.xip.io",
+            "enable_web_admin_api": False,
         }
+        self.harness.update_config(self.config)
 
-        self.harness.charm.on.start.emit()
-
-        self.harness.update_config({"site_url": "http://prometheus"})
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_ingress_resources_with_https(self) -> NoReturn:
-        """Test ingress resources with HTTPS."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "prometheus",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "prometheus",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "config",
-                            "mountPath": "/etc/prometheus",
-                            "files": [
-                                {
-                                    "path": "prometheus.yml",
-                                    "content": (
-                                        "global:\n"
-                                        "  scrape_interval: 15s\n"
-                                        "  evaluation_interval: 15s\n"
-                                        "alerting:\n"
-                                        "  alertmanagers:\n"
-                                        "    - static_configs:\n"
-                                        "        - targets:\n"
-                                        "rule_files:\n"
-                                        "scrape_configs:\n"
-                                        "  - job_name: 'prometheus'\n"
-                                        "    static_configs:\n"
-                                        "      - targets: [{}]\n".format("")
-                                    ),
-                                }
-                            ],
-                        }
-                    ],
-                    "command": [
-                        "/bin/prometheus",
-                        "--config.file=/etc/prometheus/prometheus.yml",
-                        "--storage.tsdb.path=/prometheus",
-                        "--web.console.libraries=/usr/share/prometheus/console_libraries",
-                        "--web.console.templates=/usr/share/prometheus/consoles",
-                        "--web.route-prefix={}".format("/"),
-                        "--web.external-url=http://localhost:{}{}".format(9090, "/"),
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/-/ready",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 10,
-                            "timeoutSeconds": 30,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/-/healthy",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 30,
-                            "periodSeconds": 30,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "prometheus-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "prometheus",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "prometheus",
-                                                    "servicePort": 9090,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [
-                                {"hosts": ["prometheus"], "secretName": "prometheus"}
-                            ],
-                        },
-                    }
-                ],
-            },
-        }
+    def test_config_changed(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        self.harness.charm.on.start.emit()
+        self.harness.charm.on.config_changed.emit()
 
-        self.harness.update_config(
-            {"site_url": "https://prometheus", "tls_secret_name": "prometheus"}
-        )
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
 
-        pod_spec, _ = self.harness.get_pod_spec()
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
 
-        self.assertDictEqual(expected_result, pod_spec)
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
 
-    def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
-        """Test ingress resources with HTTPS and ingress whitelist."""
+    def test_publish_prometheus_info(
+        self,
+    ) -> NoReturn:
+        """Test to see if prometheus relation is updated."""
         expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "prometheus",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "prometheus",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {},
-                    "volumeConfig": [
-                        {
-                            "name": "config",
-                            "mountPath": "/etc/prometheus",
-                            "files": [
-                                {
-                                    "path": "prometheus.yml",
-                                    "content": (
-                                        "global:\n"
-                                        "  scrape_interval: 15s\n"
-                                        "  evaluation_interval: 15s\n"
-                                        "alerting:\n"
-                                        "  alertmanagers:\n"
-                                        "    - static_configs:\n"
-                                        "        - targets:\n"
-                                        "rule_files:\n"
-                                        "scrape_configs:\n"
-                                        "  - job_name: 'prometheus'\n"
-                                        "    static_configs:\n"
-                                        "      - targets: [{}]\n".format("")
-                                    ),
-                                }
-                            ],
-                        }
-                    ],
-                    "command": [
-                        "/bin/prometheus",
-                        "--config.file=/etc/prometheus/prometheus.yml",
-                        "--storage.tsdb.path=/prometheus",
-                        "--web.console.libraries=/usr/share/prometheus/console_libraries",
-                        "--web.console.templates=/usr/share/prometheus/consoles",
-                        "--web.route-prefix={}".format("/"),
-                        "--web.external-url=http://localhost:{}{}".format(9090, "/"),
-                    ],
-                    "kubernetes": {
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/-/ready",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 10,
-                            "timeoutSeconds": 30,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/-/healthy",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 30,
-                            "periodSeconds": 30,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {
-                "ingressResources": [
-                    {
-                        "name": "prometheus-ingress",
-                        "annotations": {
-                            "nginx.ingress.kubernetes.io/proxy-body-size": "0",
-                            "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
-                        },
-                        "spec": {
-                            "rules": [
-                                {
-                                    "host": "prometheus",
-                                    "http": {
-                                        "paths": [
-                                            {
-                                                "path": "/",
-                                                "backend": {
-                                                    "serviceName": "prometheus",
-                                                    "servicePort": 9090,
-                                                },
-                                            }
-                                        ]
-                                    },
-                                }
-                            ],
-                            "tls": [
-                                {"hosts": ["prometheus"], "secretName": "prometheus"}
-                            ],
-                        },
-                    }
-                ],
-            },
+            "hostname": "prometheus",
+            "port": "9090",
         }
 
-        self.harness.charm.on.start.emit()
-
-        self.harness.update_config(
-            {
-                "site_url": "https://prometheus",
-                "tls_secret_name": "prometheus",
-                "ingress_whitelist_source_range": "0.0.0.0/0",
-            }
-        )
-
-        pod_spec, _ = self.harness.get_pod_spec()
+        relation_id = self.harness.add_relation("prometheus", "mon")
+        self.harness.add_relation_unit(relation_id, "mon/0")
+        relation_data = self.harness.get_relation_data(relation_id, "prometheus")
 
-        self.assertDictEqual(expected_result, pod_spec)
+        self.assertDictEqual(expected_result, relation_data)
 
-    def test_publish_prometheus_info(self) -> NoReturn:
+    def test_publish_prometheus_info_non_leader(
+        self,
+    ) -> NoReturn:
         """Test to see if prometheus relation is updated."""
-        expected_result = {
-            "host": "prometheus",
-            "port": "9090",
-        }
-
-        self.harness.charm.on.start.emit()
+        expected_result = {}
 
+        self.harness.set_leader(is_leader=False)
         relation_id = self.harness.add_relation("prometheus", "mon")
         self.harness.add_relation_unit(relation_id, "mon/0")
         relation_data = self.harness.get_relation_data(relation_id, "prometheus")
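
The leader/non-leader split in these tests follows standard Juju semantics: only the leader unit may write to the application data bag, so the non-leader case expects an empty bag. A hedged illustration in plain ops terms (not the opslib PrometheusServer code):

    from ops.charm import CharmBase
    from ops.model import Relation


    def publish_prometheus_info(
        charm: CharmBase, relation: Relation, hostname: str, port: int
    ) -> None:
        # Application relation data is writable only by the leader; a non-leader
        # write raises RelationDataError, hence the guard.
        if charm.unit.is_leader():
            relation.data[charm.app]["hostname"] = hostname
            relation.data[charm.app]["port"] = str(port)
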
index 22f6bf5..1adbae6 100644 (file)
@@ -286,7 +286,6 @@ class TestPodSpec(unittest.TestCase):
         ]
 
         pod_envconfig = pod_spec._make_pod_files(config)
-        print(expected_result, pod_envconfig)
         self.assertListEqual(expected_result, pod_envconfig)
 
     def test_make_readiness_probe(self) -> NoReturn:
index 5491c07..1f9442e 100644 (file)
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = prometheus
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release prometheus.charm
   charmcraft build
-  unzip prometheus.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
index 8790c4d..2a6d62f 100755 (executable)
@@ -29,23 +29,33 @@ for charm in $charms; do
     echo "$charm charm released!"
 done
 
-# New charms (with no resources)
-charms="pla keystone"
+# New charms (with resources)
+charms="ng-ui nbi pla keystone ro lcm mon pol"
 for charm in $charms; do
     echo "Releasing $charm charm"
-    cs_revision=`charm push $charm/$charm.charm cs:~charmed-osm/$charm | tail -n +1 | head -1 | awk '{print $2}'`
-    charm release --channel $channel $cs_revision
+    cs_revision=$(charm push $charm/$charm.charm cs:~charmed-osm/$charm | tail -n +1 | head -1 | awk '{print $2}')
+    resource_revision=$(charm attach $cs_revision image=external::opensourcemano/$charm:$tag | tail -n +1 | sed 's/[^0-9]*//g')
+    image_revision_num=$(echo $resource_revision  | awk '{print $NF}')
+    resources_string="--resource image-$image_revision_num"
+    charm release --channel $channel $cs_revision $resources_string
     echo "$charm charm released!"
 done
 
-# New charms (with resources)
-charms="ng-ui nbi"
+# New charms (with resources) (with oci-images from ubuntu)
+charms="prometheus grafana"
 for charm in $charms; do
     echo "Releasing $charm charm"
     cs_revision=$(charm push $charm/$charm.charm cs:~charmed-osm/$charm | tail -n +1 | head -1 | awk '{print $2}')
-    resource_revision=$(charm attach $cs_revision image=external::opensourcemano/$charm:$tag | tail -n +1 | sed 's/[^0-9]*//g')
+    resource_revision=$(charm attach $cs_revision image=external::ubuntu/$charm:latest | tail -n +1 | sed 's/[^0-9]*//g')
     image_revision_num=$(echo $resource_revision  | awk '{print $NF}')
     resources_string="--resource image-$image_revision_num"
     charm release --channel $channel $cs_revision $resources_string
     echo "$charm charm released!"
 done
+
+# 3. Grant permissions
+all_charms="ng-ui nbi pla keystone ro lcm mon pol grafana prometheus"
+for charm in $all_charms; do
+    echo "Granting permission for $charm charm"
+    charm grant cs:~charmed-osm/$charm --channel $channel --acl read everyone
+done
\ No newline at end of file
index aa3848a..2885df2 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -22,7 +22,9 @@
 venv
 .vscode
 build
-ro.charm
+*.charm
 .coverage
+coverage.xml
 .stestr
 cover
+release
\ No newline at end of file
diff --git a/installers/charm/ro/.jujuignore b/installers/charm/ro/.jujuignore
new file mode 100644 (file)
index 0000000..bf04eb4
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+*.charm
+.coverage
+.stestr
+cover
index c20ac8d..d71fb69 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -28,6 +28,7 @@ yaml-files:
   - ".yamllint"
 ignore: |
   .tox
+  cover/
   build/
-  mod/
-  lib/
+  venv
+  release/
index f29f4bc..eea0e9e 100644 (file)
@@ -40,11 +40,14 @@ resources:
     upstream-source: "opensourcemano/ro:8"
 provides:
   ro:
-    interface: osm-ro
+    interface: http
 requires:
   kafka:
     interface: kafka
+    limit: 1
   mongodb:
     interface: mongodb
+    limit: 1
   mysql:
     interface: mysql
+    limit: 1
diff --git a/installers/charm/ro/requirements-test.txt b/installers/charm/ro/requirements-test.txt
new file mode 100644 (file)
index 0000000..1da01c8
--- /dev/null
@@ -0,0 +1,30 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+-r requirements.txt
+coverage
+stestr
+mock
+black
+yamllint
+flake8
+safety
+requests-mock
+asynctest
+nose2
\ No newline at end of file
index a26601f..f10a199 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -19,5 +19,4 @@
 # osm-charmers@lists.launchpad.net
 ##
 
-ops
-git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
+git+https://github.com/davigar15/ops-lib-charmed-osm/@e7f26cd29b322e175a23cadbe4546b7f2bbf111c
\ No newline at end of file
index 8e6d576..ae92d98 100755 (executable)
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # osm-charmers@lists.launchpad.net
 ##
 
+# pylint: disable=E0213
+
 import logging
-from typing import Dict, List, NoReturn
+from typing import NoReturn
 
-from ops.charm import CharmBase
-from ops.framework import EventBase, StoredState
 from ops.main import main
-from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit
-from oci_image import OCIImageResource, OCIImageResourceError
 
-from pod_spec import make_pod_spec
+from opslib.osm.charm import CharmedOsmBase, RelationsMissing
 
-logger = logging.getLogger(__name__)
+from opslib.osm.pod import (
+    ContainerV3Builder,
+    PodSpecV3Builder,
+)
 
-RO_PORT = 9090
 
+from opslib.osm.validator import (
+    ModelValidator,
+    validator,
+)
 
-class RelationsMissing(Exception):
-    def __init__(self, missing_relations: List):
-        self.message = ""
-        if missing_relations and isinstance(missing_relations, list):
-            self.message += f'Waiting for {", ".join(missing_relations)} relation'
-            if "," in self.message:
-                self.message += "s"
+from opslib.osm.interfaces.kafka import KafkaClient
+from opslib.osm.interfaces.mysql import MysqlClient
+from opslib.osm.interfaces.mongo import MongoClient
 
 
-class RelationDefinition:
-    def __init__(self, relation_name: str, keys: List, source_type):
-        if source_type != Application and source_type != Unit:
-            raise TypeError(
-                "source_type should be ops.model.Application or ops.model.Unit"
-            )
-        self.relation_name = relation_name
-        self.keys = keys
-        self.source_type = source_type
-
-
-def check_missing_relation_data(
-    data: Dict,
-    expected_relations_data: List[RelationDefinition],
-):
-    missing_relations = []
-    for relation_data in expected_relations_data:
-        if not all(
-            f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys
-        ):
-            missing_relations.append(relation_data.relation_name)
-    if missing_relations:
-        raise RelationsMissing(missing_relations)
-
-
-def get_relation_data(
-    charm: CharmBase,
-    relation_data: RelationDefinition,
-) -> Dict:
-    data = {}
-    relation = charm.model.get_relation(relation_data.relation_name)
-    if relation:
-        self_app_unit = (
-            charm.app if relation_data.source_type == Application else charm.unit
-        )
-        expected_type = relation_data.source_type
-        for app_unit in relation.data:
-            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
-                if all(k in relation.data[app_unit] for k in relation_data.keys):
-                    for k in relation_data.keys:
-                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
-                            app_unit
-                        ].get(k)
-                    break
-    return data
+logger = logging.getLogger(__name__)
 
+PORT = 9090
 
-class RoCharm(CharmBase):
-    """RO Charm."""
 
-    state = StoredState()
+class ConfigModel(ModelValidator):
+    enable_ng_ro: bool
+    database_commonkey: str
+    log_level: str
+    vim_database: str
+    ro_database: str
+    openmano_tenant: str
 
-    def __init__(self, *args) -> NoReturn:
-        """RO Charm constructor."""
-        super().__init__(*args)
+    @validator("log_level")
+    def validate_log_level(cls, v):
+        if v not in {"INFO", "DEBUG"}:
+            raise ValueError("value must be INFO or DEBUG")
+        return v
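+
+    # Minimal usage sketch (values mirror the unit-test config in tests/test_charm.py):
+    #   ConfigModel(enable_ng_ro=True, database_commonkey="commonkey", log_level="INFO",
+    #               vim_database="db_name", ro_database="ro_db_name", openmano_tenant="mano")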
 
-        # Internal state initialization
-        self.state.set_default(pod_spec=None)
 
-        self.port = RO_PORT
-        self.image = OCIImageResource(self, "image")
+class RoCharm(CharmedOsmBase):
+    """GrafanaCharm Charm."""
 
-        # Registering regular events
-        self.framework.observe(self.on.start, self.configure_pod)
-        self.framework.observe(self.on.config_changed, self.configure_pod)
+    def __init__(self, *args) -> NoReturn:
+        """Prometheus Charm constructor."""
+        super().__init__(*args, oci_image="image")
 
-        # Registering required relation events
-        self.framework.observe(self.on.kafka_relation_changed, self.configure_pod)
-        self.framework.observe(self.on.mongodb_relation_changed, self.configure_pod)
-        self.framework.observe(self.on.mysql_relation_changed, self.configure_pod)
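+        # Relation clients from ops-lib-charmed-osm: the pod spec is rebuilt whenever any of
+        # these relations changes or is broken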
+        self.kafka_client = KafkaClient(self, "kafka")
+        self.framework.observe(self.on["kafka"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["kafka"].relation_broken, self.configure_pod)
 
-        # Registering required relation departed events
-        self.framework.observe(self.on.kafka_relation_departed, self.configure_pod)
-        self.framework.observe(self.on.mongodb_relation_departed, self.configure_pod)
-        self.framework.observe(self.on.mysql_relation_departed, self.configure_pod)
+        self.mysql_client = MysqlClient(self, "mysql")
+        self.framework.observe(self.on["mysql"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mysql"].relation_broken, self.configure_pod)
 
-        # Registering required relation broken events
-        self.framework.observe(self.on.kafka_relation_broken, self.configure_pod)
-        self.framework.observe(self.on.mongodb_relation_broken, self.configure_pod)
-        self.framework.observe(self.on.mysql_relation_broken, self.configure_pod)
+        self.mongodb_client = MongoClient(self, "mongodb")
+        self.framework.observe(self.on["mongodb"].relation_changed, self.configure_pod)
+        self.framework.observe(self.on["mongodb"].relation_broken, self.configure_pod)
 
-        # Registering provided relation events
-        self.framework.observe(self.on.ro_relation_joined, self._publish_ro_info)
+        self.framework.observe(self.on["ro"].relation_joined, self._publish_ro_info)
 
-    def _publish_ro_info(self, event: EventBase) -> NoReturn:
+    def _publish_ro_info(self, event):
         """Publishes RO information.
 
         Args:
@@ -138,80 +95,91 @@ class RoCharm(CharmBase):
         if self.unit.is_leader():
             rel_data = {
                 "host": self.model.app.name,
-                "port": str(RO_PORT),
+                "port": str(PORT),
             }
             for k, v in rel_data.items():
                 event.relation.data[self.app][k] = v
 
-    @property
-    def relations_requirements(self):
-        if self.model.config["enable_ng_ro"]:
-            return [
-                RelationDefinition("kafka", ["host", "port"], Unit),
-                RelationDefinition("mongodb", ["connection_string"], Unit),
-            ]
-        else:
-            return [
-                RelationDefinition(
-                    "mysql", ["host", "port", "user", "password", "root_password"], Unit
-                )
-            ]
-
-    def get_relation_state(self):
-        relation_state = {}
-        for relation_requirements in self.relations_requirements:
-            data = get_relation_data(self, relation_requirements)
-            relation_state = {**relation_state, **data}
-        check_missing_relation_data(relation_state, self.relations_requirements)
-        return relation_state
-
-    def configure_pod(self, _=None) -> NoReturn:
-        """Assemble the pod spec and apply it, if possible.
+    def _check_missing_dependencies(self, config: ConfigModel):
+        missing_relations = []
 
-        Args:
-            event (EventBase): Hook or Relation event that started the
-                               function.
-        """
-        if not self.unit.is_leader():
-            self.unit.status = ActiveStatus("ready")
-            return
-
-        relation_state = None
-        try:
-            relation_state = self.get_relation_state()
-        except RelationsMissing as exc:
-            logger.exception("Relation missing error")
-            self.unit.status = BlockedStatus(exc.message)
-            return
-
-        self.unit.status = MaintenanceStatus("Assembling pod spec")
-
-        # Fetch image information
-        try:
-            self.unit.status = MaintenanceStatus("Fetching image information")
-            image_info = self.image.fetch()
-        except OCIImageResourceError:
-            self.unit.status = BlockedStatus("Error fetching image information")
-            return
-
-        try:
-            pod_spec = make_pod_spec(
-                image_info,
-                self.model.config,
-                relation_state,
-                self.model.app.name,
-                self.port,
+        if config.enable_ng_ro:
+            if self.kafka_client.is_missing_data_in_unit():
+                missing_relations.append("kafka")
+            if self.mongodb_client.is_missing_data_in_unit():
+                missing_relations.append("mongodb")
+        else:
+            if self.mysql_client.is_missing_data_in_unit():
+                missing_relations.append("mysql")
+        if missing_relations:
+            raise RelationsMissing(missing_relations)
+
+    def build_pod_spec(self, image_info):
+        # Validate config
+        config = ConfigModel(**dict(self.config))
+        # Check relations
+        self._check_missing_dependencies(config)
+        # Create Builder for the PodSpec
+        pod_spec_builder = PodSpecV3Builder()
+        # Build Container
+        container_builder = ContainerV3Builder(self.app.name, image_info)
+        container_builder.add_port(name=self.app.name, port=PORT)
+        container_builder.add_http_readiness_probe(
+            "/ro/" if config.enable_ng_ro else "/openmano/tenants",
+            PORT,
+            initial_delay_seconds=10,
+            period_seconds=10,
+            timeout_seconds=5,
+            failure_threshold=3,
+        )
+        container_builder.add_http_liveness_probe(
+            "/ro/" if config.enable_ng_ro else "/openmano/tenants",
+            PORT,
+            initial_delay_seconds=600,
+            period_seconds=10,
+            timeout_seconds=5,
+            failure_threshold=3,
+        )
+        container_builder.add_envs(
+            {
+                "OSMRO_LOG_LEVEL": config.log_level,
+            }
+        )
+        if config.enable_ng_ro:
+            container_builder.add_envs(
+                {
+                    "OSMRO_MESSAGE_DRIVER": "kafka",
+                    "OSMRO_MESSAGE_HOST": self.kafka_client.host,
+                    "OSMRO_MESSAGE_PORT": self.kafka_client.port,
+                    # MongoDB configuration
+                    "OSMRO_DATABASE_DRIVER": "mongo",
+                    "OSMRO_DATABASE_URI": self.mongodb_client.connection_string,
+                    "OSMRO_DATABASE_COMMONKEY": config.database_commonkey,
+                }
             )
-        except ValueError as exc:
-            logger.exception("Config/Relation data validation error")
-            self.unit.status = BlockedStatus(str(exc))
-            return
-
-        if self.state.pod_spec != pod_spec:
-            self.model.pod.set_spec(pod_spec)
-            self.state.pod_spec = pod_spec
 
-        self.unit.status = ActiveStatus("ready")
+        else:
+            container_builder.add_envs(
+                {
+                    "RO_DB_HOST": self.mysql_client.host,
+                    "RO_DB_OVIM_HOST": self.mysql_client.host,
+                    "RO_DB_PORT": self.mysql_client.port,
+                    "RO_DB_OVIM_PORT": self.mysql_client.port,
+                    "RO_DB_USER": self.mysql_client.user,
+                    "RO_DB_OVIM_USER": self.mysql_client.user,
+                    "RO_DB_PASSWORD": self.mysql_client.password,
+                    "RO_DB_OVIM_PASSWORD": self.mysql_client.password,
+                    "RO_DB_ROOT_PASSWORD": self.mysql_client.root_password,
+                    "RO_DB_OVIM_ROOT_PASSWORD": self.mysql_client.root_password,
+                    "RO_DB_NAME": config.ro_database,
+                    "RO_DB_OVIM_NAME": config.vim_database,
+                    "OPENMANO_TENANT": config.openmano_tenant,
+                }
+            )
+        container = container_builder.build()
+        # Add container to pod spec
+        pod_spec_builder.add_container(container)
+        return pod_spec_builder.build()
 
 
 if __name__ == "__main__":
index 6c91baf..1beba17 100644 (file)
@@ -45,8 +45,9 @@ def _validate_data(
         )
         if values.get("enable_ng_ro", True)
         else True,
-        "log_level": lambda value, _: isinstance(value, str)
-        and value in ("INFO", "DEBUG"),
+        "log_level": lambda value, _: (
+            isinstance(value, str) and value in ("INFO", "DEBUG")
+        ),
         "vim_database": lambda value, values: (
             isinstance(value, str) and len(value) > 0
         )
index 0aa9b7d..4f14aff 100644 (file)
 # osm-charmers@lists.launchpad.net
 ##
 
+import sys
 from typing import NoReturn
 import unittest
-from ops.model import BlockedStatus
-
+from ops.model import ActiveStatus, BlockedStatus
 from ops.testing import Harness
 
 from charm import RoCharm
 
 
 class TestCharm(unittest.TestCase):
-    """RO Charm unit tests."""
+    """Prometheus Charm unit tests."""
 
     def setUp(self) -> NoReturn:
         """Test setup"""
+        self.image_info = sys.modules["oci_image"].OCIImageResource().fetch()
         self.harness = Harness(RoCharm)
         self.harness.set_leader(is_leader=True)
         self.harness.begin()
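+        # Config values covering every field that ConfigModel in src/charm.py expects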
+        self.config = {
+            "enable_ng_ro": True,
+            "database_commonkey": "commonkey",
+            "log_level": "INFO",
+            "vim_database": "db_name",
+            "ro_database": "ro_db_name",
+            "openmano_tenant": "mano",
+        }
+        self.harness.update_config(self.config)
 
-    def test_on_start_without_relations_ng_ro(self) -> NoReturn:
-        """Test installation without any relation."""
-        self.harness.charm.on.start.emit()
+    def test_config_changed_no_relations(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+        self.harness.charm.on.config_changed.emit()
 
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mongodb", "kafka"]
+            )
         )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
 
-    def test_on_start_without_relations_no_ng_ro(self) -> NoReturn:
-        """Test installation without any relation."""
+        # Disable ng-ro
         self.harness.update_config({"enable_ng_ro": False})
-
-        self.harness.charm.on.start.emit()
-
-        # Verifying status
         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
         self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
+            all(
+                relation in self.harness.charm.unit.status.message
+                for relation in ["mysql"]
+            )
         )
-        self.assertIn("mysql", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-    def test_on_start_with_relations_ng_ro(self) -> NoReturn:
-        """Test deployment with NG-RO."""
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "ro",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "ro",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "OSMRO_LOG_LEVEL": "INFO",
-                        "OSMRO_MESSAGE_DRIVER": "kafka",
-                        "OSMRO_MESSAGE_HOST": "kafka",
-                        "OSMRO_MESSAGE_PORT": "9090",
-                        "OSMRO_DATABASE_DRIVER": "mongo",
-                        "OSMRO_DATABASE_URI": "mongodb://mongo",
-                        "OSMRO_DATABASE_COMMONKEY": "osm",
-                    },
-                    "kubernetes": {
-                        "startupProbe": {
-                            "exec": {"command": ["/usr/bin/pgrep", "python3"]},
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 5,
-                        },
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/openmano/tenants",
-                                "port": 9090,
-                            },
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/openmano/tenants",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 600,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
 
-        self.harness.charm.on.start.emit()
+    def test_config_changed_non_leader(
+        self,
+    ) -> NoReturn:
+        """Test ingress resources without HTTP."""
+        self.harness.set_leader(is_leader=False)
+        self.harness.charm.on.config_changed.emit()
 
-        # Initializing the kafka relation
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "kafka/0",
-            {
-                "host": "kafka",
-                "port": "9090",
-            },
-        )
+        # Assertions
+        self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)
 
-        # Initializing the mongodb relation
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "mongodb/0",
-            {
-                "connection_string": "mongodb://mongo",
-            },
-        )
+    def test_with_relations_ng(
+        self,
+    ) -> NoReturn:
+        "Test with relations (ng-ro)"
 
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_start_with_relations_no_ng_ro(self) -> NoReturn:
-        """Test deployment with old RO."""
-        self.harness.update_config({"enable_ng_ro": False})
-
-        expected_result = {
-            "version": 3,
-            "containers": [
-                {
-                    "name": "ro",
-                    "imageDetails": self.harness.charm.image.fetch(),
-                    "imagePullPolicy": "Always",
-                    "ports": [
-                        {
-                            "name": "ro",
-                            "containerPort": 9090,
-                            "protocol": "TCP",
-                        }
-                    ],
-                    "envConfig": {
-                        "OSMRO_LOG_LEVEL": "INFO",
-                        "RO_DB_HOST": "mysql",
-                        "RO_DB_OVIM_HOST": "mysql",
-                        "RO_DB_PORT": 3306,
-                        "RO_DB_OVIM_PORT": 3306,
-                        "RO_DB_USER": "mano",
-                        "RO_DB_OVIM_USER": "mano",
-                        "RO_DB_PASSWORD": "manopw",
-                        "RO_DB_OVIM_PASSWORD": "manopw",
-                        "RO_DB_ROOT_PASSWORD": "rootmanopw",
-                        "RO_DB_OVIM_ROOT_PASSWORD": "rootmanopw",
-                        "RO_DB_NAME": "mano_db",
-                        "RO_DB_OVIM_NAME": "mano_vim_db",
-                        "OPENMANO_TENANT": "osm",
-                    },
-                    "kubernetes": {
-                        "startupProbe": {
-                            "exec": {"command": ["/usr/bin/pgrep", "python3"]},
-                            "initialDelaySeconds": 60,
-                            "timeoutSeconds": 5,
-                        },
-                        "readinessProbe": {
-                            "httpGet": {
-                                "path": "/openmano/tenants",
-                                "port": 9090,
-                            },
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                        "livenessProbe": {
-                            "httpGet": {
-                                "path": "/openmano/tenants",
-                                "port": 9090,
-                            },
-                            "initialDelaySeconds": 600,
-                            "periodSeconds": 10,
-                            "timeoutSeconds": 5,
-                            "successThreshold": 1,
-                            "failureThreshold": 3,
-                        },
-                    },
-                }
-            ],
-            "kubernetesResources": {"ingressResources": []},
-        }
-
-        self.harness.charm.on.start.emit()
-
-        # Initializing the mysql relation
-        relation_id = self.harness.add_relation("mysql", "mysql")
-        self.harness.add_relation_unit(relation_id, "mysql/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "mysql/0",
-            {
-                "host": "mysql",
-                "port": 3306,
-                "user": "mano",
-                "password": "manopw",
-                "root_password": "rootmanopw",
-            },
-        )
-
-        # Verifying status
-        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        pod_spec, _ = self.harness.get_pod_spec()
-
-        self.assertDictEqual(expected_result, pod_spec)
-
-    def test_on_kafka_unit_relation_changed(self) -> NoReturn:
-        """Test to see if kafka relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        relation_id = self.harness.add_relation("kafka", "kafka")
-        self.harness.add_relation_unit(relation_id, "kafka/0")
+        # Initializing the kafka relation
+        kafka_relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(kafka_relation_id, "kafka/0")
         self.harness.update_relation_data(
-            relation_id,
-            "kafka/0",
-            {
-                "host": "kafka",
-                "port": 9090,
-            },
+            kafka_relation_id, "kafka/0", {"host": "kafka", "port": 9092}
         )
 
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-    def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mongodb relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        relation_id = self.harness.add_relation("mongodb", "mongodb")
-        self.harness.add_relation_unit(relation_id, "mongodb/0")
+        # Initializing the mongo relation
+        mongodb_relation_id = self.harness.add_relation("mongodb", "mongodb")
+        self.harness.add_relation_unit(mongodb_relation_id, "mongodb/0")
         self.harness.update_relation_data(
-            relation_id,
+            mongodb_relation_id,
             "mongodb/0",
-            {
-                "connection_string": "mongodb://mongo",
-            },
-        )
-
-        # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
-
-    def test_on_mysql_unit_relation_changed(self) -> NoReturn:
-        """Test to see if mysql relation is updated."""
-        self.harness.charm.on.start.emit()
-
-        relation_id = self.harness.add_relation("mysql", "mysql")
-        self.harness.add_relation_unit(relation_id, "mysql/0")
-        self.harness.update_relation_data(
-            relation_id,
-            "mysql/0",
-            {
-                "host": "mysql",
-                "port": 3306,
-                "user": "mano",
-                "password": "manopw",
-                "root_password": "rootmanopw",
-            },
+            {"connection_string": "mongodb://mongo:27017"},
         )
 
         # Verifying status
-        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
-
-        # Verifying status message
-        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
-        self.assertTrue(
-            self.harness.charm.unit.status.message.startswith("Waiting for ")
-        )
-        self.assertIn("kafka", self.harness.charm.unit.status.message)
-        self.assertIn("mongodb", self.harness.charm.unit.status.message)
-        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
-
-    def test_publish_ro_info(self) -> NoReturn:
-        """Test to see if ro relation is updated."""
-        expected_result = {
-            "host": "ro",
-            "port": "9090",
-        }
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
 
-        self.harness.charm.on.start.emit()
 
-        relation_id = self.harness.add_relation("ro", "lcm")
-        self.harness.add_relation_unit(relation_id, "lcm/0")
-        relation_data = self.harness.get_relation_data(relation_id, "ro")
+if __name__ == "__main__":
+    unittest.main()
 
-        self.assertDictEqual(expected_result, relation_data)
+# class TestCharm(unittest.TestCase):
+#     """RO Charm unit tests."""
+
+#     def setUp(self) -> NoReturn:
+#         """Test setup"""
+#         self.harness = Harness(RoCharm)
+#         self.harness.set_leader(is_leader=True)
+#         self.harness.begin()
+
+#     def test_on_start_without_relations_ng_ro(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_on_start_without_relations_no_ng_ro(self) -> NoReturn:
+#         """Test installation without any relation."""
+#         self.harness.update_config({"enable_ng_ro": False})
+
+#         self.harness.charm.on.start.emit()
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("mysql", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+#     def test_on_start_with_relations_ng_ro(self) -> NoReturn:
+#         """Test deployment with NG-RO."""
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "ro",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "ro",
+#                             "containerPort": 9090,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {
+#                         "OSMRO_LOG_LEVEL": "INFO",
+#                         "OSMRO_MESSAGE_DRIVER": "kafka",
+#                         "OSMRO_MESSAGE_HOST": "kafka",
+#                         "OSMRO_MESSAGE_PORT": "9090",
+#                         "OSMRO_DATABASE_DRIVER": "mongo",
+#                         "OSMRO_DATABASE_URI": "mongodb://mongo",
+#                         "OSMRO_DATABASE_COMMONKEY": "osm",
+#                     },
+#                     "kubernetes": {
+#                         "startupProbe": {
+#                             "exec": {"command": ["/usr/bin/pgrep", "python3"]},
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 5,
+#                         },
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/openmano/tenants",
+#                                 "port": 9090,
+#                             },
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/openmano/tenants",
+#                                 "port": 9090,
+#                             },
+#                             "initialDelaySeconds": 600,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                     },
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the kafka relation
+#         relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "kafka/0",
+#             {
+#                 "host": "kafka",
+#                 "port": "9090",
+#             },
+#         )
+
+#         # Initializing the mongodb relation
+#         relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "mongodb/0",
+#             {
+#                 "connection_string": "mongodb://mongo",
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_start_with_relations_no_ng_ro(self) -> NoReturn:
+#         """Test deployment with old RO."""
+#         self.harness.update_config({"enable_ng_ro": False})
+
+#         expected_result = {
+#             "version": 3,
+#             "containers": [
+#                 {
+#                     "name": "ro",
+#                     "imageDetails": self.harness.charm.image.fetch(),
+#                     "imagePullPolicy": "Always",
+#                     "ports": [
+#                         {
+#                             "name": "ro",
+#                             "containerPort": 9090,
+#                             "protocol": "TCP",
+#                         }
+#                     ],
+#                     "envConfig": {
+#                         "OSMRO_LOG_LEVEL": "INFO",
+#                         "RO_DB_HOST": "mysql",
+#                         "RO_DB_OVIM_HOST": "mysql",
+#                         "RO_DB_PORT": 3306,
+#                         "RO_DB_OVIM_PORT": 3306,
+#                         "RO_DB_USER": "mano",
+#                         "RO_DB_OVIM_USER": "mano",
+#                         "RO_DB_PASSWORD": "manopw",
+#                         "RO_DB_OVIM_PASSWORD": "manopw",
+#                         "RO_DB_ROOT_PASSWORD": "rootmanopw",
+#                         "RO_DB_OVIM_ROOT_PASSWORD": "rootmanopw",
+#                         "RO_DB_NAME": "mano_db",
+#                         "RO_DB_OVIM_NAME": "mano_vim_db",
+#                         "OPENMANO_TENANT": "osm",
+#                     },
+#                     "kubernetes": {
+#                         "startupProbe": {
+#                             "exec": {"command": ["/usr/bin/pgrep", "python3"]},
+#                             "initialDelaySeconds": 60,
+#                             "timeoutSeconds": 5,
+#                         },
+#                         "readinessProbe": {
+#                             "httpGet": {
+#                                 "path": "/openmano/tenants",
+#                                 "port": 9090,
+#                             },
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                         "livenessProbe": {
+#                             "httpGet": {
+#                                 "path": "/openmano/tenants",
+#                                 "port": 9090,
+#                             },
+#                             "initialDelaySeconds": 600,
+#                             "periodSeconds": 10,
+#                             "timeoutSeconds": 5,
+#                             "successThreshold": 1,
+#                             "failureThreshold": 3,
+#                         },
+#                     },
+#                 }
+#             ],
+#             "kubernetesResources": {"ingressResources": []},
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         # Initializing the mysql relation
+#         relation_id = self.harness.add_relation("mysql", "mysql")
+#         self.harness.add_relation_unit(relation_id, "mysql/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "mysql/0",
+#             {
+#                 "host": "mysql",
+#                 "port": 3306,
+#                 "user": "mano",
+#                 "password": "manopw",
+#                 "root_password": "rootmanopw",
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         pod_spec, _ = self.harness.get_pod_spec()
+
+#         self.assertDictEqual(expected_result, pod_spec)
+
+#     def test_on_kafka_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if kafka relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         relation_id = self.harness.add_relation("kafka", "kafka")
+#         self.harness.add_relation_unit(relation_id, "kafka/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "kafka/0",
+#             {
+#                 "host": "kafka",
+#                 "port": 9090,
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+#     def test_on_mongodb_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if mongodb relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         relation_id = self.harness.add_relation("mongodb", "mongodb")
+#         self.harness.add_relation_unit(relation_id, "mongodb/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "mongodb/0",
+#             {
+#                 "connection_string": "mongodb://mongo",
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+#     def test_on_mysql_unit_relation_changed(self) -> NoReturn:
+#         """Test to see if mysql relation is updated."""
+#         self.harness.charm.on.start.emit()
+
+#         relation_id = self.harness.add_relation("mysql", "mysql")
+#         self.harness.add_relation_unit(relation_id, "mysql/0")
+#         self.harness.update_relation_data(
+#             relation_id,
+#             "mysql/0",
+#             {
+#                 "host": "mysql",
+#                 "port": 3306,
+#                 "user": "mano",
+#                 "password": "manopw",
+#                 "root_password": "rootmanopw",
+#             },
+#         )
+
+#         # Verifying status
+#         self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+#         # Verifying status message
+#         self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+#         self.assertTrue(
+#             self.harness.charm.unit.status.message.startswith("Waiting for ")
+#         )
+#         self.assertIn("kafka", self.harness.charm.unit.status.message)
+#         self.assertIn("mongodb", self.harness.charm.unit.status.message)
+#         self.assertTrue(self.harness.charm.unit.status.message.endswith(" relations"))
+
+#     def test_publish_ro_info(self) -> NoReturn:
+#         """Test to see if ro relation is updated."""
+#         expected_result = {
+#             "host": "ro",
+#             "port": "9090",
+#         }
+
+#         self.harness.charm.on.start.emit()
+
+#         relation_id = self.harness.add_relation("ro", "lcm")
+#         self.harness.add_relation_unit(relation_id, "lcm/0")
+#         relation_data = self.harness.get_relation_data(relation_id, "ro")
+
+#         self.assertDictEqual(expected_result, relation_data)
 
 
 if __name__ == "__main__":
index 8fd07d3..1f9442e 100644 (file)
@@ -1,4 +1,4 @@
-# Copyright 2020 Canonical Ltd.
+# Copyright 2021 Canonical Ltd.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
 # To get in touch with the maintainers, please contact:
 # osm-charmers@lists.launchpad.net
 ##
+#######################################################################################
 
 [tox]
+envlist = flake8, cover, pylint, safety, yamllint
 skipsdist = True
-envlist = unit, lint
-sitepackages = False
-skip_missing_interpreters = False
 
 [testenv]
 basepython = python3.8
 setenv =
+  VIRTUAL_ENV={envdir}
   PYTHONHASHSEED=0
   PYTHONPATH = {toxinidir}/src
-  CHARM_NAME = ro
+deps =  -r{toxinidir}/requirements.txt
 
+#######################################################################################
+[testenv:cover]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        sh -c 'rm -f nosetests.xml'
+        coverage erase
+        nose2 -C --coverage src
+        coverage report --omit='*tests*'
+        coverage html -d ./cover --omit='*tests*'
+        coverage xml -o coverage.xml --omit=*tests*
+whitelist_externals = sh
+
+#######################################################################################
+[testenv:safety]
+setenv =
+        LC_ALL=C.UTF-8
+        LANG=C.UTF-8
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+commands =
+        - safety check --full-report
+
+#######################################################################################
+[testenv:flake8]
+deps = flake8
+commands =
+        flake8 src/ tests/
+
+#######################################################################################
+[testenv:pylint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        pylint
+commands =
+    pylint -E src
+
+#######################################################################################
+[testenv:black]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        black
+commands =  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+
+#######################################################################################
+[testenv:yamllint]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        yamllint
+commands = yamllint .
+
+#######################################################################################
 [testenv:build]
 passenv=HTTP_PROXY HTTPS_PROXY NO_PROXY
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-test.txt
+        charmcraft
 whitelist_externals =
   charmcraft
-  rm
-  unzip
+  cp
 commands =
-  rm -rf release ro.charm
   charmcraft build
-  unzip ro.charm -d release
+  cp -r build release
 
-[testenv:unit]
-commands =
-  coverage erase
-  stestr run --slowest --test-path=./tests --top-dir=./
-  coverage combine
-  coverage html -d cover
-  coverage xml -o cover/coverage.xml
-  coverage report
-deps =
-  coverage
-  stestr
-  mock
-  ops
-setenv =
-  {[testenv]setenv}
-  PYTHON=coverage run
-
-[testenv:lint]
-deps =
-  black
-  yamllint
-  flake8
-commands =
-  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
-  yamllint .
-  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/ .tox/ mod/ lib/"
+#######################################################################################
+[flake8]
+ignore =
+        W291,
+        W293,
+        E123,
+        E125,
+        E226,
+        E241,
+exclude =
+        .git,
+        __pycache__,
+        .tox,
+max-line-length = 120
+show-source = True
+builtins = _
 
-[coverage:run]
-branch = True
-concurrency = multiprocessing
-parallel = True
-source =
-  .
-omit =
-  .tox/*
-  tests/*
index 2348328..e196706 100755 (executable)
@@ -15,7 +15,7 @@
 
 # set -eux
 
-JUJU_AGENT_VERSION=2.8.6
+JUJU_AGENT_VERSION=2.8.9
 K8S_CLOUD_NAME="k8s-cloud"
 KUBECTL="microk8s.kubectl"
 MICROK8S_VERSION=1.19
@@ -25,9 +25,9 @@ PATH=/snap/bin:${PATH}
 
 MODEL_NAME=osm
 
-OSM_BUNDLE=cs:osm-54
-OSM_HA_BUNDLE=cs:osm-ha-40
-TAG=9
+OSM_BUNDLE=cs:osm-58
+OSM_HA_BUNDLE=cs:osm-ha-43
+TAG=testing-daily
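+# Tag of the opensourcemano/* container images referenced in the generated images overlay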
 
 function check_arguments(){
     while [ $# -gt 0 ] ; do
@@ -255,9 +255,6 @@ function deploy_charmed_osm(){
         juju deploy -m $MODEL_NAME $OSM_BUNDLE --overlay ~/.osm/vca-overlay.yaml $images_overlay
     fi
 
-    echo "Waiting for deployment to finish..."
-    check_osm_deployed
-    echo "OSM with charms deployed"
     if [ ! -v KUBECFG ]; then
         API_SERVER=${DEFAULT_IP}
     else
@@ -268,24 +265,15 @@ function deploy_charmed_osm(){
         hostport="$(echo ${url/$user@/} | cut -d/ -f1)"
         API_SERVER="$(echo $hostport | sed -e 's,:.*,,g')"
     fi
-
     # Expose OSM services
-    # Expose NBI
     juju config -m $MODEL_NAME nbi site_url=https://nbi.${API_SERVER}.xip.io
     juju config -m $MODEL_NAME ng-ui site_url=https://ui.${API_SERVER}.xip.io
+    juju config -m $MODEL_NAME grafana site_url=https://grafana.${API_SERVER}.xip.io
+    juju config -m $MODEL_NAME prometheus site_url=https://prometheus.${API_SERVER}.xip.io
 
-    # Expose Grafana
-    juju config -m $MODEL_NAME grafana-k8s juju-external-hostname=grafana.${API_SERVER}.xip.io
-    juju expose -m $MODEL_NAME grafana-k8s
-    wait_for_port grafana-k8s 0
-
-    # Expose Prometheus
-    juju config -m $MODEL_NAME prometheus-k8s juju-external-hostname=prometheus.${API_SERVER}.xip.io
-    juju expose -m $MODEL_NAME prometheus-k8s
-    wait_for_port prometheus-k8s 1
-
-    # Apply annotations
-    sg ${KUBEGRP} -c "${KUBECTL} annotate ingresses.networking nginx.ingress.kubernetes.io/proxy-body-size=0 -n osm -l juju-app=ng-ui"
+    echo "Waiting for deployment to finish..."
+    check_osm_deployed
+    echo "OSM with charms deployed"
 }
 
 function check_osm_deployed() {
@@ -332,7 +320,7 @@ function create_overlay() {
     # Generate a new overlay.yaml, overriding any existing one
     cat << EOF > /tmp/vca-overlay.yaml
 applications:
-  lcm-k8s:
+  lcm:
     options:
       vca_user: $vca_user
       vca_password: $vca_password
@@ -342,7 +330,7 @@ applications:
       vca_cacert: $vca_cacert
       vca_cloud: $vca_cloud
       vca_k8s_cloud: $K8S_CLOUD_NAME
-  mon-k8s:
+  mon:
     options:
       vca_user: $vca_user
       vca_password: $vca_password
@@ -354,52 +342,26 @@ EOF
 }
 
 function generate_images_overlay(){
-    cat << EOF > /tmp/nbi_registry.yaml
-registrypath: ${REGISTRY_URL}opensourcemano/nbi:$TAG
-EOF
-    cat << EOF > /tmp/ng_ui_registry.yaml
-registrypath: ${REGISTRY_URL}opensourcemano/ng-ui:$TAG
-EOF
-    if [ ! -z "$REGISTRY_USERNAME" ] ; then
-        REGISTRY_CREDENTIALS=$(cat <<EOF
+    echo "applications:" > /tmp/images-overlay.yaml
 
-      image_username: $REGISTRY_USERNAME
-      image_password: $REGISTRY_PASSWORD
+    charms_with_resources="nbi lcm mon pol ng-ui ro pla keystone"
+    for charm in $charms_with_resources; do
+        cat << EOF > /tmp/${charm}_registry.yaml
+registrypath: ${REGISTRY_URL}opensourcemano/${charm}:$TAG
 EOF
-    );
-    echo username: $REGISTRY_USERNAME >> /tmp/nbi_registry.yaml
-    echo password: $REGISTRY_PASSWORD >> /tmp/nbi_registry.yaml
-    echo username: $REGISTRY_USERNAME >> /tmp/ng_ui_registry.yaml
-    echo password: $REGISTRY_PASSWORD >> /tmp/ng_ui_registry.yaml
-fi
+        if [ ! -z "$REGISTRY_USERNAME" ] ; then
+            echo username: $REGISTRY_USERNAME >> /tmp/${charm}_registry.yaml
+            echo password: $REGISTRY_PASSWORD >> /tmp/${charm}_registry.yaml
+        fi
 
-    cat << EOF > /tmp/images-overlay.yaml
-applications:
-  lcm-k8s:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/lcm:$TAG ${REGISTRY_CREDENTIALS}
-  mon-k8s:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/mon:$TAG ${REGISTRY_CREDENTIALS}
-  ro-k8s:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/ro:$TAG ${REGISTRY_CREDENTIALS}
-  nbi:
-    resources:
-      image: /tmp/nbi_registry.yaml
-  pol-k8s:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/pol:$TAG ${REGISTRY_CREDENTIALS}
-  pla:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/pla:$TAG ${REGISTRY_CREDENTIALS}
-  ng-ui:
+        cat << EOF >> /tmp/images-overlay.yaml
+  ${charm}:
     resources:
-      image: /tmp/ng_ui_registry.yaml
-  keystone:
-    options:
-      image: ${REGISTRY_URL}opensourcemano/keystone:$TAG ${REGISTRY_CREDENTIALS}
+      image: /tmp/${charm}_registry.yaml
+
 EOF
+    done
+
     mv /tmp/images-overlay.yaml $IMAGES_OVERLAY_FILE
 }
 
@@ -454,7 +416,7 @@ function install_microstack() {
     ubuntu1604
     ssh-keygen -t rsa -N "" -f ~/.ssh/microstack
     microstack.openstack keypair create --public-key ~/.ssh/microstack.pub microstack
-    export OSM_HOSTNAME=`juju status --format json | jq -rc '.applications."nbi".address'`
+    export OSM_HOSTNAME=`juju status -m $MODEL_NAME --format json | jq -rc '.applications."nbi".address'`
     osm vim-create --name microstack-site \
     --user admin \
     --password keystone \
@@ -480,7 +442,8 @@ install_snaps
 bootstrap_k8s_lxd
 deploy_charmed_osm
 install_osmclient
-export OSM_HOSTNAME=$(juju config nbi site_url | sed "s/http.*\?:\/\///"):443
+export OSM_HOSTNAME=$(juju config -m $MODEL_NAME nbi site_url | sed "s/http.*\?:\/\///"):443
+sleep 10
 add_local_k8scluster
 
 if [ -v MICROSTACK ]; then