Adding Prometheus Kafka Exporter Charm 61/10261/5
author sousaedu <eduardo.sousa@canonical.com>
Mon, 16 Nov 2020 14:40:14 +0000 (14:40 +0000)
committer beierlm <mark.beierl@canonical.com>
Thu, 25 Feb 2021 16:40:35 +0000 (17:40 +0100)
Change-Id: Ibfe71c17ab17e6f8fdf4d6117edc58de61891356
Signed-off-by: sousaedu <eduardo.sousa@canonical.com>
13 files changed:
installers/charm/prometheus-kafka-exporter/.gitignore [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/.yamllint.yaml [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/README.md [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/config.yaml [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/files/kafka_exporter_dashboard.yaml [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/metadata.yaml [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/requirements.txt [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/src/charm.py [new file with mode: 0755]
installers/charm/prometheus-kafka-exporter/src/pod_spec.py [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/tests/__init__.py [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/tests/test_charm.py [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/tests/test_pod_spec.py [new file with mode: 0644]
installers/charm/prometheus-kafka-exporter/tox.ini [new file with mode: 0644]

diff --git a/installers/charm/prometheus-kafka-exporter/.gitignore b/installers/charm/prometheus-kafka-exporter/.gitignore
new file mode 100644 (file)
index 0000000..d1c8218
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+venv
+.vscode
+build
+prometheus-kafka-exporter.charm
+.coverage
+.stestr
+cover
diff --git a/installers/charm/prometheus-kafka-exporter/.yamllint.yaml b/installers/charm/prometheus-kafka-exporter/.yamllint.yaml
new file mode 100644 (file)
index 0000000..f300159
--- /dev/null
@@ -0,0 +1,31 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+---
+extends: default
+
+yaml-files:
+  - "*.yaml"
+  - "*.yml"
+  - ".yamllint"
+ignore: |
+  .tox
+  build/
diff --git a/installers/charm/prometheus-kafka-exporter/README.md b/installers/charm/prometheus-kafka-exporter/README.md
new file mode 100644 (file)
index 0000000..ae9babf
--- /dev/null
@@ -0,0 +1,23 @@
+<!-- Copyright 2021 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may
+not use this file except in compliance with the License. You may obtain
+a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+For those usages not covered by the Apache License, Version 2.0 please
+contact: legal@canonical.com
+
+To get in touch with the maintainers, please contact:
+osm-charmers@lists.launchpad.net -->
+
+# Prometheus Kafka Exporter Operator Charm for Kubernetes
+
+## Requirements
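+
+- A Kubernetes-backed Juju model (the charm targets the `kubernetes` series)
+- Juju 2.8.0 or newer (see `metadata.yaml`)
+- A Kafka charm providing the `kafka` relation
+
+## Build and deploy
+
+A minimal sketch of building and deploying the charm locally, assuming
+`charmcraft` is available (exact commands may vary with your charmcraft and
+Juju versions):
+
+```bash
+charmcraft build
+juju deploy ./prometheus-kafka-exporter.charm \
+    --resource image=bitnami/kafka-exporter:latest
+juju add-relation prometheus-kafka-exporter kafka
+```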
diff --git a/installers/charm/prometheus-kafka-exporter/config.yaml b/installers/charm/prometheus-kafka-exporter/config.yaml
new file mode 100644 (file)
index 0000000..a3aaa21
--- /dev/null
@@ -0,0 +1,39 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+options:
+  ingress_whitelist_source_range:
+    type: string
+    description: |
+      A comma-separated list of CIDRs to store in the
+      nginx.ingress.kubernetes.io/whitelist-source-range annotation.
+
+      This can be used to lock down access to the
+      Prometheus Kafka Exporter based on source IP address.
+    default: ""
+  tls_secret_name:
+    type: string
+    description: TLS Secret name
+    default: ""
+  site_url:
+    type: string
+    description: Ingress URL
+    default: ""
diff --git a/installers/charm/prometheus-kafka-exporter/files/kafka_exporter_dashboard.yaml b/installers/charm/prometheus-kafka-exporter/files/kafka_exporter_dashboard.yaml
new file mode 100644 (file)
index 0000000..afcb7e3
--- /dev/null
@@ -0,0 +1,543 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+---
+annotations:
+  list:
+    - builtIn: 1
+      datasource: '-- Grafana --'
+      enable: true
+      hide: true
+      iconColor: 'rgba(0, 211, 255, 1)'
+      name: Annotations & Alerts
+      type: dashboard
+description: Kafka resource usage and throughput
+editable: true
+gnetId: 7589
+graphTooltip: 0
+id: 10
+iteration: 1578848023483
+links: []
+panels:
+  - aliasColors: {}
+    bars: false
+    dashLength: 10
+    dashes: false
+    datasource: Prometheus
+    fill: 0
+    fillGradient: 0
+    gridPos:
+      h: 10
+      w: 10
+      x: 0
+      'y': 0
+    id: 14
+    legend:
+      alignAsTable: true
+      avg: false
+      current: true
+      max: true
+      min: false
+      rightSide: false
+      show: true
+      sideWidth: 480
+      sort: max
+      sortDesc: true
+      total: false
+      values: true
+    lines: true
+    linewidth: 1
+    links: []
+    nullPointMode: connected
+    options:
+      dataLinks: []
+    percentage: false
+    pointradius: 5
+    points: false
+    renderer: flot
+    seriesOverrides: []
+    spaceLength: 10
+    stack: false
+    steppedLine: false
+    targets:
+      - expr: >-
+          sum(kafka_topic_partition_current_offset -
+          kafka_topic_partition_oldest_offset{instance="$instance",
+          topic=~"$topic"}) by (topic)
+        format: time_series
+        intervalFactor: 1
+        legendFormat: '{{topic}}'
+        refId: B
+    thresholds: []
+    timeFrom: null
+    timeRegions: []
+    timeShift: null
+    title: Messages stored per topic
+    tooltip:
+      shared: true
+      sort: 0
+      value_type: individual
+    type: graph
+    xaxis:
+      buckets: null
+      mode: time
+      name: null
+      show: true
+      values: []
+    yaxes:
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: '0'
+        show: true
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+    yaxis:
+      align: false
+      alignLevel: null
+  - aliasColors: {}
+    bars: false
+    dashLength: 10
+    dashes: false
+    datasource: Prometheus
+    fill: 0
+    fillGradient: 0
+    gridPos:
+      h: 10
+      w: 10
+      x: 10
+      'y': 0
+    id: 12
+    legend:
+      alignAsTable: true
+      avg: false
+      current: true
+      max: true
+      min: false
+      rightSide: false
+      show: true
+      sideWidth: 480
+      sort: max
+      sortDesc: true
+      total: false
+      values: true
+    lines: true
+    linewidth: 1
+    links: []
+    nullPointMode: connected
+    options:
+      dataLinks: []
+    percentage: false
+    pointradius: 5
+    points: false
+    renderer: flot
+    seriesOverrides: []
+    spaceLength: 10
+    stack: false
+    steppedLine: false
+    targets:
+      - expr: >-
+          sum(kafka_consumergroup_lag{instance="$instance",topic=~"$topic"}) by
+          (consumergroup, topic)
+        format: time_series
+        instant: false
+        interval: ''
+        intervalFactor: 1
+        legendFormat: ' {{topic}} ({{consumergroup}})'
+        refId: A
+    thresholds: []
+    timeFrom: null
+    timeRegions: []
+    timeShift: null
+    title: Lag by Consumer Group
+    tooltip:
+      shared: true
+      sort: 2
+      value_type: individual
+    type: graph
+    xaxis:
+      buckets: null
+      mode: time
+      name: null
+      show: true
+      values: []
+    yaxes:
+      - format: short
+        label: ''
+        logBase: 1
+        max: null
+        min: '0'
+        show: true
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+    yaxis:
+      align: false
+      alignLevel: null
+  - aliasColors: {}
+    bars: false
+    dashLength: 10
+    dashes: false
+    datasource: Prometheus
+    fill: 0
+    fillGradient: 0
+    gridPos:
+      h: 10
+      w: 10
+      x: 0
+      'y': 10
+    id: 16
+    legend:
+      alignAsTable: true
+      avg: false
+      current: true
+      max: true
+      min: false
+      rightSide: false
+      show: true
+      sideWidth: 480
+      total: false
+      values: true
+    lines: true
+    linewidth: 1
+    links: []
+    nullPointMode: connected
+    options:
+      dataLinks: []
+    percentage: false
+    pointradius: 5
+    points: false
+    renderer: flot
+    seriesOverrides: []
+    spaceLength: 10
+    stack: false
+    steppedLine: false
+    targets:
+      - expr: >-
+          sum(delta(kafka_topic_partition_current_offset{instance=~'$instance',
+          topic=~"$topic"}[5m])/5) by (topic)
+        format: time_series
+        intervalFactor: 1
+        legendFormat: '{{topic}}'
+        refId: A
+    thresholds: []
+    timeFrom: null
+    timeRegions: []
+    timeShift: null
+    title: Messages produced per minute
+    tooltip:
+      shared: true
+      sort: 0
+      value_type: individual
+    type: graph
+    xaxis:
+      buckets: null
+      mode: time
+      name: null
+      show: true
+      values: []
+    yaxes:
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+    yaxis:
+      align: false
+      alignLevel: null
+  - aliasColors: {}
+    bars: false
+    dashLength: 10
+    dashes: false
+    datasource: Prometheus
+    fill: 0
+    fillGradient: 0
+    gridPos:
+      h: 10
+      w: 10
+      x: 10
+      'y': 10
+    id: 18
+    legend:
+      alignAsTable: true
+      avg: false
+      current: true
+      max: true
+      min: false
+      rightSide: false
+      show: true
+      sideWidth: 480
+      sort: current
+      sortDesc: true
+      total: false
+      values: true
+    lines: true
+    linewidth: 1
+    links: []
+    nullPointMode: connected
+    options:
+      dataLinks: []
+    percentage: false
+    pointradius: 5
+    points: false
+    renderer: flot
+    seriesOverrides: []
+    spaceLength: 10
+    stack: false
+    steppedLine: false
+    targets:
+      - expr: >-
+          sum(delta(kafka_consumergroup_current_offset{instance=~'$instance',topic=~"$topic"}[5m])/5)
+          by (consumergroup, topic)
+        format: time_series
+        intervalFactor: 1
+        legendFormat: ' {{topic}} ({{consumergroup}})'
+        refId: A
+    thresholds: []
+    timeFrom: null
+    timeRegions: []
+    timeShift: null
+    title: Messages consumed per minute
+    tooltip:
+      shared: true
+      sort: 0
+      value_type: individual
+    type: graph
+    xaxis:
+      buckets: null
+      mode: time
+      name: null
+      show: true
+      values: []
+    yaxes:
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+    yaxis:
+      align: false
+      alignLevel: null
+  - aliasColors: {}
+    bars: true
+    dashLength: 10
+    dashes: false
+    datasource: Prometheus
+    fill: 1
+    fillGradient: 0
+    gridPos:
+      h: 7
+      w: 20
+      x: 0
+      'y': 20
+    id: 8
+    legend:
+      alignAsTable: true
+      avg: false
+      current: true
+      max: false
+      min: false
+      rightSide: true
+      show: true
+      sideWidth: 420
+      total: false
+      values: true
+    lines: false
+    linewidth: 1
+    links: []
+    nullPointMode: 'null'
+    options:
+      dataLinks: []
+    percentage: false
+    pointradius: 5
+    points: false
+    renderer: flot
+    seriesOverrides: []
+    spaceLength: 10
+    stack: false
+    steppedLine: false
+    targets:
+      - expr: >-
+          sum by(topic)
+          (kafka_topic_partitions{instance="$instance",topic=~"$topic"})
+        format: time_series
+        intervalFactor: 1
+        legendFormat: '{{topic}}'
+        refId: A
+    thresholds: []
+    timeFrom: null
+    timeRegions: []
+    timeShift: null
+    title: Partitions per Topic
+    tooltip:
+      shared: false
+      sort: 0
+      value_type: individual
+    type: graph
+    xaxis:
+      buckets: null
+      mode: series
+      name: null
+      show: false
+      values:
+        - current
+    yaxes:
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+      - format: short
+        label: null
+        logBase: 1
+        max: null
+        min: null
+        show: true
+    yaxis:
+      align: false
+      alignLevel: null
+refresh: 5s
+schemaVersion: 19
+style: dark
+tags: []
+templating:
+  list:
+    - allValue: null
+      current:
+        text: osm-kafka-exporter-service
+        value: osm-kafka-exporter-service
+      datasource: Prometheus
+      definition: ''
+      hide: 0
+      includeAll: false
+      label: Job
+      multi: false
+      name: job
+      options: []
+      query: 'label_values(kafka_consumergroup_current_offset, job)'
+      refresh: 1
+      regex: ''
+      skipUrlSync: false
+      sort: 0
+      tagValuesQuery: ''
+      tags: []
+      tagsQuery: ''
+      type: query
+      useTags: false
+    - allValue: null
+      datasource: Prometheus
+      definition: ''
+      hide: 0
+      includeAll: false
+      label: Instance
+      multi: false
+      name: instance
+      options: []
+      query: >-
+        label_values(kafka_consumergroup_current_offset{job=~"$job"},
+        instance)
+      refresh: 1
+      regex: ''
+      skipUrlSync: false
+      sort: 0
+      tagValuesQuery: ''
+      tags: []
+      tagsQuery: ''
+      type: query
+      useTags: false
+    - allValue: null
+      current:
+        tags: []
+        text: All
+        value:
+          - $__all
+      datasource: Prometheus
+      definition: ''
+      hide: 0
+      includeAll: true
+      label: Topic
+      multi: true
+      name: topic
+      options: []
+      query: >-
+        label_values(kafka_topic_partition_current_offset{instance='$instance',topic!='__consumer_offsets',topic!='--kafka'},
+        topic)
+      refresh: 1
+      regex: ''
+      skipUrlSync: false
+      sort: 1
+      tagValuesQuery: ''
+      tags: []
+      tagsQuery: topic
+      type: query
+      useTags: false
+time:
+  from: now-1h
+  to: now
+timepicker:
+  refresh_intervals:
+    - 5s
+    - 10s
+    - 30s
+    - 1m
+    - 5m
+    - 15m
+    - 30m
+    - 1h
+    - 2h
+    - 1d
+  time_options:
+    - 5m
+    - 15m
+    - 1h
+    - 6h
+    - 12h
+    - 24h
+    - 2d
+    - 7d
+    - 30d
+timezone: browser
+title: Kafka
+uid: jwPKIsniz
+version: 2
diff --git a/installers/charm/prometheus-kafka-exporter/metadata.yaml b/installers/charm/prometheus-kafka-exporter/metadata.yaml
new file mode 100644 (file)
index 0000000..55f2ab0
--- /dev/null
@@ -0,0 +1,49 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+name: prometheus-kafka-exporter
+summary: OSM Prometheus Kafka Exporter
+description: |
+  A CAAS charm to deploy OSM's Prometheus Kafka Exporter.
+series:
+  - kubernetes
+tags:
+  - kubernetes
+  - osm
+  - prometheus
+  - kafka-exporter
+min-juju-version: 2.8.0
+deployment:
+  type: stateless
+  service: cluster
+resources:
+  image:
+    type: oci-image
+    description: Image of kafka-exporter
+    upstream-source: "bitnami/kafka-exporter:latest"
+requires:
+  kafka:
+    interface: kafka
+provides:
+  prometheus-target:
+    interface: http
+  grafana-dashboard:
+    interface: grafana-dashboard
diff --git a/installers/charm/prometheus-kafka-exporter/requirements.txt b/installers/charm/prometheus-kafka-exporter/requirements.txt
new file mode 100644 (file)
index 0000000..884cf9f
--- /dev/null
@@ -0,0 +1,23 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+ops
+git+https://github.com/juju-solutions/resource-oci-image/@c5778285d332edf3d9a538f9d0c06154b7ec1b0b#egg=oci-image
diff --git a/installers/charm/prometheus-kafka-exporter/src/charm.py b/installers/charm/prometheus-kafka-exporter/src/charm.py
new file mode 100755 (executable)
index 0000000..70a31f8
--- /dev/null
@@ -0,0 +1,216 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+import logging
+from pathlib import Path
+from typing import Dict, List, NoReturn
+
+from ops.charm import CharmBase
+from ops.framework import EventBase, StoredState
+from ops.main import main
+from ops.model import ActiveStatus, Application, BlockedStatus, MaintenanceStatus, Unit
+from oci_image import OCIImageResource, OCIImageResourceError
+
+from pod_spec import make_pod_spec
+
+logger = logging.getLogger(__name__)
+
+PROMETHEUS_KAFKA_EXPORTER_PORT = 9308
+
+
+class RelationsMissing(Exception):
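+    """Exception raised when required relation data is missing."""
+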
+    def __init__(self, missing_relations: List):
+        self.message = ""
+        if missing_relations and isinstance(missing_relations, list):
+            self.message += f'Waiting for {", ".join(missing_relations)} relation'
+            if "," in self.message:
+                self.message += "s"
+
+
+class RelationDefinition:
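+    """Describe a required relation.
+
+    Holds the relation name, the expected data keys, and whether the data
+    is published by the remote Application or Unit.
+    """
+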
+    def __init__(self, relation_name: str, keys: List, source_type):
+        if source_type != Application and source_type != Unit:
+            raise TypeError(
+                "source_type should be ops.model.Application or ops.model.Unit"
+            )
+        self.relation_name = relation_name
+        self.keys = keys
+        self.source_type = source_type
+
+
+def check_missing_relation_data(
+    data: Dict,
+    expected_relations_data: List[RelationDefinition],
+):
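+    """Raise RelationsMissing if any expected relation data is absent.
+
+    Args:
+        data (Dict): merged relation data, keyed as "<relation>_<key>".
+        expected_relations_data (List[RelationDefinition]): relations and
+            the keys each one must provide.
+    """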
+    missing_relations = []
+    for relation_data in expected_relations_data:
+        if not all(
+            f"{relation_data.relation_name}_{k}" in data for k in relation_data.keys
+        ):
+            missing_relations.append(relation_data.relation_name)
+    if missing_relations:
+        raise RelationsMissing(missing_relations)
+
+
+def get_relation_data(
+    charm: CharmBase,
+    relation_data: RelationDefinition,
+) -> Dict:
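+    """Read the data published by the remote side of a relation.
+
+    Args:
+        charm (CharmBase): charm instance.
+        relation_data (RelationDefinition): definition of the relation to read.
+
+    Returns:
+        Dict: collected values, keyed as "<relation_name>_<key>".
+    """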
+    data = {}
+    relation = charm.model.get_relation(relation_data.relation_name)
+    if relation:
+        self_app_unit = (
+            charm.app if relation_data.source_type == Application else charm.unit
+        )
+        expected_type = relation_data.source_type
+        for app_unit in relation.data:
+            if app_unit != self_app_unit and isinstance(app_unit, expected_type):
+                if all(k in relation.data[app_unit] for k in relation_data.keys):
+                    for k in relation_data.keys:
+                        data[f"{relation_data.relation_name}_{k}"] = relation.data[
+                            app_unit
+                        ].get(k)
+                    break
+    return data
+
+
+class PrometheusKafkaExporterCharm(CharmBase):
+    """Prometheus Kafka Exporter Charm."""
+
+    state = StoredState()
+
+    def __init__(self, *args) -> NoReturn:
+        """Prometheus Kafka Exporter Charm constructor."""
+        super().__init__(*args)
+
+        # Internal state initialization
+        self.state.set_default(pod_spec=None)
+
+        self.port = PROMETHEUS_KAFKA_EXPORTER_PORT
+        self.image = OCIImageResource(self, "image")
+
+        # Registering regular events
+        self.framework.observe(self.on.start, self.configure_pod)
+        self.framework.observe(self.on.config_changed, self.configure_pod)
+
+        # Registering required relation events
+        self.framework.observe(self.on.kafka_relation_changed, self.configure_pod)
+
+        # Registering required relation departed events
+        self.framework.observe(self.on.kafka_relation_departed, self.configure_pod)
+
+        # Registering provided relation events
+        self.framework.observe(
+            self.on.prometheus_target_relation_joined, self._publish_target_info
+        )
+        self.framework.observe(
+            self.on.grafana_dashboard_relation_joined, self._publish_dashboard_info
+        )
+
+    def _publish_target_info(self, event: EventBase) -> NoReturn:
+        """Publishes target information.
+
+        Args:
+            event (EventBase): Exporter relation event.
+        """
+        rel_data = {
+            "hostname": self.model.app.name,
+            "port": str(PROMETHEUS_KAFKA_EXPORTER_PORT),
+        }
+        for k, v in rel_data.items():
+            event.relation.data[self.unit][k] = v
+
+    def _publish_dashboard_info(self, event: EventBase) -> NoReturn:
+        """Publishes dashboard information.
+
+        Args:
+            event (EventBase): Exporter relation event.
+        """
+        rel_data = {
+            "dashboard": Path("files/kafka_exporter_dashboard.yaml").read_text(),
+        }
+        for k, v in rel_data.items():
+            event.relation.data[self.unit][k] = v
+
+    @property
+    def relations_requirements(self):
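+        """Relations required by this charm and the data each must provide."""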
+        return [RelationDefinition("kafka", ["host", "port"], Unit)]
+
+    def get_relation_state(self):
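+        """Collect data from the required relations.
+
+        Raises:
+            RelationsMissing: if any required relation data is missing.
+        """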
+        relation_state = {}
+        for relation_requirements in self.relations_requirements:
+            data = get_relation_data(self, relation_requirements)
+            relation_state = {**relation_state, **data}
+        check_missing_relation_data(relation_state, self.relations_requirements)
+        return relation_state
+
+    def configure_pod(self, _=None) -> NoReturn:
+        """Assemble the pod spec and apply it, if possible.
+
+        Args:
+            _ (EventBase): hook or relation event that triggered this
+                           handler (unused).
+        """
+        if not self.unit.is_leader():
+            self.unit.status = ActiveStatus("ready")
+            return
+
+        relation_state = None
+        try:
+            relation_state = self.get_relation_state()
+        except RelationsMissing as exc:
+            logger.exception("Relation missing error")
+            self.unit.status = BlockedStatus(exc.message)
+            return
+
+        self.unit.status = MaintenanceStatus("Assembling pod spec")
+
+        # Fetch image information
+        try:
+            self.unit.status = MaintenanceStatus("Fetching image information")
+            image_info = self.image.fetch()
+        except OCIImageResourceError:
+            self.unit.status = BlockedStatus("Error fetching image information")
+            return
+
+        try:
+            pod_spec = make_pod_spec(
+                image_info,
+                self.model.config,
+                relation_state,
+                self.model.app.name,
+                self.port,
+            )
+        except ValueError as exc:
+            logger.exception("Config/Relation data validation error")
+            self.unit.status = BlockedStatus(str(exc))
+            return
+
+        if self.state.pod_spec != pod_spec:
+            self.model.pod.set_spec(pod_spec)
+            self.state.pod_spec = pod_spec
+
+        self.unit.status = ActiveStatus("ready")
+
+
+if __name__ == "__main__":
+    main(PrometheusKafkaExporterCharm)
diff --git a/installers/charm/prometheus-kafka-exporter/src/pod_spec.py b/installers/charm/prometheus-kafka-exporter/src/pod_spec.py
new file mode 100644 (file)
index 0000000..256c203
--- /dev/null
@@ -0,0 +1,309 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+import logging
+from ipaddress import ip_network
+from typing import Any, Dict, List
+from urllib.parse import urlparse
+
+logger = logging.getLogger(__name__)
+
+
+def _validate_ip_network(network: str) -> bool:
+    """Validate IP network.
+
+    Args:
+        network (str): IP network range.
+
+    Returns:
+        bool: True if valid, false otherwise.
+    """
+    if not network:
+        return True
+
+    try:
+        ip_network(network)
+    except ValueError:
+        return False
+
+    return True
+
+
+def _validate_data(config_data: Dict[str, Any], relation_data: Dict[str, Any]) -> bool:
+    """Validates passed information.
+
+    Args:
+        config_data (Dict[str, Any]): configuration information.
+        relation_data (Dict[str, Any]): relation information
+
+    Raises:
+        ValueError: when config and/or relation data is not valid.
+    """
+    config_validators = {
+        "site_url": lambda value, _: isinstance(value, str)
+        if value is not None
+        else True,
+        "ingress_whitelist_source_range": lambda value, _: _validate_ip_network(value),
+        "tls_secret_name": lambda value, _: isinstance(value, str)
+        if value is not None
+        else True,
+    }
+    relation_validators = {
+        "kafka_host": lambda value, _: isinstance(value, str) and len(value) > 0,
+        "kafka_port": lambda value, _: isinstance(value, str)
+        and len(value) > 0
+        and int(value) > 0,
+    }
+    problems = []
+
+    for key, validator in config_validators.items():
+        valid = validator(config_data.get(key), config_data)
+
+        if not valid:
+            problems.append(key)
+
+    for key, validator in relation_validators.items():
+        valid = validator(relation_data.get(key), relation_data)
+
+        if not valid:
+            problems.append(key)
+
+    if len(problems) > 0:
+        raise ValueError("Errors found in: {}".format(", ".join(problems)))
+
+    return True
+
+
+def _make_pod_ports(port: int) -> List[Dict[str, Any]]:
+    """Generate pod ports details.
+
+    Args:
+        port (int): port to expose.
+
+    Returns:
+        List[Dict[str, Any]]: pod port details.
+    """
+    return [
+        {"name": "prometheus-kafka-exporter", "containerPort": port, "protocol": "TCP"}
+    ]
+
+
+def _make_pod_envconfig(
+    config: Dict[str, Any], relation_state: Dict[str, Any]
+) -> Dict[str, Any]:
+    """Generate pod environment configuration.
+
+    Args:
+        config (Dict[str, Any]): configuration information.
+        relation_state (Dict[str, Any]): relation state information.
+
+    Returns:
+        Dict[str, Any]: pod environment configuration.
+    """
+    envconfig = {}
+
+    return envconfig
+
+
+def _make_pod_ingress_resources(
+    config: Dict[str, Any], app_name: str, port: int
+) -> List[Dict[str, Any]]:
+    """Generate pod ingress resources.
+
+    Args:
+        config (Dict[str, Any]): configuration information.
+        app_name (str): application name.
+        port (int): port to expose.
+
+    Returns:
+        List[Dict[str, Any]]: pod ingress resources.
+    """
+    site_url = config.get("site_url")
+
+    if not site_url:
+        return
+
+    parsed = urlparse(site_url)
+
+    if not parsed.scheme.startswith("http"):
+        return
+
+    ingress_whitelist_source_range = config["ingress_whitelist_source_range"]
+
+    annotations = {}
+
+    if ingress_whitelist_source_range:
+        annotations[
+            "nginx.ingress.kubernetes.io/whitelist-source-range"
+        ] = ingress_whitelist_source_range
+
+    ingress_spec_tls = None
+
+    if parsed.scheme == "https":
+        ingress_spec_tls = [{"hosts": [parsed.hostname]}]
+        tls_secret_name = config["tls_secret_name"]
+        if tls_secret_name:
+            ingress_spec_tls[0]["secretName"] = tls_secret_name
+    else:
+        annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false"
+
+    ingress = {
+        "name": "{}-ingress".format(app_name),
+        "annotations": annotations,
+        "spec": {
+            "rules": [
+                {
+                    "host": parsed.hostname,
+                    "http": {
+                        "paths": [
+                            {
+                                "path": "/",
+                                "backend": {
+                                    "serviceName": app_name,
+                                    "servicePort": port,
+                                },
+                            }
+                        ]
+                    },
+                }
+            ]
+        },
+    }
+    if ingress_spec_tls:
+        ingress["spec"]["tls"] = ingress_spec_tls
+
+    return [ingress]
+
+
+def _make_readiness_probe(port: int) -> Dict[str, Any]:
+    """Generate readiness probe.
+
+    Args:
+        port (int): service port.
+
+    Returns:
+        Dict[str, Any]: readiness probe.
+    """
+    return {
+        "httpGet": {
+            "path": "/api/health",
+            "port": port,
+        },
+        "initialDelaySeconds": 10,
+        "periodSeconds": 10,
+        "timeoutSeconds": 5,
+        "successThreshold": 1,
+        "failureThreshold": 3,
+    }
+
+
+def _make_liveness_probe(port: int) -> Dict[str, Any]:
+    """Generate liveness probe.
+
+    Args:
+        port (int): service port.
+
+    Returns:
+        Dict[str, Any]: liveness probe.
+    """
+    return {
+        "httpGet": {
+            "path": "/api/health",
+            "port": port,
+        },
+        "initialDelaySeconds": 60,
+        "timeoutSeconds": 30,
+        "failureThreshold": 10,
+    }
+
+
+def _make_pod_command(relation: Dict[str, Any]) -> List[str]:
+    """Generate the startup command.
+
+    Args:
+        relation (Dict[str, Any]): Relation information.
+
+    Returns:
+        List[str]: command to startup the process.
+    """
+    command = [
+        "kafka-exporter",
+        "--kafka.server={}:{}".format(
+            relation.get("kafka_host"), relation.get("kafka_port")
+        ),
+    ]
+
+    return command
+
+
+def make_pod_spec(
+    image_info: Dict[str, str],
+    config: Dict[str, Any],
+    relation_state: Dict[str, Any],
+    app_name: str = "prometheus-kafka-exporter",
+    port: int = 9308,
+) -> Dict[str, Any]:
+    """Generate the pod spec information.
+
+    Args:
+        image_info (Dict[str, str]): Object provided by
+                                     OCIImageResource("image").fetch().
+        config (Dict[str, Any]): Configuration information.
+        relation_state (Dict[str, Any]): Relation state information.
+        app_name (str, optional): Application name. Defaults to
+                                  "prometheus-kafka-exporter".
+        port (int, optional): Port for the container. Defaults to 9308.
+
+    Returns:
+        Dict[str, Any]: Pod spec dictionary for the charm.
+    """
+    if not image_info:
+        return None
+
+    _validate_data(config, relation_state)
+
+    ports = _make_pod_ports(port)
+    env_config = _make_pod_envconfig(config, relation_state)
+    readiness_probe = _make_readiness_probe(port)
+    liveness_probe = _make_liveness_probe(port)
+    ingress_resources = _make_pod_ingress_resources(config, app_name, port)
+    command = _make_pod_command(relation_state)
+
+    return {
+        "version": 3,
+        "containers": [
+            {
+                "name": app_name,
+                "imageDetails": image_info,
+                "imagePullPolicy": "Always",
+                "ports": ports,
+                "envConfig": env_config,
+                "command": command,
+                "kubernetes": {
+                    "readinessProbe": readiness_probe,
+                    "livenessProbe": liveness_probe,
+                },
+            }
+        ],
+        "kubernetesResources": {
+            "ingressResources": ingress_resources or [],
+        },
+    }
diff --git a/installers/charm/prometheus-kafka-exporter/tests/__init__.py b/installers/charm/prometheus-kafka-exporter/tests/__init__.py
new file mode 100644 (file)
index 0000000..4fd849a
--- /dev/null
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+"""Init mocking for unit tests."""
+
+import sys
+import mock
+
+sys.path.append("src")
+
+oci_image = mock.MagicMock()
+sys.modules["oci_image"] = oci_image
diff --git a/installers/charm/prometheus-kafka-exporter/tests/test_charm.py b/installers/charm/prometheus-kafka-exporter/tests/test_charm.py
new file mode 100644 (file)
index 0000000..6b50874
--- /dev/null
@@ -0,0 +1,469 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+
+from ops.model import BlockedStatus
+from ops.testing import Harness
+
+from charm import PrometheusKafkaExporterCharm
+
+
+class TestCharm(unittest.TestCase):
+    """Prometheus Kafka Exporter Charm unit tests."""
+
+    def setUp(self) -> NoReturn:
+        """Test setup"""
+        self.harness = Harness(PrometheusKafkaExporterCharm)
+        self.harness.set_leader(is_leader=True)
+        self.harness.begin()
+
+    def test_on_start_without_relations(self) -> NoReturn:
+        """Test installation without any relation."""
+        self.harness.charm.on.start.emit()
+
+        # Verifying status
+        self.assertIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+        # Verifying status message
+        self.assertGreater(len(self.harness.charm.unit.status.message), 0)
+        self.assertTrue(
+            self.harness.charm.unit.status.message.startswith("Waiting for ")
+        )
+        self.assertIn("kafka", self.harness.charm.unit.status.message)
+        self.assertTrue(self.harness.charm.unit.status.message.endswith(" relation"))
+
+    def test_on_start_with_relations_without_http(self) -> NoReturn:
+        """Test deployment."""
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": "prometheus-kafka-exporter",
+                    "imageDetails": self.harness.charm.image.fetch(),
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": "prometheus-kafka-exporter",
+                            "containerPort": 9308,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                },
+            ],
+            "kubernetesResources": {"ingressResources": []},
+        }
+
+        self.harness.charm.on.start.emit()
+
+        # Initializing the kafka relation
+        relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "kafka/0",
+            {
+                "host": "kafka",
+                "port": "9090",
+            },
+        )
+
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+        pod_spec, _ = self.harness.get_pod_spec()
+
+        self.assertDictEqual(expected_result, pod_spec)
+
+    def test_ingress_resources_with_http(self) -> NoReturn:
+        """Test ingress resources with HTTP."""
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": "prometheus-kafka-exporter",
+                    "imageDetails": self.harness.charm.image.fetch(),
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": "prometheus-kafka-exporter",
+                            "containerPort": 9308,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                },
+            ],
+            "kubernetesResources": {
+                "ingressResources": [
+                    {
+                        "name": "prometheus-kafka-exporter-ingress",
+                        "annotations": {
+                            "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+                        },
+                        "spec": {
+                            "rules": [
+                                {
+                                    "host": "prometheus-kafka-exporter",
+                                    "http": {
+                                        "paths": [
+                                            {
+                                                "path": "/",
+                                                "backend": {
+                                                    "serviceName": "prometheus-kafka-exporter",
+                                                    "servicePort": 9308,
+                                                },
+                                            }
+                                        ]
+                                    },
+                                }
+                            ]
+                        },
+                    }
+                ],
+            },
+        }
+
+        self.harness.charm.on.start.emit()
+
+        # Initializing the kafka relation
+        relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "kafka/0",
+            {
+                "host": "kafka",
+                "port": "9090",
+            },
+        )
+
+        self.harness.update_config({"site_url": "http://prometheus-kafka-exporter"})
+
+        pod_spec, _ = self.harness.get_pod_spec()
+
+        self.assertDictEqual(expected_result, pod_spec)
+
+    def test_ingress_resources_with_https(self) -> NoReturn:
+        """Test ingress resources with HTTPS."""
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": "prometheus-kafka-exporter",
+                    "imageDetails": self.harness.charm.image.fetch(),
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": "prometheus-kafka-exporter",
+                            "containerPort": 9308,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                },
+            ],
+            "kubernetesResources": {
+                "ingressResources": [
+                    {
+                        "name": "prometheus-kafka-exporter-ingress",
+                        "annotations": {},
+                        "spec": {
+                            "rules": [
+                                {
+                                    "host": "prometheus-kafka-exporter",
+                                    "http": {
+                                        "paths": [
+                                            {
+                                                "path": "/",
+                                                "backend": {
+                                                    "serviceName": "prometheus-kafka-exporter",
+                                                    "servicePort": 9308,
+                                                },
+                                            }
+                                        ]
+                                    },
+                                }
+                            ],
+                            "tls": [
+                                {
+                                    "hosts": ["prometheus-kafka-exporter"],
+                                    "secretName": "prometheus-kafka-exporter",
+                                }
+                            ],
+                        },
+                    }
+                ],
+            },
+        }
+
+        self.harness.charm.on.start.emit()
+
+        # Initializing the kafka relation
+        relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "kafka/0",
+            {
+                "host": "kafka",
+                "port": "9090",
+            },
+        )
+
+        self.harness.update_config(
+            {
+                "site_url": "https://prometheus-kafka-exporter",
+                "tls_secret_name": "prometheus-kafka-exporter",
+            }
+        )
+
+        pod_spec, _ = self.harness.get_pod_spec()
+
+        self.assertDictEqual(expected_result, pod_spec)
+
+    def test_ingress_resources_with_https_and_ingress_whitelist(self) -> NoReturn:
+        """Test ingress resources with HTTPS and ingress whitelist."""
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": "prometheus-kafka-exporter",
+                    "imageDetails": self.harness.charm.image.fetch(),
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": "prometheus-kafka-exporter",
+                            "containerPort": 9308,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": 9308,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                },
+            ],
+            "kubernetesResources": {
+                "ingressResources": [
+                    {
+                        "name": "prometheus-kafka-exporter-ingress",
+                        "annotations": {
+                            "nginx.ingress.kubernetes.io/whitelist-source-range": "0.0.0.0/0",
+                        },
+                        "spec": {
+                            "rules": [
+                                {
+                                    "host": "prometheus-kafka-exporter",
+                                    "http": {
+                                        "paths": [
+                                            {
+                                                "path": "/",
+                                                "backend": {
+                                                    "serviceName": "prometheus-kafka-exporter",
+                                                    "servicePort": 9308,
+                                                },
+                                            }
+                                        ]
+                                    },
+                                }
+                            ],
+                            "tls": [
+                                {
+                                    "hosts": ["prometheus-kafka-exporter"],
+                                    "secretName": "prometheus-kafka-exporter",
+                                }
+                            ],
+                        },
+                    }
+                ],
+            },
+        }
+
+        self.harness.charm.on.start.emit()
+
+        # Initializing the kafka relation
+        relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "kafka/0",
+            {
+                "host": "kafka",
+                "port": "9090",
+            },
+        )
+
+        self.harness.update_config(
+            {
+                "site_url": "https://prometheus-kafka-exporter",
+                "tls_secret_name": "prometheus-kafka-exporter",
+                "ingress_whitelist_source_range": "0.0.0.0/0",
+            }
+        )
+
+        pod_spec, _ = self.harness.get_pod_spec()
+
+        self.assertDictEqual(expected_result, pod_spec)
+
+    def test_on_kafka_unit_relation_changed(self) -> NoReturn:
+        """Test to see if kafka relation is updated."""
+        self.harness.charm.on.start.emit()
+
+        relation_id = self.harness.add_relation("kafka", "kafka")
+        self.harness.add_relation_unit(relation_id, "kafka/0")
+        self.harness.update_relation_data(
+            relation_id,
+            "kafka/0",
+            {
+                "host": "kafka",
+                "port": "9090",
+            },
+        )
+
+        # Verifying status
+        self.assertNotIsInstance(self.harness.charm.unit.status, BlockedStatus)
+
+    def test_publish_target_info(self) -> NoReturn:
+        """Test to see if target relation is updated."""
+        expected_result = {
+            "hostname": "prometheus-kafka-exporter",
+            "port": "9308",
+        }
+
+        self.harness.charm.on.start.emit()
+
+        relation_id = self.harness.add_relation("prometheus-target", "prometheus")
+        self.harness.add_relation_unit(relation_id, "prometheus/0")
+        relation_data = self.harness.get_relation_data(
+            relation_id, "prometheus-kafka-exporter/0"
+        )
+
+        self.assertDictEqual(expected_result, relation_data)
+
+    def test_publish_dashboard_info(self) -> NoReturn:
+        """Test to see if dashboard relation is updated."""
+        self.harness.charm.on.start.emit()
+
+        relation_id = self.harness.add_relation("grafana-dashboard", "grafana")
+        self.harness.add_relation_unit(relation_id, "grafana/0")
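+        # The dashboard JSON is published in the charm's own unit relation
+        # data for Grafana to import.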
+        relation_data = self.harness.get_relation_data(
+            relation_id, "prometheus-kafka-exporter/0"
+        )
+
+        self.assertTrue("dashboard" in relation_data)
+        self.assertTrue(len(relation_data["dashboard"]) > 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/installers/charm/prometheus-kafka-exporter/tests/test_pod_spec.py b/installers/charm/prometheus-kafka-exporter/tests/test_pod_spec.py
new file mode 100644 (file)
index 0000000..b1251f8
--- /dev/null
@@ -0,0 +1,498 @@
+#!/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+from typing import NoReturn
+import unittest
+
+import pod_spec
+
+
+class TestPodSpec(unittest.TestCase):
+    """Pod spec unit tests."""
+
+    def test_make_pod_ports(self) -> NoReturn:
+        """Testing make pod ports."""
+        port = 9308
+
+        expected_result = [
+            {
+                "name": "prometheus-kafka-exporter",
+                "containerPort": port,
+                "protocol": "TCP",
+            }
+        ]
+
+        pod_ports = pod_spec._make_pod_ports(port)
+
+        self.assertListEqual(expected_result, pod_ports)
+
+    def test_make_pod_envconfig(self) -> NoReturn:
+        """Teting make pod envconfig."""
+        config = {}
+        relation_state = {}
+
+        expected_result = {}
+
+        pod_envconfig = pod_spec._make_pod_envconfig(config, relation_state)
+
+        self.assertDictEqual(expected_result, pod_envconfig)
+
+    def test_make_pod_ingress_resources_without_site_url(self) -> NoReturn:
+        """Testing make pod ingress resources without site_url."""
+        config = {"site_url": ""}
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertIsNone(pod_ingress_resources)
+
+    def test_make_pod_ingress_resources(self) -> NoReturn:
+        """Testing make pod ingress resources."""
+        config = {
+            "site_url": "http://prometheus-kafka-exporter",
+            "ingress_whitelist_source_range": "",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ]
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_whitelist_source_range(self) -> NoReturn:
+        """Testing make pod ingress resources with whitelist_source_range."""
+        config = {
+            "site_url": "http://prometheus-kafka-exporter",
+            "ingress_whitelist_source_range": "0.0.0.0/0",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {
+                    "nginx.ingress.kubernetes.io/ssl-redirect": "false",
+                    "nginx.ingress.kubernetes.io/whitelist-source-range": config[
+                        "ingress_whitelist_source_range"
+                    ],
+                },
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ]
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_https(self) -> NoReturn:
+        """Testing make pod ingress resources with HTTPs."""
+        config = {
+            "site_url": "https://prometheus-kafka-exporter",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {},
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ],
+                    "tls": [{"hosts": [app_name]}],
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_pod_ingress_resources_with_https_tls_secret_name(self) -> NoReturn:
+        """Testing make pod ingress resources with HTTPs and TLS secret name."""
+        config = {
+            "site_url": "https://prometheus-kafka-exporter",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "",
+            "tls_secret_name": "secret_name",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = [
+            {
+                "name": f"{app_name}-ingress",
+                "annotations": {},
+                "spec": {
+                    "rules": [
+                        {
+                            "host": app_name,
+                            "http": {
+                                "paths": [
+                                    {
+                                        "path": "/",
+                                        "backend": {
+                                            "serviceName": app_name,
+                                            "servicePort": port,
+                                        },
+                                    }
+                                ]
+                            },
+                        }
+                    ],
+                    "tls": [
+                        {"hosts": [app_name], "secretName": config["tls_secret_name"]}
+                    ],
+                },
+            }
+        ]
+
+        pod_ingress_resources = pod_spec._make_pod_ingress_resources(
+            config, app_name, port
+        )
+
+        self.assertListEqual(expected_result, pod_ingress_resources)
+
+    def test_make_readiness_probe(self) -> NoReturn:
+        """Testing make readiness probe."""
+        port = 9308
+
+        expected_result = {
+            "httpGet": {
+                "path": "/api/health",
+                "port": port,
+            },
+            "initialDelaySeconds": 10,
+            "periodSeconds": 10,
+            "timeoutSeconds": 5,
+            "successThreshold": 1,
+            "failureThreshold": 3,
+        }
+
+        readiness_probe = pod_spec._make_readiness_probe(port)
+
+        self.assertDictEqual(expected_result, readiness_probe)
+
+    def test_make_liveness_probe(self) -> NoReturn:
+        """Testing make liveness probe."""
+        port = 9308
+
+        expected_result = {
+            "httpGet": {
+                "path": "/api/health",
+                "port": port,
+            },
+            "initialDelaySeconds": 60,
+            "timeoutSeconds": 30,
+            "failureThreshold": 10,
+        }
+
+        liveness_probe = pod_spec._make_liveness_probe(port)
+
+        self.assertDictEqual(expected_result, liveness_probe)
+
+    def test_make_pod_command(self) -> NoReturn:
+        """Testing make pod command."""
+        relation = {
+            "kakfa_host": "kafka",
+            "kafka_port": "9090",
+        }
+
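+        # The exporter is started with the Kafka address taken from the
+        # relation data, e.g. "--kafka.server=kafka:9090".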
+        expected_result = [
+            "kafka-exporter",
+            "--kafka.server={}:{}".format(
+                relation.get("kafka_host"), relation.get("kafka_port")
+            ),
+        ]
+
+        pod_command = pod_spec._make_pod_command(relation)
+
+        self.assertListEqual(expected_result, pod_command)
+
+    def test_make_pod_spec(self) -> NoReturn:
+        """Testing make pod spec."""
+        image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
+        config = {
+            "site_url": "",
+        }
+        relation_state = {
+            "kafka_host": "kafka",
+            "kafka_port": "9090",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": app_name,
+                    "imageDetails": image_info,
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": app_name,
+                            "containerPort": port,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                }
+            ],
+            "kubernetesResources": {"ingressResources": []},
+        }
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertDictEqual(expected_result, spec)
+
+    def test_make_pod_spec_with_ingress(self) -> NoReturn:
+        """Testing make pod spec."""
+        image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
+        config = {
+            "site_url": "https://prometheus-kafka-exporter",
+            "tls_secret_name": "prometheus-kafka-exporter",
+            "max_file_size": 0,
+            "ingress_whitelist_source_range": "0.0.0.0/0",
+        }
+        relation_state = {
+            "kafka_host": "kafka",
+            "kafka_port": "9090",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        expected_result = {
+            "version": 3,
+            "containers": [
+                {
+                    "name": app_name,
+                    "imageDetails": image_info,
+                    "imagePullPolicy": "Always",
+                    "ports": [
+                        {
+                            "name": app_name,
+                            "containerPort": port,
+                            "protocol": "TCP",
+                        }
+                    ],
+                    "envConfig": {},
+                    "command": ["kafka-exporter", "--kafka.server=kafka:9090"],
+                    "kubernetes": {
+                        "readinessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 10,
+                            "periodSeconds": 10,
+                            "timeoutSeconds": 5,
+                            "successThreshold": 1,
+                            "failureThreshold": 3,
+                        },
+                        "livenessProbe": {
+                            "httpGet": {
+                                "path": "/api/health",
+                                "port": port,
+                            },
+                            "initialDelaySeconds": 60,
+                            "timeoutSeconds": 30,
+                            "failureThreshold": 10,
+                        },
+                    },
+                }
+            ],
+            "kubernetesResources": {
+                "ingressResources": [
+                    {
+                        "name": "{}-ingress".format(app_name),
+                        "annotations": {
+                            "nginx.ingress.kubernetes.io/whitelist-source-range": config.get(
+                                "ingress_whitelist_source_range"
+                            ),
+                        },
+                        "spec": {
+                            "rules": [
+                                {
+                                    "host": app_name,
+                                    "http": {
+                                        "paths": [
+                                            {
+                                                "path": "/",
+                                                "backend": {
+                                                    "serviceName": app_name,
+                                                    "servicePort": port,
+                                                },
+                                            }
+                                        ]
+                                    },
+                                }
+                            ],
+                            "tls": [
+                                {
+                                    "hosts": [app_name],
+                                    "secretName": config.get("tls_secret_name"),
+                                }
+                            ],
+                        },
+                    }
+                ],
+            },
+        }
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertDictEqual(expected_result, spec)
+
+    def test_make_pod_spec_without_image_info(self) -> NoReturn:
+        """Testing make pod spec without image_info."""
+        image_info = None
+        config = {
+            "site_url": "",
+        }
+        relation_state = {
+            "kafka_host": "kafka",
+            "kafka_port": "9090",
+        }
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
+        spec = pod_spec.make_pod_spec(
+            image_info, config, relation_state, app_name, port
+        )
+
+        self.assertIsNone(spec)
+
+    def test_make_pod_spec_without_relation_state(self) -> NoReturn:
+        """Testing make pod spec without relation_state."""
+        image_info = {"upstream-source": "bitnami/kafka-exporter:latest"}
+        config = {
+            "site_url": "",
+        }
+        relation_state = {}
+        app_name = "prometheus-kafka-exporter"
+        port = 9308
+
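+        # Without the kafka host/port in the relation state, the pod spec
+        # cannot be built and a ValueError is expected.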
+        with self.assertRaises(ValueError):
+            pod_spec.make_pod_spec(image_info, config, relation_state, app_name, port)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/installers/charm/prometheus-kafka-exporter/tox.ini b/installers/charm/prometheus-kafka-exporter/tox.ini
new file mode 100644 (file)
index 0000000..cf77aa6
--- /dev/null
@@ -0,0 +1,81 @@
+# Copyright 2021 Canonical Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact: legal@canonical.com
+#
+# To get in touch with the maintainers, please contact:
+# osm-charmers@lists.launchpad.net
+##
+
+[tox]
+skipsdist = True
+envlist = unit, lint
+sitepackages = False
+skip_missing_interpreters = False
+
+[testenv]
+basepython = python3
+setenv =
+  PYTHONHASHSEED=0
+  PYTHONPATH = {toxinidir}/src
+  CHARM_NAME = prometheus-kafka-exporter
+
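+# Build the charm with charmcraft and unpack the resulting
+# prometheus-kafka-exporter.charm into ./release for inspection.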
+[testenv:build]
+passenv = HTTP_PROXY HTTPS_PROXY NO_PROXY
+whitelist_externals =
+  charmcraft
+  rm
+  unzip
+commands =
+  rm -rf release prometheus-kafka-exporter.charm
+  charmcraft build
+  unzip prometheus-kafka-exporter.charm -d release
+
+[testenv:unit]
+commands =
+  coverage erase
+  stestr run --slowest --test-path=./tests --top-dir=./
+  coverage combine
+  coverage html -d cover
+  coverage xml -o cover/coverage.xml
+  coverage report
+deps =
+  coverage
+  stestr
+  mock
+  ops
+setenv =
+  {[testenv]setenv}
+  PYTHON=coverage run
+
+[testenv:lint]
+deps =
+  black
+  yamllint
+  flake8
+commands =
+  black --check --diff . --exclude "build/|.tox/|mod/|lib/"
+  yamllint .
+  flake8 . --max-line-length=100 --ignore="E501,W503,W504,F722" --exclude "build/,.tox/,mod/,lib/"
+
+[coverage:run]
+branch = True
+concurrency = multiprocessing
+parallel = True
+source =
+  .
+omit =
+  .tox/*
+  tests/*