Refreshing osm_metrics stack 81/6681/8
author: lavado <glavado@whitestack.com>
Wed, 10 Oct 2018 21:44:28 +0000 (23:44 +0200)
committer: lavado <glavado@whitestack.com>
Fri, 19 Oct 2018 21:50:20 +0000 (16:50 -0500)
Moves the Prometheus installation into OSM Core so that it is available to the MON Collector.
Sets Prometheus version to v2.4.3
Copies prometheus.yml file to the working dir
Modifies configurations in Prometheus to leverage new MON exporter.
Modifies OSM Sample Dashboard at Grafana to point to new metric names.
Removes "Kafka Exporter" container (previous workaround to get metrics from bus)

Signed-off-by: lavado <glavado@whitestack.com>
Change-Id: I15451f717d9ee1cfbdee266a84ed29398360ed94

installers/docker/docker-compose.yaml
installers/docker/osm_metrics/docker-compose.yml
installers/docker/osm_metrics/kafka-exporter/Dockerfile [deleted file]
installers/docker/osm_metrics/kafka-exporter/initscript.sh [deleted file]
installers/docker/osm_metrics/kafka-exporter/kafka-topic-exporter.properties [deleted file]
installers/docker/osm_metrics/kafka-exporter/mon_to_kafka_exporter.py [deleted file]
installers/docker/osm_metrics/osm-sample-dashboard.json
installers/docker/osm_metrics/prometheus.yml [deleted file]
installers/docker/prometheus.yml [new file with mode: 0644]
installers/full_install_osm.sh

index c0ca27d..e45fabd 100644 (file)
@@ -46,6 +46,15 @@ services:
       - netOSM
     volumes:
       - mongo_db:/data/db
+  prometheus:
+    image: prom/prometheus:${PROMETHEUS_TAG:-latest}
+    hostname: prometheus
+    ports:
+      - "${OSM_PROM_PORTS:-9091:9090}"
+    volumes:
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+    networks:
+      - netOSM
   keystone-db:
     image: mariadb:10
     networks:
index f529a01..fb0e835 100644 (file)
@@ -4,22 +4,6 @@ networks:
     external:
        name: ${OSM_NETWORK:-netOSM}
 services:
-  kafka-exporter:
-    image: osm/kafka-exporter
-    hostname: kafka-exporter
-    ports:
-      - "12340:12340"
-    networks:
-      - netOSM
-  prometheus:
-    image: prom/prometheus
-    hostname: prometheus
-    volumes:
-      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
-    networks:
-      - netOSM
-    depends_on:
-      - kafka-exporter
   grafana:
     image: grafana/grafana
     volumes:
@@ -31,5 +15,3 @@ services:
       - "${OSM_PM_PORTS:-3000:3000}"
     networks:
       - netOSM
-    depends_on:
-      - prometheus
diff --git a/installers/docker/osm_metrics/kafka-exporter/Dockerfile b/installers/docker/osm_metrics/kafka-exporter/Dockerfile
deleted file mode 100644 (file)
index c2a7ef7..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM phusion/baseimage
-MAINTAINER Gianpietro Lavado "glavado@whitestack.com"
-EXPOSE 12340
-RUN apt-get update && apt-get install -y python python-pip default-jdk maven git
-RUN pip install pyaml && pip install kafka
-RUN mkdir -p /kafka-topic-exporter/config
-RUN cd /tmp && git clone https://github.com/ogibayashi/kafka-topic-exporter.git && cd /tmp/kafka-topic-exporter/ && git checkout v0.0.5 && mvn install
-RUN cp /tmp/kafka-topic-exporter/target/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/
-RUN rm -rf /tmp/kafka-topic-exporter && apt-get remove -y maven git
-COPY kafka-topic-exporter.properties /kafka-topic-exporter/config/kafka-topic-exporter.properties
-COPY mon_to_kafka_exporter.py /kafka-topic-exporter/mon_to_kafka_exporter.py
-COPY initscript.sh /kafka-topic-exporter/initscript.sh
-WORKDIR /kafka-topic-exporter
-ENTRYPOINT ["./initscript.sh"]
diff --git a/installers/docker/osm_metrics/kafka-exporter/initscript.sh b/installers/docker/osm_metrics/kafka-exporter/initscript.sh
deleted file mode 100755 (executable)
index 8bbea9e..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-
-# metric_response topic to kafka_exporter_topic
-nohup python /kafka-topic-exporter/mon_to_kafka_exporter.py kafka:9092 &
-
-# kafka_exporter_topic to prometheus web service
-java -jar /kafka-topic-exporter/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/config/kafka-topic-exporter.properties
diff --git a/installers/docker/osm_metrics/kafka-exporter/kafka-topic-exporter.properties b/installers/docker/osm_metrics/kafka-exporter/kafka-topic-exporter.properties
deleted file mode 100755 (executable)
index 755f8af..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-exporter.port=12340
-exporter.metric.expire.seconds=10
-kafka.consumer.topics=kafka_exporter_topic
-bootstrap.servers=kafka:9092
-group.id=kte-group
-client.id=kte
diff --git a/installers/docker/osm_metrics/kafka-exporter/mon_to_kafka_exporter.py b/installers/docker/osm_metrics/kafka-exporter/mon_to_kafka_exporter.py
deleted file mode 100644 (file)
index e3dbf0e..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-from kafka import KafkaConsumer, KafkaProducer
-from kafka.errors import KafkaError
-import logging
-import yaml
-import json
-import sys
-import re
-import datetime
-import time
-
-logging.basicConfig(stream=sys.stdout,
-                    format='%(asctime)s %(message)s',
-                    datefmt='%m/%d/%Y %I:%M:%S %p',
-                    level=logging.INFO)
-log = logging.getLogger(__name__)
-
-
-def main():
-    if len(sys.argv) <= 1:
-        print ("Usage: metric-transformer.py kafka_server")
-        exit()
-    kafka_server = sys.argv.pop(1)
-    kafka_host = kafka_server.split(':')[0]
-    kafka_port = kafka_server.split(':')[1]
-    transform_messages(kafka_host=kafka_host,
-                       kafka_port=kafka_port)
-
-
-def transform_messages(kafka_host, kafka_port):
-    bootstrap_servers = '{}:{}'.format(kafka_host, kafka_port)
-    producer = KafkaProducer(bootstrap_servers=bootstrap_servers,
-                             key_serializer=str.encode,
-                             value_serializer=str.encode)
-    consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers,
-                             key_deserializer=str.encode,
-                             value_deserializer=str.encode)
-    consumer.subscribe(["metric_response"])
-    for message in consumer:
-        try:
-            if message.topic == "metric_response":
-                if message.key == "read_metric_data_response":
-                    values = json.loads(message.value)
-                    new_msg = {
-                        'name': values['metric_name'],
-                        'value': values['metrics_data']['metrics_series'][-1],
-                        'labels': {
-                            'resource_uuid': values['resource_uuid']
-                        }
-                    }
-                    log.info("Message to kafka exporter: %s", new_msg)
-                    future = producer.send(topic='kafka_exporter_topic', key='kafka-exporter-key',
-                                           value=json.dumps(new_msg))
-                    response = future.get()
-                    log.info("Response from Kafka: %s", response)
-        except Exception as e:
-            log.exception("Error processing message: ")
-
-
-if __name__ == '__main__':
-    main()
-
index 35a165d..a640c50 100644 (file)
       "steppedLine": false,
       "targets": [
         {
-          "expr": "kafka_exporter_topic_cpu_utilization",
+          "expr": "cpu_utilization",
           "format": "time_series",
           "interval": "",
           "intervalFactor": 1,
-          "legendFormat": "{{resource_uuid}}",
+          "legendFormat": "{{vdu_name}}",
           "refId": "A"
         }
       ],
       "steppedLine": false,
       "targets": [
         {
-          "expr": "kafka_exporter_topic_average_memory_utilization",
+          "expr": "average_memory_utilization",
           "format": "time_series",
           "interval": "",
           "intervalFactor": 1,
-          "legendFormat": "{{resource_uuid}}",
+          "legendFormat": "{{vdu_name}}",
           "refId": "A"
         }
       ],
diff --git a/installers/docker/osm_metrics/prometheus.yml b/installers/docker/osm_metrics/prometheus.yml
deleted file mode 100644 (file)
index ff1e176..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2018 The Prometheus Authors
-# Copyright 2018 Whitestack
-# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# my global config
-global:
-  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
-  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
-  # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
-  alertmanagers:
-  - static_configs:
-    - targets:
-      # - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-  # - "first_rules.yml"
-  # - "second_rules.yml"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-    - targets: ['kafka-exporter:12340']
diff --git a/installers/docker/prometheus.yml b/installers/docker/prometheus.yml
new file mode 100644 (file)
index 0000000..2d535a0
--- /dev/null
@@ -0,0 +1,45 @@
+# Copyright 2018 The Prometheus Authors
+# Copyright 2018 Whitestack
+# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# my global config
+global:
+  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+  - static_configs:
+    - targets:
+      # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+    - targets: ['mon:8000']
index 2774e0d..5c6e253 100755 (executable)
@@ -646,7 +646,7 @@ function generate_docker_images() {
     BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
     BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
     BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
-    
+
     if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
         sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
         sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
@@ -656,6 +656,10 @@ function generate_docker_images() {
         sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
     fi
 
+    if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
+        sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
+    fi
+
     if [ -n "$PULL_IMAGES" ]; then
         sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
         sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
@@ -722,6 +726,7 @@ function cmp_overwrite() {
 function generate_config_log_folders() {
     echo "Generating config and log folders"
     $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
+    $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
     echo "Finished generation of config and log folders"
 }
 
@@ -791,15 +796,18 @@ function deploy_lightweight() {
     OSM_KEYSTONE_PORT=5000
     OSM_UI_PORT=80
     OSM_MON_PORT=8662
+    OSM_PROM_PORT=9090
+    OSM_PROM_HOSTPORT=9091
     [ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
     [ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
-    
+
     if [ -n "$NO_HOST_PORTS" ]; then
         OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
         OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
         OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
         OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
         OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
+        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
         [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
         [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
     else
@@ -808,6 +816,7 @@ function deploy_lightweight() {
         OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
         OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
         OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
+        OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
         [ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
         [ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
     fi
@@ -816,6 +825,7 @@ function deploy_lightweight() {
     echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
     echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
     echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
+    echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
 
 
 
@@ -1075,6 +1085,7 @@ OSM_WORK_DIR="/etc/osm"
 OSM_DOCKER_TAG=latest
 DOCKER_USER=osm
 KAFKA_TAG=2.11-1.0.2
+PROMETHEUS_TAG=v2.4.3
 
 while getopts ":hy-:b:r:k:u:R:l:p:D:o:m:H:S:s:w:t:" o; do
     case "${o}" in