Moves the Prometheus installation to OSM Core so that it is available to the MON Collector.
Sets the Prometheus version to v2.4.3.
Copies the prometheus.yml file to the working directory.
Updates the Prometheus configuration to use the new MON exporter.
Updates the OSM sample dashboard in Grafana to use the new metric names.
Removes the "Kafka Exporter" container (a previous workaround for getting metrics from the bus).
Signed-off-by: lavado <glavado@whitestack.com>
Change-Id: I15451f717d9ee1cfbdee266a84ed29398360ed94
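
A quick post-deployment sanity check of the new wiring (a sketch, assuming the default 9091:9090 host mapping introduced below):

    # query one of the renamed metrics through the Prometheus HTTP API
    curl 'http://localhost:9091/api/v1/query?query=cpu_utilization'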
- netOSM
volumes:
- mongo_db:/data/db
+ prometheus:
+ image: prom/prometheus:${PROMETHEUS_TAG:-latest}
+ hostname: prometheus
+ ports:
+ - "${OSM_PROM_PORTS:-9091:9090}"
+ volumes:
+ - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+ networks:
+ - netOSM
keystone-db:
image: mariadb:10
networks:
external:
name: ${OSM_NETWORK:-netOSM}
services:
- kafka-exporter:
- image: osm/kafka-exporter
- hostname: kafka-exporter
- ports:
- - "12340:12340"
- networks:
- - netOSM
- prometheus:
- image: prom/prometheus
- hostname: prometheus
- volumes:
- - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
- networks:
- - netOSM
- depends_on:
- - kafka-exporter
grafana:
image: grafana/grafana
ports:
- "${OSM_PM_PORTS:-3000:3000}"
networks:
- netOSM
- depends_on:
- - prometheus
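
Note on the port expression above: "${OSM_PROM_PORTS:-9091:9090}" uses compose's default-value interpolation, so with the variable unset the container's 9090 is published on host port 9091. The resolved definition can be inspected (sketch; run from the compose working directory):

    # print the fully-interpolated compose file
    docker-compose -f docker-compose.yaml config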
+++ /dev/null
-FROM phusion/baseimage
-MAINTAINER Gianpietro Lavado "glavado@whitestack.com"
-EXPOSE 12340
-RUN apt-get update && apt-get install -y python python-pip default-jdk maven git
-RUN pip install pyaml && pip install kafka
-RUN mkdir -p /kafka-topic-exporter/config
-RUN cd /tmp && git clone https://github.com/ogibayashi/kafka-topic-exporter.git && cd /tmp/kafka-topic-exporter/ && git checkout v0.0.5 && mvn install
-RUN cp /tmp/kafka-topic-exporter/target/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/
-RUN rm -rf /tmp/kafka-topic-exporter && apt-get remove -y maven git
-COPY kafka-topic-exporter.properties /kafka-topic-exporter/config/kafka-topic-exporter.properties
-COPY mon_to_kafka_exporter.py /kafka-topic-exporter/mon_to_kafka_exporter.py
-COPY initscript.sh /kafka-topic-exporter/initscript.sh
-WORKDIR /kafka-topic-exporter
-ENTRYPOINT ["./initscript.sh"]
+++ /dev/null
-#!/bin/bash
-
-# metric_response topic to kafka_exporter_topic
-nohup python /kafka-topic-exporter/mon_to_kafka_exporter.py kafka:9092 &
-
-# kafka_exporter_topic to prometheus web service
-java -jar /kafka-topic-exporter/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/config/kafka-topic-exporter.properties
+++ /dev/null
-exporter.port=12340
-exporter.metric.expire.seconds=10
-kafka.consumer.topics=kafka_exporter_topic
-bootstrap.servers=kafka:9092
-group.id=kte-group
-client.id=kte
+++ /dev/null
-from kafka import KafkaConsumer, KafkaProducer
-from kafka.errors import KafkaError
-import logging
-import yaml
-import json
-import sys
-import re
-import datetime
-import time
-
-logging.basicConfig(stream=sys.stdout,
- format='%(asctime)s %(message)s',
- datefmt='%m/%d/%Y %I:%M:%S %p',
- level=logging.INFO)
-log = logging.getLogger(__name__)
-
-
-def main():
- if len(sys.argv) <= 1:
- print ("Usage: metric-transformer.py kafka_server")
- exit()
- kafka_server = sys.argv.pop(1)
- kafka_host = kafka_server.split(':')[0]
- kafka_port = kafka_server.split(':')[1]
- transform_messages(kafka_host=kafka_host,
- kafka_port=kafka_port)
-
-
-def transform_messages(kafka_host, kafka_port):
- bootstrap_servers = '{}:{}'.format(kafka_host, kafka_port)
- producer = KafkaProducer(bootstrap_servers=bootstrap_servers,
- key_serializer=str.encode,
- value_serializer=str.encode)
- consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers,
- key_deserializer=str.encode,
- value_deserializer=str.encode)
- consumer.subscribe(["metric_response"])
- for message in consumer:
- try:
- if message.topic == "metric_response":
- if message.key == "read_metric_data_response":
- values = json.loads(message.value)
- new_msg = {
- 'name': values['metric_name'],
- 'value': values['metrics_data']['metrics_series'][-1],
- 'labels': {
- 'resource_uuid': values['resource_uuid']
- }
- }
- log.info("Message to kafka exporter: %s", new_msg)
- future = producer.send(topic='kafka_exporter_topic', key='kafka-exporter-key',
- value=json.dumps(new_msg))
- response = future.get()
- log.info("Response from Kafka: %s", response)
- except Exception as e:
- log.exception("Error processing message: ")
-
-
-if __name__ == '__main__':
- main()
-
"steppedLine": false,
"targets": [
{
- "expr": "kafka_exporter_topic_cpu_utilization",
+ "expr": "cpu_utilization",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "{{resource_uuid}}",
+ "legendFormat": "{{vdu_name}}",
"refId": "A"
}
],
"steppedLine": false,
"targets": [
{
- "expr": "kafka_exporter_topic_average_memory_utilization",
+ "expr": "average_memory_utilization",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
- "legendFormat": "{{resource_uuid}}",
+ "legendFormat": "{{vdu_name}}",
"refId": "A"
}
],
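
The renamed series and the new vdu_name label used by legendFormat can be checked outside Grafana as well (sketch, assuming the default host mapping):

    # list the label sets attached to the metric the panels now query
    curl -s 'http://localhost:9091/api/v1/series?match[]=cpu_utilization'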
+++ /dev/null
-# Copyright 2018 The Prometheus Authors
-# Copyright 2018 Whitestack
-# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# my global config
-global:
- scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
- evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
- # scrape_timeout is set to the global default (10s).
-
-# Alertmanager configuration
-alerting:
- alertmanagers:
- - static_configs:
- - targets:
- # - alertmanager:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
- # - "first_rules.yml"
- # - "second_rules.yml"
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
- # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- - job_name: 'prometheus'
-
- # metrics_path defaults to '/metrics'
- # scheme defaults to 'http'.
-
- static_configs:
- - targets: ['kafka-exporter:12340']
--- /dev/null
+# Copyright 2018 The Prometheus Authors
+# Copyright 2018 Whitestack
+# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# my global config
+global:
+ scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+ evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+ # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+ alertmanagers:
+ - static_configs:
+ - targets:
+ # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+ # - "first_rules.yml"
+ # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's the OSM MON exporter.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # metrics_path defaults to '/metrics'
+ # scheme defaults to 'http'.
+
+ static_configs:
+ - targets: ['mon:8000']
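
Once the stack is up, the scrape target can be verified through the targets endpoint (sketch; assumes the default host mapping):

    # confirm Prometheus reports the MON exporter at mon:8000 as up
    curl -s 'http://localhost:9091/api/v1/targets'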
BUILD_ARGS+=(--build-arg RELEASE="$RELEASE")
BUILD_ARGS+=(--build-arg REPOSITORY_KEY="$REPOSITORY_KEY")
BUILD_ARGS+=(--build-arg REPOSITORY_BASE="$REPOSITORY_BASE")
-
+
if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q KAFKA ; then
sg docker -c "docker pull wurstmeister/zookeeper" || FATAL "cannot get zookeeper docker image"
sg docker -c "docker pull wurstmeister/kafka:${KAFKA_TAG}" || FATAL "cannot get kafka docker image"
sg docker -c "docker pull mongo" || FATAL "cannot get mongo docker image"
fi
+ if [ -z "$TO_REBUILD" ] || echo $TO_REBUILD | grep -q PROMETHEUS ; then
+ sg docker -c "docker pull prom/prometheus:${PROMETHEUS_TAG}" || FATAL "cannot get prometheus docker image"
+ fi
+
if [ -n "$PULL_IMAGES" ]; then
sg docker -c "docker pull ${DOCKER_USER}/mon:${OSM_DOCKER_TAG}" || FATAL "cannot pull MON docker image"
sg docker -c "docker pull ${DOCKER_USER}/pol:${OSM_DOCKER_TAG}" || FATAL "cannot pull POL docker image"
function generate_config_log_folders() {
echo "Generating config and log folders"
$WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/docker-compose.yaml $OSM_DOCKER_WORK_DIR/docker-compose.yaml
+ $WORKDIR_SUDO cp -b ${OSM_DEVOPS}/installers/docker/prometheus.yml $OSM_DOCKER_WORK_DIR/prometheus.yml
echo "Finished generation of config and log folders"
}
OSM_KEYSTONE_PORT=5000
OSM_UI_PORT=80
OSM_MON_PORT=8662
+ OSM_PROM_PORT=9090
+ OSM_PROM_HOSTPORT=9091
[ -n "$INSTALL_ELK" ] && OSM_ELK_PORT=5601
[ -n "$INSTALL_PERFMON" ] && OSM_PM_PORT=3000
-
+
if [ -n "$NO_HOST_PORTS" ]; then
OSM_PORTS+=(OSM_NBI_PORTS=$OSM_NBI_PORT)
OSM_PORTS+=(OSM_RO_PORTS=$OSM_RO_PORT)
OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT)
OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT)
OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT)
+ OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_PORT)
[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT)
[ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT)
else
OSM_PORTS+=(OSM_KEYSTONE_PORTS=$OSM_KEYSTONE_PORT:$OSM_KEYSTONE_PORT)
OSM_PORTS+=(OSM_UI_PORTS=$OSM_UI_PORT:$OSM_UI_PORT)
OSM_PORTS+=(OSM_MON_PORTS=$OSM_MON_PORT:$OSM_MON_PORT)
+ OSM_PORTS+=(OSM_PROM_PORTS=$OSM_PROM_HOSTPORT:$OSM_PROM_PORT)
[ -n "$INSTALL_PERFMON" ] && OSM_PORTS+=(OSM_PM_PORTS=$OSM_PM_PORT:$OSM_PM_PORT)
[ -n "$INSTALL_ELK" ] && OSM_PORTS+=(OSM_ELK_PORTS=$OSM_ELK_PORT:$OSM_ELK_PORT)
fi
echo "export TAG=${OSM_DOCKER_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
echo "export DOCKER_USER=${DOCKER_USER}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
echo "export KAFKA_TAG=${KAFKA_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
+ echo "export PROMETHEUS_TAG=${PROMETHEUS_TAG}" | $WORKDIR_SUDO tee --append $OSM_DOCKER_WORK_DIR/osm_ports.sh
OSM_DOCKER_TAG=latest
DOCKER_USER=osm
KAFKA_TAG=2.11-1.0.2
+PROMETHEUS_TAG=v2.4.3
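
Since the compose file falls back to "latest" when PROMETHEUS_TAG is unset, the pin can also be applied on a manual bring-up (hypothetical one-off invocation):

    # pin the Prometheus image explicitly for a single run
    PROMETHEUS_TAG=v2.4.3 docker-compose up -d prometheus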
while getopts ":hy-:b:r:k:u:R:l:p:D:o:m:H:S:s:w:t:" o; do
case "${o}" in