--- /dev/null
+version: '3'
+
+services:
+
+  # Elasticsearch (OSS build) — stores the documents shipped by Logstash.
+  elasticsearch:
+    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.3
+    volumes:
+      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
+    environment:
+      # Cap the JVM heap so the stack fits on small OSM hosts.
+      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
+    networks:
+      - elk
+
+  # Logstash — ingests events from TCP and the OSM Kafka bus (see logstash.conf).
+  logstash:
+    image: docker.elastic.co/logstash/logstash-oss:6.2.3
+    volumes:
+      - ./logstash.yml:/usr/share/logstash/config/logstash.yml:ro
+      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
+    environment:
+      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
+
+  # Kibana — web UI; the only service published on the host (port 5601).
+  kibana:
+    image: docker.elastic.co/kibana/kibana-oss:6.2.3
+    volumes:
+      - ./kibana.yml:/usr/share/kibana/config/kibana.yml:ro
+    ports:
+      - "5601:5601"
+    networks:
+      - elk
+    depends_on:
+      - elasticsearch
+
+networks:
+  elk:
+    # Join the pre-existing OSM deployment network instead of creating one.
+    external:
+      name: netOSM
--- /dev/null
+---
+# Bind to all interfaces so containers on the shared network can reach ES.
+network.host: 0.0.0.0
+cluster.name: "docker-cluster"
+# Single-node discovery for a standalone dev/monitoring instance.
+discovery.type: single-node
+# NOTE(review): with single-node discovery this zen setting is presumably
+# ignored — confirm against the Elasticsearch 6.2 discovery docs.
+discovery.zen.minimum_master_nodes: 1
--- /dev/null
+---
+# "0" makes Kibana listen on all available network interfaces.
+server.host: "0"
+server.name: kibana
+# Hostname resolves via the Docker network shared with the ES container.
+elasticsearch.url: http://elasticsearch:9200
--- /dev/null
+# Ingest events from a raw TCP socket (port 5000) and from two topics on
+# OSM's Kafka bus.
+input {
+ tcp {
+ port => 5000
+ }
+ kafka {
+ bootstrap_servers => 'kafka:9092'
+ topics => ["alarm_response", "lcm_pm"]
+ }
+}
+
+## Add your filters / logstash plugins configuration here
+
+# Forward all events, unfiltered, to Elasticsearch (service name on the
+# shared Docker network).
+output {
+ elasticsearch {
+ hosts => "elasticsearch:9200"
+ }
+}
--- /dev/null
+---
+# Pipeline definitions are mounted read-only at this path (see docker-compose).
+path.config: /usr/share/logstash/pipeline
+# Expose the Logstash monitoring API on all interfaces.
+http.host: "0.0.0.0"
--- /dev/null
+# config file version
+apiVersion: 1
+
+# Dashboard provider: load every dashboard JSON found under `options.path`.
+providers:
+  - name: 'osm'
+    orgId: 1
+    folder: ''
+    type: file
+    options:
+      path: /etc/grafana/provisioning/dashboards/
--- /dev/null
+# Grafana datasource provisioning for the OSM Prometheus instance.
+# NOTE(review): Grafana 5 provisioning files normally carry `apiVersion: 1`
+# and camelCase keys (orgId, isDefault) — confirm these snake_case keys are
+# honoured by the Grafana version deployed in docker-compose.
+datasources:
+  - access: 'proxy'               # make grafana perform the requests
+    editable: true                # whether it should be editable
+    is_default: true              # whether this should be the default DS
+    name: 'osm_prometheus'        # name of the datasource
+    org_id: 1                     # id of the organization to tie this datasource to
+    type: 'prometheus'            # type of the data source
+    url: 'http://prometheus:9090' # url of the prom instance
+    version: 1                    # well, versioning
--- /dev/null
+version: '3'
+networks:
+  # Join the pre-existing OSM deployment network.
+  netOSM:
+    external: true
+services:
+  # Bridges OSM MON metrics from Kafka into a Prometheus scrape endpoint.
+  kafka-exporter:
+    image: osm/kafka-exporter
+    hostname: kafka-exporter
+    ports:
+      - "12340:12340"
+    networks:
+      - netOSM
+  prometheus:
+    image: prom/prometheus
+    hostname: prometheus
+    volumes:
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+    networks:
+      - netOSM
+    depends_on:
+      - kafka-exporter
+  # Grafana with dashboards and the Prometheus datasource pre-provisioned.
+  grafana:
+    image: grafana/grafana
+    volumes:
+      - ./dashboards-osm.yml:/etc/grafana/provisioning/dashboards/dashboards-osm.yml
+      - ./osm-sample-dashboard.json:/etc/grafana/provisioning/dashboards/osm-sample-dashboard.json
+      - ./datasource-prometheus.yml:/etc/grafana/provisioning/datasources/datasource-prometheus.yml
+    hostname: grafana
+    ports:
+      - "3000:3000"
+    networks:
+      - netOSM
+    depends_on:
+      - prometheus
+
--- /dev/null
+FROM phusion/baseimage
+# MAINTAINER is deprecated; LABEL is the supported replacement.
+LABEL maintainer="Gianpietro Lavado <glavado@whitestack.com>"
+EXPOSE 12340
+# Build kafka-topic-exporter in a single layer so the build-only toolchain
+# (maven, git) and the apt cache can be removed without leaving dead weight
+# in lower image layers (a later `apt-get remove` cannot shrink them).
+RUN apt-get update && \
+    apt-get install -y python python-pip default-jdk maven git && \
+    pip install pyaml && pip install kafka && \
+    mkdir -p /kafka-topic-exporter/config && \
+    cd /tmp && \
+    git clone https://github.com/ogibayashi/kafka-topic-exporter.git && \
+    cd /tmp/kafka-topic-exporter/ && \
+    git checkout v0.0.5 && \
+    mvn install && \
+    cp /tmp/kafka-topic-exporter/target/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/ && \
+    rm -rf /tmp/kafka-topic-exporter && \
+    apt-get remove -y maven git && \
+    apt-get autoremove -y && \
+    rm -rf /var/lib/apt/lists/*
+COPY kafka-topic-exporter.properties /kafka-topic-exporter/config/kafka-topic-exporter.properties
+COPY mon_to_kafka_exporter.py /kafka-topic-exporter/mon_to_kafka_exporter.py
+COPY initscript.sh /kafka-topic-exporter/initscript.sh
+WORKDIR /kafka-topic-exporter
+ENTRYPOINT ["./initscript.sh"]
--- /dev/null
+#!/bin/bash
+
+# metric_response topic to kafka_exporter_topic
+nohup python /kafka-topic-exporter/mon_to_kafka_exporter.py kafka:9092 &
+
+# kafka_exporter_topic to prometheus web service
+java -jar /kafka-topic-exporter/kafka-topic-exporter-0.0.5-jar-with-dependencies.jar /kafka-topic-exporter/config/kafka-topic-exporter.properties
--- /dev/null
+# Configuration for the Java kafka-topic-exporter (see initscript.sh).
+# Port where Prometheus scrapes the exported metrics (matches prometheus.yml).
+exporter.port=12340
+exporter.metric.expire.seconds=10
+# Topic populated by mon_to_kafka_exporter.py.
+kafka.consumer.topics=kafka_exporter_topic
+bootstrap.servers=kafka:9092
+group.id=kte-group
+client.id=kte
--- /dev/null
+from kafka import KafkaConsumer, KafkaProducer
+from kafka.errors import KafkaError
+import logging
+import yaml
+import json
+import sys
+import re
+import datetime
+import time
+
+# Log to stdout so messages are visible via `docker logs`.
+logging.basicConfig(stream=sys.stdout,
+ format='%(asctime)s %(message)s',
+ datefmt='%m/%d/%Y %I:%M:%S %p',
+ level=logging.INFO)
+log = logging.getLogger(__name__)
+
+
+def main():
+ if len(sys.argv) <= 1:
+ print ("Usage: metric-transformer.py kafka_server")
+ exit()
+ kafka_server = sys.argv.pop(1)
+ kafka_host = kafka_server.split(':')[0]
+ kafka_port = kafka_server.split(':')[1]
+ transform_messages(kafka_host=kafka_host,
+ kafka_port=kafka_port)
+
+
+def transform_messages(kafka_host, kafka_port):
+ bootstrap_servers = '{}:{}'.format(kafka_host, kafka_port)
+ producer = KafkaProducer(bootstrap_servers=bootstrap_servers,
+ key_serializer=str.encode,
+ value_serializer=str.encode)
+ consumer = KafkaConsumer(bootstrap_servers=bootstrap_servers,
+ key_deserializer=str.encode,
+ value_deserializer=str.encode)
+ consumer.subscribe(["metric_response"])
+ for message in consumer:
+ try:
+ if message.topic == "metric_response":
+ if message.key == "read_metric_data_response":
+ values = json.loads(message.value)
+ new_msg = {
+ 'name': values['metric_name'],
+ 'value': values['metrics_data']['metrics_series'][-1],
+ 'labels': {
+ 'resource_uuid': values['resource_uuid']
+ }
+ }
+ log.info("Message to kafka exporter: %s", new_msg)
+ future = producer.send(topic='kafka_exporter_topic', key='kafka-exporter-key',
+ value=json.dumps(new_msg))
+ response = future.get()
+ log.info("Response from Kafka: %s", response)
+ except Exception as e:
+ log.exception("Error processing message: ")
+
+
+# Script entry point when launched by initscript.sh inside the container.
+if __name__ == '__main__':
+ main()
+
--- /dev/null
+{
+ "__inputs": [
+ {
+ "name": "osm_prometheus",
+ "label": "osm_prometheus",
+ "description": "",
+ "type": "datasource",
+ "pluginId": "prometheus",
+ "pluginName": "Prometheus"
+ }
+ ],
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "5.0.4"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph",
+ "version": "5.0.0"
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "5.0.0"
+ }
+ ],
+ "annotations": {
+ "list": [
+ {
+ "builtIn": 1,
+ "datasource": "-- Grafana --",
+ "enable": true,
+ "hide": true,
+ "iconColor": "rgba(0, 211, 255, 1)",
+ "name": "Annotations & Alerts",
+ "type": "dashboard"
+ }
+ ]
+ },
+ "editable": true,
+ "gnetId": null,
+ "graphTooltip": 0,
+ "id": null,
+ "links": [],
+ "panels": [
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "osm_prometheus",
+ "fill": 1,
+ "gridPos": {
+ "h": 9,
+ "w": 24,
+ "x": 0,
+ "y": 0
+ },
+ "id": 2,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kafka_exporter_topic_cpu_utilization",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{resource_uuid}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "VDU CPU Metrics",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ },
+ {
+ "aliasColors": {},
+ "bars": false,
+ "dashLength": 10,
+ "dashes": false,
+ "datasource": "osm_prometheus",
+ "fill": 1,
+ "gridPos": {
+ "h": 10,
+ "w": 24,
+ "x": 0,
+ "y": 9
+ },
+ "id": 3,
+ "legend": {
+ "avg": false,
+ "current": false,
+ "max": false,
+ "min": false,
+ "show": true,
+ "total": false,
+ "values": false
+ },
+ "lines": true,
+ "linewidth": 1,
+ "links": [],
+ "nullPointMode": "null",
+ "percentage": false,
+ "pointradius": 5,
+ "points": false,
+ "renderer": "flot",
+ "seriesOverrides": [],
+ "spaceLength": 10,
+ "stack": false,
+ "steppedLine": false,
+ "targets": [
+ {
+ "expr": "kafka_exporter_topic_average_memory_utilization",
+ "format": "time_series",
+ "interval": "",
+ "intervalFactor": 1,
+ "legendFormat": "{{resource_uuid}}",
+ "refId": "A"
+ }
+ ],
+ "thresholds": [],
+ "timeFrom": null,
+ "timeShift": null,
+ "title": "VDU Memory Metrics",
+ "tooltip": {
+ "shared": true,
+ "sort": 0,
+ "value_type": "individual"
+ },
+ "type": "graph",
+ "xaxis": {
+ "buckets": null,
+ "mode": "time",
+ "name": null,
+ "show": true,
+ "values": []
+ },
+ "yaxes": [
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ },
+ {
+ "format": "short",
+ "label": null,
+ "logBase": 1,
+ "max": null,
+ "min": null,
+ "show": true
+ }
+ ]
+ }
+ ],
+ "schemaVersion": 16,
+ "style": "dark",
+ "tags": [],
+ "templating": {
+ "list": []
+ },
+ "time": {
+ "from": "now-6h",
+ "to": "now"
+ },
+ "timepicker": {
+ "refresh_intervals": [
+ "5s",
+ "10s",
+ "30s",
+ "1m",
+ "5m",
+ "15m",
+ "30m",
+ "1h",
+ "2h",
+ "1d"
+ ],
+ "time_options": [
+ "5m",
+ "15m",
+ "1h",
+ "6h",
+ "12h",
+ "24h",
+ "2d",
+ "7d",
+ "30d"
+ ]
+ },
+ "timezone": "",
+ "title": "OSM Sample Dashboard",
+ "uid": "x5vtPXmik",
+ "version": 1
+}
--- /dev/null
+# Copyright 2018 The Prometheus Authors
+# Copyright 2018 Whitestack
+# Copyright 2018 Telefonica Investigacion y Desarrollo, S.A.U.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# my global config
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration (no alertmanager deployed; target left commented)
+alerting:
+  alertmanagers:
+    - static_configs:
+        - targets:
+          # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      # Scrape the kafka-exporter bridge (see docker-compose / Dockerfile).
+      - targets: ['kafka-exporter:12340']