# For those usages not covered by the Apache License, Version 2.0 please
# contact: bdiaz@whitestack.com or glavado@whitestack.com
##
-import json
import logging
-import random
-import uuid
-from collections import Iterable
-
-from kafka import KafkaProducer, KafkaConsumer
-from osm_common import dbmongo
-from prometheus_client.core import GaugeMetricFamily
+import multiprocessing
+import time
+
+from osm_mon.collector.backends.prometheus import PrometheusBackend
+from osm_mon.collector.collectors.juju import VCACollector
+from osm_mon.collector.collectors.openstack import OpenstackCollector
+from osm_mon.collector.collectors.vmware import VMwareCollector
+from osm_mon.core.common_db import CommonDbClient
+from osm_mon.core.database import DatabaseManager
 from osm_mon.core.settings import Config
 
 log = logging.getLogger(__name__)
 
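+# Maps each supported VIM type to its collector; other VIM types are
+# skipped with a debug message in _collect_vim_metrics below.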
+VIM_COLLECTORS = {
+    "openstack": OpenstackCollector,
+    "vmware": VMwareCollector
+}
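+# Metric backends that receive every batch of collected metrics.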
+METRIC_BACKENDS = [
+    PrometheusBackend
+]
+
+
-class MonCollector:
+class Collector:
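+    """Collect metrics from VIMs and the VCA in parallel and hand them
+    to the configured metric backends."""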
     def __init__(self):
+        self.common_db = CommonDbClient()
+        self.plugins = []
+        self.database_manager = DatabaseManager()
+        self.database_manager.create_tables()
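+        # Queue shared with the collector worker processes; the parent
+        # process drains it in collect_metrics() after joining the workers.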
+        self.queue = multiprocessing.Queue()
+
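+    # Presumably called once at startup, before the first collection pass,
+    # so that collected metrics have at least one backend to be sent to.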
+    def init_backends(self):
+        for backend in METRIC_BACKENDS:
+            self.plugins.append(backend())
+
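+    # Main loop: run one collection pass, then sleep for the configured
+    # interval (OSMMON_COLLECTOR_INTERVAL) and repeat, surviving any error.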
+    def collect_forever(self):
+        log.debug('collect_forever')
         cfg = Config.instance()
-        self.kafka_server = cfg.BROKER_URI
-        self.common_db_host = cfg.MONGO_URI.split(':')[0]
-        self.common_db_port = cfg.MONGO_URI.split(':')[1]
-        self.common_db = dbmongo.DbMongo()
-        self.common_db.db_connect({'host': self.common_db_host, 'port': int(self.common_db_port), 'name': 'osm'})
-        self.producer = KafkaProducer(bootstrap_servers=self.kafka_server,
-                                      key_serializer=str.encode,
-                                      value_serializer=str.encode)
-        self.consumer = KafkaConsumer(bootstrap_servers=self.kafka_server,
-                                      key_deserializer=bytes.decode,
-                                      value_deserializer=bytes.decode,
-                                      consumer_timeout_ms=10000,
-                                      group_id='mon-collector-' + str(uuid.uuid4()))
-        self.consumer.subscribe(['metric_response'])
+        while True:
+            try:
+                self.collect_metrics()
+            except Exception:
+                log.exception("Error collecting metrics")
+            # Sleep outside the try block so a failing pass still waits for
+            # the next interval instead of retrying in a tight loop.
+            time.sleep(cfg.OSMMON_COLLECTOR_INTERVAL)
+
-    def collect_metrics(self) -> Iterable:
-        # TODO(diazb): Remove dependencies on prometheus_client
-        log.debug("collect_metrics")
-        metrics = {}
-        vnfrs = self.common_db.get_list('vnfrs')
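+    # Runs in a child process: resolves the VIM type for the given account
+    # and delegates to the matching collector, pushing metrics onto the queue.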
+    def _collect_vim_metrics(self, vnfr: dict, vim_account_id: str):
+        # TODO(diazb) Add support for vrops and aws
+        vim_type = self.database_manager.get_vim_type(vim_account_id)
+        if vim_type in VIM_COLLECTORS:
+            collector = VIM_COLLECTORS[vim_type](vim_account_id)
+            metrics = collector.collect(vnfr)
+            for metric in metrics:
+                self.queue.put(metric)
+        else:
+            log.debug("VIM type %s is not supported.", vim_type)
+
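+    # Runs in a child process: VCA (Juju) metrics are VIM-independent, so a
+    # single VCACollector handles any VNF record.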
+    def _collect_vca_metrics(self, vnfr: dict):
+        log.debug('_collect_vca_metrics')
+        log.debug('vnfr: %s', vnfr)
+        vca_collector = VCACollector()
+        metrics = vca_collector.collect(vnfr)
+        for metric in metrics:
+            self.queue.put(metric)
+
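+    # Fans out one VIM process and one VCA process per VNF record, waits for
+    # all of them to finish, then drains the queue and hands the resulting
+    # batch to every registered backend plugin.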
+    def collect_metrics(self):
+        vnfrs = self.common_db.get_vnfrs()
+        processes = []
         for vnfr in vnfrs:
             nsr_id = vnfr['nsr-id-ref']
-            vnfd = self.common_db.get_one('vnfds', {"_id": vnfr['vnfd-id']})
-            payloads = self._generate_metric_data_payloads(vnfr, vnfd)
-            for payload in payloads:
-                cor_id = payload['correlation_id']
-                metric_name = payload['metric_name']
-                vnf_member_index = payload['vnf_member_index']
-                vdu_name = payload['vdu_name']
-                self.producer.send(topic='metric_request', key='read_metric_data_request',
-                                   value=json.dumps(payload))
-                self.producer.flush()
-                for message in self.consumer:
-                    if message.key == 'read_metric_data_response':
-                        content = json.loads(message.value)
-                        if content['correlation_id'] == cor_id:
-                            if len(content['metrics_data']['metrics_series']):
-                                metric_reading = content['metrics_data']['metrics_series'][-1]
-                                if metric_name not in metrics.keys():
-                                    metrics[metric_name] = GaugeMetricFamily(
-                                        metric_name,
-                                        'OSM metric',
-                                        labels=['ns_id', 'vnf_member_index', 'vdu_name']
-                                    )
-                                metrics[metric_name].add_metric([nsr_id, vnf_member_index, vdu_name],
-                                                                metric_reading)
-                                break
-        return metrics.values()
-
-    @staticmethod
-    def _generate_metric_data_payloads(vnfr: dict, vnfd: dict) -> list:
-        log.debug('_generate_metric_data_payloads')
-        payloads = []
-        nsr_id = vnfr['nsr-id-ref']
-        for vdur in vnfr['vdur']:
-            # This avoids errors when vdur records have not been completely filled
-            if 'name' not in vdur:
-                continue
-            vdu = next(
-                filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
-            )
-            if 'monitoring-param' in vdu:
-                for param in vdu['monitoring-param']:
-                    metric_name = param['nfvi-metric']
-                    vnf_member_index = vnfr['member-vnf-index-ref']
-                    vdu_name = vdur['name']
-                    cor_id = random.randint(1, 10e7)
-                    payload = {
-                        'correlation_id': cor_id,
-                        'metric_name': metric_name,
-                        'ns_id': nsr_id,
-                        'vnf_member_index': vnf_member_index,
-                        'vdu_name': vdu_name,
-                        'collection_period': 1,
-                        'collection_unit': 'DAY',
-                    }
-                    payloads.append(payload)
-        return payloads
+            vnf_member_index = vnfr['member-vnf-index-ref']
+            vim_account_id = self.common_db.get_vim_account_id(nsr_id, vnf_member_index)
+            p = multiprocessing.Process(target=self._collect_vim_metrics,
+                                        args=(vnfr, vim_account_id))
+            processes.append(p)
+            p.start()
+            p = multiprocessing.Process(target=self._collect_vca_metrics,
+                                        args=(vnfr,))
+            processes.append(p)
+            p.start()
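+        # Caveat: joining before draining can deadlock if a child blocks on a
+        # full queue, and Queue.empty() is only approximate; this is assumed
+        # to be safe here because each pass produces a small metric batch.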
+        for process in processes:
+            process.join()
+        metrics = []
+        while not self.queue.empty():
+            metrics.append(self.queue.get())
+        for plugin in self.plugins:
+            plugin.handle(metrics)