Adds collection of VM status metric in OpenStack infra plugin
[osm/MON.git] / osm_mon / collector / collector.py
index 9bf3953..8b0d425 100644 (file)
 # For those usages not covered by the Apache License, Version 2.0 please
 # contact: bdiaz@whitestack.com or glavado@whitestack.com
 ##
-import json
 import logging
-import random
-import uuid
-from collections import Iterable
+import multiprocessing
+import time
 
-from kafka import KafkaProducer, KafkaConsumer
-from osm_common import dbmongo
-from prometheus_client.core import GaugeMetricFamily
+import peewee
 
-from osm_mon.core.settings import Config
+from osm_mon.collector.backends.prometheus import PrometheusBackend
+from osm_mon.collector.infra_collectors.openstack import OpenstackInfraCollector
+from osm_mon.collector.vnf_collectors.juju import VCACollector
+from osm_mon.collector.vnf_collectors.openstack import OpenstackCollector
+from osm_mon.collector.vnf_collectors.vmware import VMwareCollector
+from osm_mon.core.common_db import CommonDbClient
+from osm_mon.core.config import Config
+from osm_mon.core.database import DatabaseManager
 
 log = logging.getLogger(__name__)
 
+# Maps a VIM type string -> per-VNF metric collector class for that VIM.
+VIM_COLLECTORS = {
+    "openstack": OpenstackCollector,
+    "vmware": VMwareCollector
+}
+# Maps a VIM type string -> infrastructure-status collector class (VIM-level
+# metrics such as VM status, independent of any single VNF).
+VIM_INFRA_COLLECTORS = {
+    "openstack": OpenstackInfraCollector
+}
+# Metric backend classes; one instance of each is created by
+# Collector._init_backends() and receives every collected metric batch.
+METRIC_BACKENDS = [
+    PrometheusBackend
+]
 
-class MonCollector:
-    def __init__(self):
-        cfg = Config.instance()
-        self.kafka_server = cfg.BROKER_URI
-        self.common_db_host = cfg.MONGO_URI.split(':')[0]
-        self.common_db_port = cfg.MONGO_URI.split(':')[1]
-        self.common_db = dbmongo.DbMongo()
-        self.common_db.db_connect({'host': self.common_db_host, 'port': int(self.common_db_port), 'name': 'osm'})
-        self.producer = KafkaProducer(bootstrap_servers=self.kafka_server,
-                                      key_serializer=str.encode,
-                                      value_serializer=str.encode)
-        self.consumer = KafkaConsumer(bootstrap_servers=self.kafka_server,
-                                      key_deserializer=bytes.decode,
-                                      value_deserializer=bytes.decode,
-                                      consumer_timeout_ms=10000,
-                                      group_id='mon-collector-' + str(uuid.uuid4()))
-        self.consumer.subscribe(['metric_response'])
-
-    def collect_metrics(self) -> Iterable:
-        # TODO(diazb): Remove dependencies on prometheus_client
-        log.debug("collect_metrics")
-        metrics = {}
-        vnfrs = self.common_db.get_list('vnfrs')
+
+class Collector:
+    # Fans out metric collection to per-VNF / per-VIM worker processes and
+    # forwards the gathered metrics to the configured metric backends.
+    def __init__(self, config: Config):
+        self.conf = config
+        self.common_db = CommonDbClient(self.conf)
+        # Backend plugin instances; populated by _init_backends() below.
+        self.plugins = []
+        self.database_manager = DatabaseManager(self.conf)
+        # Make sure the relational schema exists before any collector runs.
+        self.database_manager.create_tables()
+        # Worker processes push collected metrics here; the parent drains it
+        # in collect_metrics() after joining the workers.
+        self.queue = multiprocessing.Queue()
+        self._init_backends()
+
+    def collect_forever(self):
+        """Run collection rounds in an endless loop, sleeping the configured
+        'collector.interval' seconds between rounds."""
+        log.debug('collect_forever')
+        while True:
+            try:
+                self.collect_metrics()
+                time.sleep(int(self.conf.get('collector', 'interval')))
+            except peewee.PeeweeException:
+                # Database errors are treated as fatal: log and re-raise so
+                # the process terminates instead of looping on a broken DB.
+                log.exception("Database error consuming message: ")
+                raise
+            except Exception:
+                # Any other failure is logged and the loop keeps going.
+                log.exception("Error collecting metrics")
+
+    def _collect_vim_metrics(self, vnfr: dict, vim_account_id: str):
+        """Collect VNF metrics from the VIM backing *vim_account_id* and push
+        them onto the shared queue. Runs as a child process target."""
+        # TODO(diazb) Add support for vrops and aws
+        # NOTE(review): a fresh DatabaseManager is built per call —
+        # presumably so each child process gets its own DB connection
+        # rather than sharing the parent's; confirm.
+        database_manager = DatabaseManager(self.conf)
+        vim_type = database_manager.get_vim_type(vim_account_id)
+        if vim_type in VIM_COLLECTORS:
+            collector = VIM_COLLECTORS[vim_type](self.conf, vim_account_id)
+            metrics = collector.collect(vnfr)
+            for metric in metrics:
+                self.queue.put(metric)
+        else:
+            log.debug("vimtype %s is not supported.", vim_type)
+
+    def _collect_vim_infra_metrics(self, vim_account_id: str):
+        """Collect VIM-level infrastructure metrics (e.g. VM status) for
+        *vim_account_id* and push them onto the shared queue. Runs as a
+        child process target."""
+        # Fresh DatabaseManager per call, mirroring _collect_vim_metrics.
+        database_manager = DatabaseManager(self.conf)
+        vim_type = database_manager.get_vim_type(vim_account_id)
+        if vim_type in VIM_INFRA_COLLECTORS:
+            collector = VIM_INFRA_COLLECTORS[vim_type](self.conf, vim_account_id)
+            metrics = collector.collect()
+            for metric in metrics:
+                self.queue.put(metric)
+        else:
+            log.debug("vimtype %s is not supported.", vim_type)
+
+    def _collect_vca_metrics(self, vnfr: dict):
+        """Collect metrics for *vnfr* through the VCA (juju) collector and
+        push them onto the shared queue. Runs as a child process target."""
+        log.debug('_collect_vca_metrics')
+        log.debug('vnfr: %s', vnfr)
+        vca_collector = VCACollector(self.conf)
+        metrics = vca_collector.collect(vnfr)
+        for metric in metrics:
+            self.queue.put(metric)
+
+    def collect_metrics(self):
+        """Run one collection round.
+
+        Spawns one process per VNF record for VIM metrics and one for VCA
+        metrics, plus one process per VIM account for infrastructure
+        metrics; then joins the workers, drains the shared queue and hands
+        the metric batch to every backend plugin.
+        """
+        vnfrs = self.common_db.get_vnfrs()
+        processes = []
         for vnfr in vnfrs:
             nsr_id = vnfr['nsr-id-ref']
-            vnfd = self.common_db.get_one('vnfds', {"_id": vnfr['vnfd-id']})
-            payloads = self._generate_metric_data_payloads(vnfr, vnfd)
-            for payload in payloads:
-                cor_id = payload['correlation_id']
-                metric_name = payload['metric_name']
-                vnf_member_index = payload['vnf_member_index']
-                vdu_name = payload['vdu_name']
-                self.producer.send(topic='metric_request', key='read_metric_data_request',
-                                   value=json.dumps(payload))
-                self.producer.flush()
-                for message in self.consumer:
-                    if message.key == 'read_metric_data_response':
-                        content = json.loads(message.value)
-                        if content['correlation_id'] == cor_id:
-                            if len(content['metrics_data']['metrics_series']):
-                                metric_reading = content['metrics_data']['metrics_series'][-1]
-                                if metric_name not in metrics.keys():
-                                    metrics[metric_name] = GaugeMetricFamily(
-                                        metric_name,
-                                        'OSM metric',
-                                        labels=['ns_id', 'vnf_member_index', 'vdu_name']
-                                    )
-                                metrics[metric_name].add_metric([nsr_id, vnf_member_index, vdu_name],
-                                                                metric_reading)
-                            break
-        return metrics.values()
-
-    @staticmethod
-    def _generate_metric_data_payloads(vnfr: dict, vnfd: dict) -> list:
-        log.debug('_generate_metric_data_payloads')
-        payloads = []
-        nsr_id = vnfr['nsr-id-ref']
-        for vdur in vnfr['vdur']:
-            # This avoids errors when vdur records have not been completely filled
-            if 'name' not in vdur:
-                continue
-            vdu = next(
-                filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
-            )
-            if 'monitoring-param' in vdu:
-                for param in vdu['monitoring-param']:
-                    metric_name = param['nfvi-metric']
-                    vnf_member_index = vnfr['member-vnf-index-ref']
-                    vdu_name = vdur['name']
-                    cor_id = random.randint(1, 10e7)
-                    payload = {
-                        'correlation_id': cor_id,
-                        'metric_name': metric_name,
-                        'ns_id': nsr_id,
-                        'vnf_member_index': vnf_member_index,
-                        'vdu_name': vdu_name,
-                        'collection_period': 1,
-                        'collection_unit': 'DAY',
-                    }
-                    payloads.append(payload)
-        return payloads
+            vnf_member_index = vnfr['member-vnf-index-ref']
+            vim_account_id = self.common_db.get_vim_account_id(nsr_id, vnf_member_index)
+            # One worker for VIM metrics of this VNF...
+            p = multiprocessing.Process(target=self._collect_vim_metrics,
+                                        args=(vnfr, vim_account_id))
+            processes.append(p)
+            p.start()
+            # ...and another for its VCA (juju) metrics.
+            p = multiprocessing.Process(target=self._collect_vca_metrics,
+                                        args=(vnfr,))
+            processes.append(p)
+            p.start()
+        # Infra metrics are per VIM account, not per VNF.
+        vims = self.common_db.get_vim_accounts()
+        for vim in vims:
+            p = multiprocessing.Process(target=self._collect_vim_infra_metrics,
+                                        args=(vim['_id'],))
+            processes.append(p)
+            p.start()
+        # NOTE(review): join(timeout=10) does not terminate a straggler; a
+        # worker still running after 10s is left behind and its metrics are
+        # simply missed this round — confirm this is intended.
+        for process in processes:
+            process.join(timeout=10)
+        metrics = []
+        while not self.queue.empty():
+            metrics.append(self.queue.get())
+        # Deliver the whole batch to every backend (e.g. Prometheus).
+        for plugin in self.plugins:
+            plugin.handle(metrics)
+
+    def _init_backends(self):
+        # Instantiate one plugin object per registered backend class.
+        for backend in METRIC_BACKENDS:
+            self.plugins.append(backend())