import logging
import multiprocessing
import time

import peewee

from osm_mon.collector.backends.prometheus import PrometheusBackend
from osm_mon.collector.infra_collectors.onos import OnosInfraCollector
from osm_mon.collector.infra_collectors.openstack import OpenstackInfraCollector
from osm_mon.collector.vnf_collectors.juju import VCACollector
from osm_mon.collector.vnf_collectors.openstack import OpenstackCollector
from osm_mon.collector.vnf_collectors.vio import VIOCollector
from osm_mon.collector.vnf_collectors.vmware import VMwareCollector
from osm_mon.core.common_db import CommonDbClient
from osm_mon.core.config import Config
from osm_mon.core.database import DatabaseManager
log = logging.getLogger(__name__)
VIM_COLLECTORS = {
- "openstack": OpenstackCollector
+ "openstack": OpenstackCollector,
+ "vmware": VMwareCollector,
+ "vio": VIOCollector
+}
+VIM_INFRA_COLLECTORS = {
+ "openstack": OpenstackInfraCollector
+}
+SDN_INFRA_COLLECTORS = {
+ "onos": OnosInfraCollector
}
METRIC_BACKENDS = [
PrometheusBackend
class Collector:
- def __init__(self):
- self.common_db = CommonDbClient()
+ def __init__(self, config: Config):
+ self.conf = config
+ self.common_db = CommonDbClient(self.conf)
self.plugins = []
- self.database_manager = DatabaseManager()
+ self.database_manager = DatabaseManager(self.conf)
self.database_manager.create_tables()
self.queue = multiprocessing.Queue()
-
- def init_backends(self):
- for backend in METRIC_BACKENDS:
- self.plugins.append(backend())
+ self._init_backends()
def collect_forever(self):
log.debug('collect_forever')
- cfg = Config.instance()
while True:
try:
self.collect_metrics()
- time.sleep(cfg.OSMMON_COLLECTOR_INTERVAL)
+ time.sleep(int(self.conf.get('collector', 'interval')))
+ except peewee.PeeweeException:
+ log.exception("Database error consuming message: ")
+ raise
except Exception:
log.exception("Error collecting metrics")
def _collect_vim_metrics(self, vnfr: dict, vim_account_id: str):
- # TODO(diazb) Add support for vrops and aws
- vim_type = self.database_manager.get_vim_type(vim_account_id)
+ # TODO(diazb) Add support for aws
+ database_manager = DatabaseManager(self.conf)
+ vim_type = database_manager.get_vim_type(vim_account_id)
if vim_type in VIM_COLLECTORS:
- collector = VIM_COLLECTORS[vim_type](vim_account_id)
+ collector = VIM_COLLECTORS[vim_type](self.conf, vim_account_id)
metrics = collector.collect(vnfr)
for metric in metrics:
self.queue.put(metric)
else:
log.debug("vimtype %s is not supported.", vim_type)
+ def _collect_vim_infra_metrics(self, vim_account_id: str):
+ database_manager = DatabaseManager(self.conf)
+ vim_type = database_manager.get_vim_type(vim_account_id)
+ if vim_type in VIM_INFRA_COLLECTORS:
+ collector = VIM_INFRA_COLLECTORS[vim_type](self.conf, vim_account_id)
+ metrics = collector.collect()
+ for metric in metrics:
+ self.queue.put(metric)
+ else:
+ log.debug("vimtype %s is not supported.", vim_type)
+
+ def _collect_sdnc_infra_metrics(self, sdnc_id: str):
+ common_db = CommonDbClient(self.conf)
+ sdn_type = common_db.get_sdnc(sdnc_id)['type']
+ if sdn_type in SDN_INFRA_COLLECTORS:
+ collector = SDN_INFRA_COLLECTORS[sdn_type](self.conf, sdnc_id)
+ metrics = collector.collect()
+ for metric in metrics:
+ self.queue.put(metric)
+ else:
+ log.debug("sdn_type %s is not supported.", sdn_type)
+
def _collect_vca_metrics(self, vnfr: dict):
log.debug('_collect_vca_metrics')
log.debug('vnfr: %s', vnfr)
- vca_collector = VCACollector()
+ vca_collector = VCACollector(self.conf)
metrics = vca_collector.collect(vnfr)
for metric in metrics:
self.queue.put(metric)
args=(vnfr,))
processes.append(p)
p.start()
+ vims = self.common_db.get_vim_accounts()
+ for vim in vims:
+ p = multiprocessing.Process(target=self._collect_vim_infra_metrics,
+ args=(vim['_id'],))
+ processes.append(p)
+ p.start()
+ sdncs = self.common_db.get_sdncs()
+ for sdnc in sdncs:
+ p = multiprocessing.Process(target=self._collect_sdnc_infra_metrics,
+ args=(sdnc['_id'],))
+ processes.append(p)
+ p.start()
for process in processes:
- process.join()
+ process.join(timeout=10)
metrics = []
while not self.queue.empty():
metrics.append(self.queue.get())
for plugin in self.plugins:
plugin.handle(metrics)
+
+ def _init_backends(self):
+ for backend in METRIC_BACKENDS:
+ self.plugins.append(backend())