Fix bug 1884 MON openstack token optimization
[osm/MON.git] / osm_mon / collector / service.py
index aa27083..314ce11 100644 (file)
@@ -27,6 +27,7 @@ import logging
 from typing import List
 import concurrent.futures
 import time
+import keystoneauth1.exceptions
 
 from osm_mon.collector.infra_collectors.onos import OnosInfraCollector
 from osm_mon.collector.infra_collectors.openstack import OpenstackInfraCollector
@@ -54,6 +55,15 @@ VIM_INFRA_COLLECTORS = {
 }
 SDN_INFRA_COLLECTORS = {"onosof": OnosInfraCollector, "onos_vpls": OnosInfraCollector}
 
+# Map to store vim ids and corresponding vim session objects
+vim_sess_map = {}
+
+
+# Invoked from process executor to initialize the vim session map
+def init_session(session_map: dict):
+    global vim_sess_map
+    vim_sess_map = session_map
+
 
 class CollectorService:
     def __init__(self, config: Config):
@@ -80,7 +90,7 @@ class CollectorService:
         vim_type = CollectorService._get_vim_type(conf, vim_account_id)
         log.debug("vim type.....{}".format(vim_type))
         if vim_type in VIM_COLLECTORS:
-            collector = VIM_COLLECTORS[vim_type](conf, vim_account_id)
+            collector = VIM_COLLECTORS[vim_type](conf, vim_account_id, vim_sess_map[vim_account_id])
             metrics = collector.collect(vnfr)
             log.debug("Collecting vim metrics.....{}".format(metrics))
         else:
@@ -147,10 +157,26 @@ class CollectorService:
         vnfrs = self.common_db.get_vnfrs()
         metrics = []
 
+        # Get all vim ids registered in osm and create their corresponding vim session objects
+        # Vim ids and their corresponding session objects are stored in vim-session-map
+        # It optimizes the number of authentication tokens created in vim for metric collection
+        vim_sess_map.clear()
+        vims = self.common_db.get_vim_accounts()
+        for vim in vims:
+            vim_type = CollectorService._get_vim_type(self.conf, vim["_id"])
+            if vim_type in VIM_INFRA_COLLECTORS:
+                collector = VIM_INFRA_COLLECTORS[vim_type](self.conf, vim["_id"])
+                vim_sess = collector.vim_session if vim_type == "openstack" else None
+                # Populate the vim session map with vim ids and corresponding session objects
+                # vim session objects are stored only for vim type openstack
+                if vim_sess:
+                    vim_sess_map[vim["_id"]] = vim_sess
+
         start_time = time.time()
         # Starting executor pool with pool size process_pool_size. Default process_pool_size is 20
+        # init_session is called to assign the session map to the global vim session map variable
         with concurrent.futures.ProcessPoolExecutor(
-            self.conf.get("collector", "process_pool_size")
+            self.conf.get("collector", "process_pool_size"), initializer=init_session, initargs=(vim_sess_map,)
         ) as executor:
             log.info(
                 "Started metric collector process pool with pool size %s"
@@ -177,7 +203,6 @@ class CollectorService:
                     )
                 )
 
-            vims = self.common_db.get_vim_accounts()
             for vim in vims:
                 futures.append(
                     executor.submit(
@@ -202,13 +227,17 @@ class CollectorService:
                 for future in concurrent.futures.as_completed(
                     futures, self.conf.get("collector", "process_execution_timeout")
                 ):
-                    result = future.result(
-                        timeout=int(
-                            self.conf.get("collector", "process_execution_timeout")
+                    try:
+                        result = future.result(
+                            timeout=int(
+                                self.conf.get("collector", "process_execution_timeout")
+                            )
                         )
-                    )
-                    metrics.extend(result)
-                    log.debug("result = %s" % (result))
+                        metrics.extend(result)
+                        log.debug("result = %s" % (result))
+                    except keystoneauth1.exceptions.connection.ConnectionError as e:
+                        log.info("Keystone connection error during metric collection")
+                        log.debug("Keystone connection error exception %s" % (e))
             except concurrent.futures.TimeoutError as e:
                 # Some processes have not completed due to timeout error
                 log.info(