Reformat MON to standardized format 03/10803/1
author garciadeblas <gerardo.garciadeblas@telefonica.com>
Fri, 14 May 2021 14:47:03 +0000 (16:47 +0200)
committer garciadeblas <gerardo.garciadeblas@telefonica.com>
Fri, 14 May 2021 14:47:03 +0000 (16:47 +0200)
Change-Id: I5869a8c1d0a53c5f6ad6b8859e6469d447bfb63d
Signed-off-by: garciadeblas <gerardo.garciadeblas@telefonica.com>
48 files changed:
osm_mon/cmd/mon_collector.py
osm_mon/cmd/mon_dashboarder.py
osm_mon/cmd/mon_evaluator.py
osm_mon/cmd/mon_healthcheck.py
osm_mon/cmd/mon_server.py
osm_mon/cmd/mon_utils.py
osm_mon/collector/backends/prometheus.py
osm_mon/collector/collector.py
osm_mon/collector/infra_collectors/base.py
osm_mon/collector/infra_collectors/base_osinfra.py
osm_mon/collector/infra_collectors/onos.py
osm_mon/collector/infra_collectors/vmware.py
osm_mon/collector/service.py
osm_mon/collector/utils/openstack.py
osm_mon/collector/vnf_collectors/openstack.py
osm_mon/collector/vnf_collectors/vio.py
osm_mon/collector/vnf_collectors/vmware.py
osm_mon/collector/vnf_collectors/vrops/vrops_helper.py
osm_mon/collector/vnf_metric.py
osm_mon/core/common_db.py
osm_mon/core/config.py
osm_mon/core/keystone.py
osm_mon/core/message_bus_client.py
osm_mon/core/models.py
osm_mon/core/response.py
osm_mon/core/utils.py
osm_mon/dashboarder/backends/grafana.py
osm_mon/dashboarder/dashboarder.py
osm_mon/dashboarder/service.py
osm_mon/evaluator/backends/prometheus.py
osm_mon/evaluator/evaluator.py
osm_mon/evaluator/service.py
osm_mon/server/server.py
osm_mon/server/service.py
osm_mon/tests/unit/collector/test_collector_service.py
osm_mon/tests/unit/collector/utils/test_openstack.py
osm_mon/tests/unit/collector/vnf_collectors/test_openstack.py
osm_mon/tests/unit/collector/vnf_collectors/vmware/mock_http.py
osm_mon/tests/unit/collector/vnf_collectors/vmware/test_vcd_collector.py
osm_mon/tests/unit/collector/vnf_collectors/vmware/test_vio_collector.py
osm_mon/tests/unit/collector/vnf_collectors/vmware/test_vrops_helper.py
osm_mon/tests/unit/core/test_common_db_client.py
osm_mon/tests/unit/core/test_message_bus_client.py
osm_mon/tests/unit/evaluator/test_evaluator.py
osm_mon/tests/unit/evaluator/test_evaluator_service.py
osm_mon/tests/unit/evaluator/test_prometheus.py
setup.py
tox.ini

index 8732269..30e6676 100644 (file)
@@ -31,16 +31,18 @@ from osm_mon.cmd.mon_utils import wait_till_core_services_are_ready
 
 
 def main():
-    parser = argparse.ArgumentParser(prog='osm-mon-collector')
-    parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+    parser = argparse.ArgumentParser(prog="osm-mon-collector")
+    parser.add_argument("--config-file", nargs="?", help="MON configuration file")
     args = parser.parse_args()
     cfg = Config(args.config_file)
 
     root = logging.getLogger()
-    root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+    root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
     ch = logging.StreamHandler(sys.stdout)
-    ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+    ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+    )
     ch.setFormatter(formatter)
     root.addHandler(ch)
 
@@ -59,5 +61,5 @@ def main():
         log.error("Failed to start MON Collector")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index e7748cd..c3c588c 100644 (file)
@@ -32,16 +32,18 @@ import threading
 
 
 def main():
-    parser = argparse.ArgumentParser(prog='osm-policy-agent')
-    parser.add_argument('--config-file', nargs='?', help='POL configuration file')
+    parser = argparse.ArgumentParser(prog="osm-policy-agent")
+    parser.add_argument("--config-file", nargs="?", help="POL configuration file")
     args = parser.parse_args()
     cfg = Config(args.config_file)
 
     root = logging.getLogger()
-    root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+    root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
     ch = logging.StreamHandler(sys.stdout)
-    ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+    ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+    )
     ch.setFormatter(formatter)
     root.addHandler(ch)
 
@@ -62,5 +64,5 @@ def main():
         log.error("Failed to start MON Dashboarder")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index ca2df2e..b1c0e2a 100644 (file)
@@ -31,16 +31,18 @@ from osm_mon.cmd.mon_utils import wait_till_core_services_are_ready
 
 
 def main():
-    parser = argparse.ArgumentParser(prog='osm-mon-evaluator')
-    parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+    parser = argparse.ArgumentParser(prog="osm-mon-evaluator")
+    parser.add_argument("--config-file", nargs="?", help="MON configuration file")
     args = parser.parse_args()
     cfg = Config(args.config_file)
 
     root = logging.getLogger()
-    root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+    root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
     ch = logging.StreamHandler(sys.stdout)
-    ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+    ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+    )
     ch.setFormatter(formatter)
     root.addHandler(ch)
 
@@ -59,5 +61,5 @@ def main():
         log.error("Failed to start MON Evaluator")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index 99ec1d3..7eae4c2 100644 (file)
@@ -30,8 +30,8 @@ log = logging.getLogger(__name__)
 
 
 def main():
-    parser = argparse.ArgumentParser(prog='osm-mon-healthcheck')
-    parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+    parser = argparse.ArgumentParser(prog="osm-mon-healthcheck")
+    parser.add_argument("--config-file", nargs="?", help="MON configuration file")
     # args = parser.parse_args()
     # cfg = Config(args.config_file)
 
@@ -49,9 +49,9 @@ def _processes_running():
                 return True
         return False
 
-    processes_to_check = ['osm-mon-collector', 'osm-mon-evaluator', 'osm-mon-server']
-    ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE).communicate()[0]
-    processes_running = ps.decode().split('\n')
+    processes_to_check = ["osm-mon-collector", "osm-mon-evaluator", "osm-mon-server"]
+    ps = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE).communicate()[0]
+    processes_running = ps.decode().split("\n")
     for p in processes_to_check:
         if not _contains_process(processes_running, p):
             log.error("Process %s not running!" % p)
@@ -61,7 +61,7 @@ def _processes_running():
 
 def _is_prometheus_exporter_ok():
     try:
-        r = requests.get('http://localhost:8000')
+        r = requests.get("http://localhost:8000")
         r.raise_for_status()
         return True
     except Exception:
@@ -69,5 +69,5 @@ def _is_prometheus_exporter_ok():
         return False
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index e5bca31..7d676d7 100644 (file)
@@ -32,16 +32,18 @@ from osm_mon.cmd.mon_utils import wait_till_core_services_are_ready
 
 
 def main():
-    parser = argparse.ArgumentParser(prog='osm-mon-server')
-    parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+    parser = argparse.ArgumentParser(prog="osm-mon-server")
+    parser.add_argument("--config-file", nargs="?", help="MON configuration file")
     args = parser.parse_args()
     cfg = Config(args.config_file)
 
     root = logging.getLogger()
-    root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+    root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
     ch = logging.StreamHandler(sys.stdout)
-    ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
-    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+    ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+    )
     ch.setFormatter(formatter)
     root.addHandler(ch)
 
@@ -61,5 +63,5 @@ def main():
         log.error("Failed to start MON Server")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
index 9640543..83dd130 100644 (file)
@@ -25,14 +25,16 @@ def wait_till_commondb_is_ready(config, process_name="osm-mon", commondb_wait_ti
 
     logging.debug("wait_till_commondb_is_ready")
 
-    while(True):
+    while True:
         commondb_url = config.conf["database"].get("uri")
         try:
             commondb = pymongo.MongoClient(commondb_url)
             commondb.server_info()
             break
         except Exception:
-            logging.info("{} process is waiting for commondb to come up...".format(process_name))
+            logging.info(
+                "{} process is waiting for commondb to come up...".format(process_name)
+            )
             time.sleep(commondb_wait_time)
 
 
@@ -40,7 +42,7 @@ def wait_till_kafka_is_ready(config, process_name="osm-mon", kafka_wait_time=5):
 
     logging.debug("wait_till_kafka_is_ready")
 
-    while(True):
+    while True:
         kafka_ready = False
         try:
             with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
@@ -55,17 +57,25 @@ def wait_till_kafka_is_ready(config, process_name="osm-mon", kafka_wait_time=5):
                     == 0
                 ):
                     # Get the list of topics. If kafka is not ready exception will be thrown.
-                    consumer = kafka.KafkaConsumer(group_id=config.conf["message"].get("group_id"),
-                                                   bootstrap_servers=[config.conf.get("message", {}).get("host",
-                                                                      "kafka") + ":" + config.conf["message"]
-                                                                      .get("port")])
+                    consumer = kafka.KafkaConsumer(
+                        group_id=config.conf["message"].get("group_id"),
+                        bootstrap_servers=[
+                            config.conf.get("message", {}).get("host", "kafka")
+                            + ":"
+                            + config.conf["message"].get("port")
+                        ],
+                    )
                     all_topics = consumer.topics()
                     logging.debug("Number of topics found: %s", len(all_topics))
 
                     # Send dummy message in kafka topics. If kafka is not ready exception will be thrown.
-                    producer = kafka.KafkaProducer(bootstrap_servers=[config.conf.get("message", {}).get("host",
-                                                                      "kafka") + ":" + config.conf["message"]
-                                                                      .get("port")])
+                    producer = kafka.KafkaProducer(
+                        bootstrap_servers=[
+                            config.conf.get("message", {}).get("host", "kafka")
+                            + ":"
+                            + config.conf["message"].get("port")
+                        ]
+                    )
                     mon_topics = ["alarm_request", "users", "project"]
                     for mon_topic in mon_topics:
                         producer.send(mon_topic, key=b"echo", value=b"dummy message")
@@ -79,11 +89,15 @@ def wait_till_kafka_is_ready(config, process_name="osm-mon", kafka_wait_time=5):
             if kafka_ready:
                 break
             else:
-                logging.info("{} process is waiting for kafka to come up...".format(process_name))
+                logging.info(
+                    "{} process is waiting for kafka to come up...".format(process_name)
+                )
                 time.sleep(kafka_wait_time)
 
 
-def wait_till_core_services_are_ready(config, process_name="osm-mon", commondb_wait_time=5, kafka_wait_time=5):
+def wait_till_core_services_are_ready(
+    config, process_name="osm-mon", commondb_wait_time=5, kafka_wait_time=5
+):
 
     logging.debug("wait_till_core_services_are_ready")
 
index fbe6d36..a9bb938 100644 (file)
@@ -31,44 +31,44 @@ from osm_mon.collector.metric import Metric
 
 log = logging.getLogger(__name__)
 
-OSM_METRIC_PREFIX = 'osm_'
+OSM_METRIC_PREFIX = "osm_"
 
 
 class PrometheusBackend(BaseBackend):
-
     def __init__(self):
         self.custom_collector = CustomCollector()
         self._start_exporter(8000)
 
     def handle(self, metrics: List[Metric]):
-        log.debug('handle')
-        log.debug('metrics: %s', metrics)
+        log.debug("handle")
+        log.debug("metrics: %s", metrics)
         prometheus_metrics = {}
         for metric in metrics:
             if metric.name not in prometheus_metrics:
                 prometheus_metrics[metric.name] = GaugeMetricFamily(
                     OSM_METRIC_PREFIX + metric.name,
-                    'OSM metric',
-                    labels=list(metric.tags.keys())
+                    "OSM metric",
+                    labels=list(metric.tags.keys()),
                 )
-            prometheus_metrics[metric.name].add_metric(list(metric.tags.values()), metric.value)
+            prometheus_metrics[metric.name].add_metric(
+                list(metric.tags.values()), metric.value
+            )
         self.custom_collector.metrics = prometheus_metrics.values()
 
     def _start_exporter(self, port):
-        log.debug('_start_exporter')
-        log.debug('port: %s', port)
+        log.debug("_start_exporter")
+        log.debug("port: %s", port)
         REGISTRY.register(self.custom_collector)
         log.info("Starting MON Prometheus exporter at port %s", port)
         start_http_server(port)
 
 
 class CustomCollector(object):
-
     def __init__(self):
         self.metrics = []
 
     def describe(self):
-        log.debug('describe')
+        log.debug("describe")
         return []
 
     def collect(self):
index ad166e4..a69e651 100644 (file)
@@ -29,9 +29,7 @@ from osm_mon.core.config import Config
 
 log = logging.getLogger(__name__)
 
-METRIC_BACKENDS = [
-    PrometheusBackend
-]
+METRIC_BACKENDS = [PrometheusBackend]
 
 
 class Collector:
@@ -42,11 +40,11 @@ class Collector:
         self._init_backends()
 
     def collect_forever(self):
-        log.debug('collect_forever')
+        log.debug("collect_forever")
         while True:
             try:
                 self.collect_metrics()
-                time.sleep(int(self.conf.get('collector', 'interval')))
+                time.sleep(int(self.conf.get("collector", "interval")))
             except Exception:
                 log.exception("Error collecting metrics")
 
index 586169a..2f97ebc 100644 (file)
@@ -25,6 +25,5 @@ from osm_mon.collector.metric import Metric
 
 
 class BaseInfraCollector:
-
     def collect(self) -> List[Metric]:
         pass
index 1c1999c..61e9e42 100644 (file)
@@ -46,46 +46,46 @@ class BaseOpenStackInfraCollector(BaseVimInfraCollector):
     def collect(self) -> List[Metric]:
         metrics = []
         vim_status = self.is_vim_ok()
-        if self.vim_account['_admin']['projects_read']:
-            vim_project_id = self.vim_account['_admin']['projects_read'][0]
+        if self.vim_account["_admin"]["projects_read"]:
+            vim_project_id = self.vim_account["_admin"]["projects_read"][0]
         else:
-            vim_project_id = ''
+            vim_project_id = ""
         vim_tags = {
-            'vim_account_id': self.vim_account['_id'],
-            'project_id': vim_project_id
+            "vim_account_id": self.vim_account["_id"],
+            "project_id": vim_project_id,
         }
-        vim_status_metric = Metric(vim_tags, 'vim_status', vim_status)
+        vim_status_metric = Metric(vim_tags, "vim_status", vim_status)
         metrics.append(vim_status_metric)
-        vnfrs = self.common_db.get_vnfrs(vim_account_id=self.vim_account['_id'])
+        vnfrs = self.common_db.get_vnfrs(vim_account_id=self.vim_account["_id"])
         for vnfr in vnfrs:
-            nsr_id = vnfr['nsr-id-ref']
-            ns_name = self.common_db.get_nsr(nsr_id)['name']
-            vnf_member_index = vnfr['member-vnf-index-ref']
-            if vnfr['_admin']['projects_read']:
-                vnfr_project_id = vnfr['_admin']['projects_read'][0]
+            nsr_id = vnfr["nsr-id-ref"]
+            ns_name = self.common_db.get_nsr(nsr_id)["name"]
+            vnf_member_index = vnfr["member-vnf-index-ref"]
+            if vnfr["_admin"]["projects_read"]:
+                vnfr_project_id = vnfr["_admin"]["projects_read"][0]
             else:
-                vnfr_project_id = ''
-            for vdur in vnfr['vdur']:
-                if 'vim-id' not in vdur:
+                vnfr_project_id = ""
+            for vdur in vnfr["vdur"]:
+                if "vim-id" not in vdur:
                     log.debug("Field vim-id is not present in vdur")
                     continue
-                resource_uuid = vdur['vim-id']
+                resource_uuid = vdur["vim-id"]
                 tags = {
-                    'vim_account_id': self.vim_account['_id'],
-                    'resource_uuid': resource_uuid,
-                    'nsr_id': nsr_id,
-                    'ns_name': ns_name,
-                    'vnf_member_index': vnf_member_index,
-                    'vdur_name': vdur.get("name", ""),
-                    'project_id': vnfr_project_id
+                    "vim_account_id": self.vim_account["_id"],
+                    "resource_uuid": resource_uuid,
+                    "nsr_id": nsr_id,
+                    "ns_name": ns_name,
+                    "vnf_member_index": vnf_member_index,
+                    "vdur_name": vdur.get("name", ""),
+                    "project_id": vnfr_project_id,
                 }
                 try:
                     vm = self.nova.servers.get(resource_uuid)
-                    vm_status = (1 if vm.status == 'ACTIVE' else 0)
-                    vm_status_metric = Metric(tags, 'vm_status', vm_status)
+                    vm_status = 1 if vm.status == "ACTIVE" else 0
+                    vm_status_metric = Metric(tags, "vm_status", vm_status)
                 except Exception as e:
                     log.warning("VM status is not OK: %s" % e)
-                    vm_status_metric = Metric(tags, 'vm_status', 0)
+                    vm_status_metric = Metric(tags, "vm_status", 0)
                 metrics.append(vm_status_metric)
 
         return metrics
index 33a3aa4..ccd66d5 100644 (file)
@@ -42,29 +42,26 @@ class OnosInfraCollector(BaseSdncInfraCollector):
     def collect(self) -> List[Metric]:
         metrics = []
         sdnc_status = self.is_sdnc_ok()
-        if self.sdnc['_admin']['projects_read']:
-            sdnc_project_id = self.sdnc['_admin']['projects_read'][0]
+        if self.sdnc["_admin"]["projects_read"]:
+            sdnc_project_id = self.sdnc["_admin"]["projects_read"][0]
         else:
-            sdnc_project_id = ''
-        sdnc_tags = {
-            'sdnc_id': self.sdnc['_id'],
-            'project_id': sdnc_project_id
-        }
-        sdnc_status_metric = Metric(sdnc_tags, 'sdnc_status', sdnc_status)
+            sdnc_project_id = ""
+        sdnc_tags = {"sdnc_id": self.sdnc["_id"], "project_id": sdnc_project_id}
+        sdnc_status_metric = Metric(sdnc_tags, "sdnc_status", sdnc_status)
         metrics.append(sdnc_status_metric)
 
         return metrics
 
     def is_sdnc_ok(self) -> bool:
         try:
-            ip = self.sdnc['ip']
-            port = self.sdnc['port']
-            user = self.sdnc['user']
-            password = self.common_db.decrypt_sdnc_password(self.sdnc['password'],
-                                                            self.sdnc['schema_version'],
-                                                            self.sdnc['_id'])
+            ip = self.sdnc["ip"]
+            port = self.sdnc["port"]
+            user = self.sdnc["user"]
+            password = self.common_db.decrypt_sdnc_password(
+                self.sdnc["password"], self.sdnc["schema_version"], self.sdnc["_id"]
+            )
             # TODO: Add support for https
-            url = 'http://{}:{}/onos/v1/devices'.format(ip, port)
+            url = "http://{}:{}/onos/v1/devices".format(ip, port)
             requests.get(url, auth=HTTPBasicAuth(user, password))
             return True
         except Exception:
index 89717dc..65e739d 100644 (file)
@@ -37,30 +37,29 @@ from osm_mon.core.common_db import CommonDbClient
 from osm_mon.core.config import Config
 
 log = logging.getLogger(__name__)
-API_VERSION = '27.0'
+API_VERSION = "27.0"
 
 
 class VMwareInfraCollector(BaseVimInfraCollector):
-
     def __init__(self, config: Config, vim_account_id: str):
         super().__init__(config, vim_account_id)
         self.vim_account_id = vim_account_id
         self.common_db = CommonDbClient(config)
         vim_account = self.get_vim_account(vim_account_id)
-        self.vcloud_site = vim_account['vim_url']
-        self.admin_username = vim_account['admin_username']
-        self.admin_password = vim_account['admin_password']
-        self.vim_uuid = vim_account['vim_uuid']
-        self.org_name = vim_account['orgname']
-        self.vim_project_id = vim_account['project_id']
+        self.vcloud_site = vim_account["vim_url"]
+        self.admin_username = vim_account["admin_username"]
+        self.admin_password = vim_account["admin_password"]
+        self.vim_uuid = vim_account["vim_uuid"]
+        self.org_name = vim_account["orgname"]
+        self.vim_project_id = vim_account["project_id"]
 
     def connect_vim_as_admin(self):
-        """ Method connect as pvdc admin user to vCloud director.
-            There are certain action that can be done only by provider vdc admin user.
-            Organization creation / provider network creation etc.
+        """Method connect as pvdc admin user to vCloud director.
+        There are certain action that can be done only by provider vdc admin user.
+        Organization creation / provider network creation etc.
 
-            Returns:
-                The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
+        Returns:
+            The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
         """
 
         log.info("Logging into vCD org as admin.")
@@ -70,42 +69,45 @@ class VMwareInfraCollector(BaseVimInfraCollector):
             host = self.vcloud_site
             admin_user = self.admin_username
             admin_passwd = self.admin_password
-            org = 'System'
+            org = "System"
             client = Client(host, verify_ssl_certs=False)
             client.set_highest_supported_version()
-            client.set_credentials(BasicLoginCredentials(admin_user, org,
-                                                         admin_passwd))
+            client.set_credentials(BasicLoginCredentials(admin_user, org, admin_passwd))
             return client
 
         except Exception as e:
-            log.info("Can't connect to a vCloud director as: {} with exception {}".format(admin_user, e))
+            log.info(
+                "Can't connect to a vCloud director as: {} with exception {}".format(
+                    admin_user, e
+                )
+            )
 
     def get_vim_account(self, vim_account_id: str):
         """
-           Method to get VIM account details by its ID
-           arg - VIM ID
-           return - dict with vim account details
+        Method to get VIM account details by its ID
+        arg - VIM ID
+        return - dict with vim account details
         """
         vim_account = {}
         vim_account_info = self.common_db.get_vim_account(vim_account_id)
 
-        vim_account['name'] = vim_account_info['name']
-        vim_account['vim_tenant_name'] = vim_account_info['vim_tenant_name']
-        vim_account['vim_type'] = vim_account_info['vim_type']
-        vim_account['vim_url'] = vim_account_info['vim_url']
-        vim_account['org_user'] = vim_account_info['vim_user']
-        vim_account['vim_uuid'] = vim_account_info['_id']
-        if vim_account_info['_admin']['projects_read']:
-            vim_account['project_id'] = vim_account_info['_admin']['projects_read'][0]
+        vim_account["name"] = vim_account_info["name"]
+        vim_account["vim_tenant_name"] = vim_account_info["vim_tenant_name"]
+        vim_account["vim_type"] = vim_account_info["vim_type"]
+        vim_account["vim_url"] = vim_account_info["vim_url"]
+        vim_account["org_user"] = vim_account_info["vim_user"]
+        vim_account["vim_uuid"] = vim_account_info["_id"]
+        if vim_account_info["_admin"]["projects_read"]:
+            vim_account["project_id"] = vim_account_info["_admin"]["projects_read"][0]
         else:
-            vim_account['project_id'] = ''
+            vim_account["project_id"] = ""
 
-        vim_config = vim_account_info['config']
-        vim_account['admin_username'] = vim_config['admin_username']
-        vim_account['admin_password'] = vim_config['admin_password']
+        vim_config = vim_account_info["config"]
+        vim_account["admin_username"] = vim_config["admin_username"]
+        vim_account["admin_password"] = vim_config["admin_password"]
 
-        if vim_config['orgname'] is not None:
-            vim_account['orgname'] = vim_config['orgname']
+        if vim_config["orgname"] is not None:
+            vim_account["orgname"] = vim_config["orgname"]
 
         return vim_account
 
@@ -115,27 +117,36 @@ class VMwareInfraCollector(BaseVimInfraCollector):
             if client._session:
                 org_list = client.get_org_list()
                 for org in org_list.Org:
-                    if org.get('name') == self.org_name:
-                        org_uuid = org.get('href').split('/')[-1]
+                    if org.get("name") == self.org_name:
+                        org_uuid = org.get("href").split("/")[-1]
 
-                url = '{}/api/org/{}'.format(self.vcloud_site, org_uuid)
+                url = "{}/api/org/{}".format(self.vcloud_site, org_uuid)
 
-                headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+                headers = {
+                    "Accept": "application/*+xml;version=" + API_VERSION,
+                    "x-vcloud-authorization": client._session.headers[
+                        "x-vcloud-authorization"
+                    ],
+                }
 
-                response = requests.get(url=url,
-                                        headers=headers,
-                                        verify=False)
+                response = requests.get(url=url, headers=headers, verify=False)
 
-                if response.status_code != requests.codes.ok:  # pylint: disable=no-member
+                if (
+                    response.status_code != requests.codes.ok
+                ):  # pylint: disable=no-member
                     log.info("check_vim_status(): failed to get org details")
                 else:
                     org_details = XmlElementTree.fromstring(response.content)
                     vdc_list = {}
                     for child in org_details:
-                        if 'type' in child.attrib:
-                            if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
-                                vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                        if "type" in child.attrib:
+                            if (
+                                child.attrib["type"]
+                                == "application/vnd.vmware.vcloud.vdc+xml"
+                            ):
+                                vdc_list[
+                                    child.attrib["href"].split("/")[-1:][0]
+                                ] = child.attrib["name"]
 
                 if vdc_list:
                     return True
@@ -148,22 +159,26 @@ class VMwareInfraCollector(BaseVimInfraCollector):
         try:
             client = self.connect_vim_as_admin()
             if client._session:
-                url = '{}/api/vApp/vapp-{}'.format(self.vcloud_site, vapp_id)
+                url = "{}/api/vApp/vapp-{}".format(self.vcloud_site, vapp_id)
 
-                headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
-                           'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+                headers = {
+                    "Accept": "application/*+xml;version=" + API_VERSION,
+                    "x-vcloud-authorization": client._session.headers[
+                        "x-vcloud-authorization"
+                    ],
+                }
 
-                response = requests.get(url=url,
-                                        headers=headers,
-                                        verify=False)
+                response = requests.get(url=url, headers=headers, verify=False)
 
-                if response.status_code != requests.codes.ok:  # pylint: disable=no-member
+                if (
+                    response.status_code != requests.codes.ok
+                ):  # pylint: disable=no-member
                     log.info("check_vm_status(): failed to get vApp details")
                 else:
                     vapp_details = XmlElementTree.fromstring(response.content)
                     vm_list = []
                     for child in vapp_details:
-                        if child.tag.split("}")[1] == 'Children':
+                        if child.tag.split("}")[1] == "Children":
                             for item in child.getchildren():
                                 vm_list.append(item.attrib)
                 return vm_list
@@ -175,42 +190,39 @@ class VMwareInfraCollector(BaseVimInfraCollector):
         vim_status = self.check_vim_status()
         vim_account_id = self.vim_account_id
         vim_project_id = self.vim_project_id
-        vim_tags = {
-            'vim_account_id': vim_account_id,
-            'project_id': vim_project_id
-        }
-        vim_status_metric = Metric(vim_tags, 'vim_status', vim_status)
+        vim_tags = {"vim_account_id": vim_account_id, "project_id": vim_project_id}
+        vim_status_metric = Metric(vim_tags, "vim_status", vim_status)
         metrics.append(vim_status_metric)
         vnfrs = self.common_db.get_vnfrs(vim_account_id=vim_account_id)
         for vnfr in vnfrs:
-            nsr_id = vnfr['nsr-id-ref']
-            ns_name = self.common_db.get_nsr(nsr_id)['name']
-            vnf_member_index = vnfr['member-vnf-index-ref']
-            if vnfr['_admin']['projects_read']:
-                vnfr_project_id = vnfr['_admin']['projects_read'][0]
+            nsr_id = vnfr["nsr-id-ref"]
+            ns_name = self.common_db.get_nsr(nsr_id)["name"]
+            vnf_member_index = vnfr["member-vnf-index-ref"]
+            if vnfr["_admin"]["projects_read"]:
+                vnfr_project_id = vnfr["_admin"]["projects_read"][0]
             else:
-                vnfr_project_id = ''
-            for vdur in vnfr['vdur']:
-                resource_uuid = vdur['vim-id']
+                vnfr_project_id = ""
+            for vdur in vnfr["vdur"]:
+                resource_uuid = vdur["vim-id"]
                 tags = {
-                    'vim_account_id': self.vim_account_id,
-                    'resource_uuid': resource_uuid,
-                    'nsr_id': nsr_id,
-                    'ns_name': ns_name,
-                    'vnf_member_index': vnf_member_index,
-                    'vdur_name': vdur['name'],
-                    'project_id': vnfr_project_id
+                    "vim_account_id": self.vim_account_id,
+                    "resource_uuid": resource_uuid,
+                    "nsr_id": nsr_id,
+                    "ns_name": ns_name,
+                    "vnf_member_index": vnf_member_index,
+                    "vdur_name": vdur["name"],
+                    "project_id": vnfr_project_id,
                 }
                 try:
                     vm_list = self.check_vm_status(resource_uuid)
                     for vm in vm_list:
-                        if vm['status'] == '4' and vm['deployed'] == 'true':
+                        if vm["status"] == "4" and vm["deployed"] == "true":
                             vm_status = 1
                         else:
                             vm_status = 0
-                        vm_status_metric = Metric(tags, 'vm_status', vm_status)
+                        vm_status_metric = Metric(tags, "vm_status", vm_status)
                 except Exception:
                     log.exception("VM status is not OK!")
-                    vm_status_metric = Metric(tags, 'vm_status', 0)
+                    vm_status_metric = Metric(tags, "vm_status", 0)
                 metrics.append(vm_status_metric)
         return metrics
index 5eb65a9..aa27083 100644 (file)
@@ -45,21 +45,17 @@ log = logging.getLogger(__name__)
 VIM_COLLECTORS = {
     "openstack": OpenstackCollector,
     "vmware": VMwareCollector,
-    "vio": VIOCollector
+    "vio": VIOCollector,
 }
 VIM_INFRA_COLLECTORS = {
     "openstack": OpenstackInfraCollector,
     "vmware": VMwareInfraCollector,
-    "vio": VIOInfraCollector
-}
-SDN_INFRA_COLLECTORS = {
-    "onosof": OnosInfraCollector,
-    "onos_vpls": OnosInfraCollector
+    "vio": VIOInfraCollector,
 }
+SDN_INFRA_COLLECTORS = {"onosof": OnosInfraCollector, "onos_vpls": OnosInfraCollector}
 
 
 class CollectorService:
-
     def __init__(self, config: Config):
         self.conf = config
         self.common_db = CommonDbClient(self.conf)
@@ -70,11 +66,11 @@ class CollectorService:
     def _get_vim_type(conf: Config, vim_account_id: str) -> str:
         common_db = CommonDbClient(conf)
         vim_account = common_db.get_vim_account(vim_account_id)
-        vim_type = vim_account['vim_type']
-        if 'config' in vim_account and 'vim_type' in vim_account['config']:
-            vim_type = vim_account['config']['vim_type'].lower()
-            if vim_type == 'vio' and 'vrops_site' not in vim_account['config']:
-                vim_type = 'openstack'
+        vim_type = vim_account["vim_type"]
+        if "config" in vim_account and "vim_type" in vim_account["config"]:
+            vim_type = vim_account["config"]["vim_type"].lower()
+            if vim_type == "vio" and "vrops_site" not in vim_account["config"]:
+                vim_type = "openstack"
         return vim_type
 
     @staticmethod
@@ -117,7 +113,7 @@ class CollectorService:
         log.info("Collecting sdnc metrics")
         metrics = []
         common_db = CommonDbClient(conf)
-        sdn_type = common_db.get_sdnc(sdnc_id)['type']
+        sdn_type = common_db.get_sdnc(sdnc_id)["type"]
         if sdn_type in SDN_INFRA_COLLECTORS:
             collector = SDN_INFRA_COLLECTORS[sdn_type](conf, sdnc_id)
             metrics = collector.collect()
@@ -128,9 +124,9 @@ class CollectorService:
 
     @staticmethod
     def _stop_process_pool(executor):
-        log.info('Shutting down process pool')
+        log.info("Shutting down process pool")
         try:
-            log.debug('Stopping residual processes in the process pool')
+            log.debug("Stopping residual processes in the process pool")
             for pid, process in executor._processes.items():
                 if process.is_alive():
                     process.terminate()
@@ -140,11 +136,11 @@ class CollectorService:
 
         try:
             # Shutting down executor
-            log.debug('Shutting down process pool executor')
+            log.debug("Shutting down process pool executor")
             executor.shutdown()
         except RuntimeError as e:
-            log.info('RuntimeError in shutting down executer')
-            log.debug('RuntimeError %s' % (e))
+            log.info("RuntimeError in shutting down executer")
+            log.debug("RuntimeError %s" % (e))
         return
 
     def collect_metrics(self) -> List[Metric]:
@@ -153,37 +149,72 @@ class CollectorService:
 
         start_time = time.time()
         # Starting executor pool with pool size process_pool_size. Default process_pool_size is 20
-        with concurrent.futures.ProcessPoolExecutor(self.conf.get('collector', 'process_pool_size')) as executor:
-            log.info('Started metric collector process pool with pool size %s' % (self.conf.get('collector',
-                                                                                                'process_pool_size')))
+        with concurrent.futures.ProcessPoolExecutor(
+            self.conf.get("collector", "process_pool_size")
+        ) as executor:
+            log.info(
+                "Started metric collector process pool with pool size %s"
+                % (self.conf.get("collector", "process_pool_size"))
+            )
             futures = []
             for vnfr in vnfrs:
-                nsr_id = vnfr['nsr-id-ref']
-                vnf_member_index = vnfr['member-vnf-index-ref']
-                vim_account_id = self.common_db.get_vim_account_id(nsr_id, vnf_member_index)
-                futures.append(executor.submit(CollectorService._collect_vim_metrics, self.conf, vnfr, vim_account_id))
-                futures.append(executor.submit(CollectorService._collect_vca_metrics, self.conf, vnfr))
+                nsr_id = vnfr["nsr-id-ref"]
+                vnf_member_index = vnfr["member-vnf-index-ref"]
+                vim_account_id = self.common_db.get_vim_account_id(
+                    nsr_id, vnf_member_index
+                )
+                futures.append(
+                    executor.submit(
+                        CollectorService._collect_vim_metrics,
+                        self.conf,
+                        vnfr,
+                        vim_account_id,
+                    )
+                )
+                futures.append(
+                    executor.submit(
+                        CollectorService._collect_vca_metrics, self.conf, vnfr
+                    )
+                )
 
             vims = self.common_db.get_vim_accounts()
             for vim in vims:
-                futures.append(executor.submit(CollectorService._collect_vim_infra_metrics, self.conf, vim['_id']))
+                futures.append(
+                    executor.submit(
+                        CollectorService._collect_vim_infra_metrics,
+                        self.conf,
+                        vim["_id"],
+                    )
+                )
 
             sdncs = self.common_db.get_sdncs()
             for sdnc in sdncs:
-                futures.append(executor.submit(CollectorService._collect_sdnc_infra_metrics, self.conf, sdnc['_id']))
+                futures.append(
+                    executor.submit(
+                        CollectorService._collect_sdnc_infra_metrics,
+                        self.conf,
+                        sdnc["_id"],
+                    )
+                )
 
             try:
                 # Wait for future calls to complete till process_execution_timeout. Default is 50 seconds
-                for future in concurrent.futures.as_completed(futures, self.conf.get('collector',
-                                                                                     'process_execution_timeout')):
-                    result = future.result(timeout=int(self.conf.get('collector',
-                                                                     'process_execution_timeout')))
+                for future in concurrent.futures.as_completed(
+                    futures, self.conf.get("collector", "process_execution_timeout")
+                ):
+                    result = future.result(
+                        timeout=int(
+                            self.conf.get("collector", "process_execution_timeout")
+                        )
+                    )
                     metrics.extend(result)
-                    log.debug('result = %s' % (result))
+                    log.debug("result = %s" % (result))
             except concurrent.futures.TimeoutError as e:
                 # Some processes have not completed due to timeout error
-                log.info('Some processes have not finished due to TimeoutError exception')
-                log.debug('concurrent.futures.TimeoutError exception %s' % (e))
+                log.info(
+                    "Some processes have not finished due to TimeoutError exception"
+                )
+                log.debug("concurrent.futures.TimeoutError exception %s" % (e))
 
             # Shutting down process pool executor
             CollectorService._stop_process_pool(executor)
index 09c472c..9162f98 100644 (file)
@@ -27,26 +27,27 @@ from keystoneauth1.identity import v3
 
 
 class OpenstackUtils:
-
     @staticmethod
     def get_session(creds: dict):
         verify_ssl = True
-        project_domain_name = 'Default'
-        user_domain_name = 'Default'
-        if 'config' in creds:
-            vim_config = creds['config']
-            if 'insecure' in vim_config and vim_config['insecure']:
+        project_domain_name = "Default"
+        user_domain_name = "Default"
+        if "config" in creds:
+            vim_config = creds["config"]
+            if "insecure" in vim_config and vim_config["insecure"]:
                 verify_ssl = False
-            if 'ca_cert' in vim_config:
-                verify_ssl = vim_config['ca_cert']
-            if 'project_domain_name' in vim_config:
-                project_domain_name = vim_config['project_domain_name']
-            if 'user_domain_name' in vim_config:
-                user_domain_name = vim_config['user_domain_name']
-        auth = v3.Password(auth_url=creds['vim_url'],
-                           username=creds['vim_user'],
-                           password=creds['vim_password'],
-                           project_name=creds['vim_tenant_name'],
-                           project_domain_name=project_domain_name,
-                           user_domain_name=user_domain_name)
+            if "ca_cert" in vim_config:
+                verify_ssl = vim_config["ca_cert"]
+            if "project_domain_name" in vim_config:
+                project_domain_name = vim_config["project_domain_name"]
+            if "user_domain_name" in vim_config:
+                user_domain_name = vim_config["user_domain_name"]
+        auth = v3.Password(
+            auth_url=creds["vim_url"],
+            username=creds["vim_user"],
+            password=creds["vim_password"],
+            project_name=creds["vim_tenant_name"],
+            project_domain_name=project_domain_name,
+            user_domain_name=user_domain_name,
+        )
         return session.Session(auth=auth, verify=verify_ssl, timeout=10)
index a5c4980..525bd00 100644 (file)
@@ -55,21 +55,22 @@ METRIC_MAPPINGS = {
     "cpu_utilization": "cpu",
 }
 
-METRIC_MULTIPLIERS = {
-    "cpu": 0.0000001
-}
+METRIC_MULTIPLIERS = {"cpu": 0.0000001}
 
-METRIC_AGGREGATORS = {
-    "cpu": "rate:mean"
-}
+METRIC_AGGREGATORS = {"cpu": "rate:mean"}
 
-INTERFACE_METRICS = ['packets_in_dropped', 'packets_out_dropped', 'packets_received', 'packets_sent']
+INTERFACE_METRICS = [
+    "packets_in_dropped",
+    "packets_out_dropped",
+    "packets_received",
+    "packets_sent",
+]
 
 
 class MetricType(Enum):
-    INSTANCE = 'instance'
-    INTERFACE_ALL = 'interface_all'
-    INTERFACE_ONE = 'interface_one'
+    INSTANCE = "instance"
+    INTERFACE_ALL = "interface_all"
+    INTERFACE_ONE = "interface_one"
 
 
 class OpenstackCollector(BaseVimCollector):
@@ -83,59 +84,77 @@ class OpenstackCollector(BaseVimCollector):
         sess = OpenstackUtils.get_session(vim_account)
         return keystone_client.Client(session=sess)
 
-    def _get_resource_uuid(self, nsr_id: str, vnf_member_index: str, vdur_name: str) -> str:
+    def _get_resource_uuid(
+        self, nsr_id: str, vnf_member_index: str, vdur_name: str
+    ) -> str:
         vdur = self.common_db.get_vdur(nsr_id, vnf_member_index, vdur_name)
-        return vdur['vim-id']
+        return vdur["vim-id"]
 
     def collect(self, vnfr: dict) -> List[Metric]:
-        nsr_id = vnfr['nsr-id-ref']
-        vnf_member_index = vnfr['member-vnf-index-ref']
-        vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+        nsr_id = vnfr["nsr-id-ref"]
+        vnf_member_index = vnfr["member-vnf-index-ref"]
+        vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
         # Populate extra tags for metrics
         tags = {}
-        tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
-        if vnfr['_admin']['projects_read']:
-            tags['project_id'] = vnfr['_admin']['projects_read'][0]
+        tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+        if vnfr["_admin"]["projects_read"]:
+            tags["project_id"] = vnfr["_admin"]["projects_read"][0]
         else:
-            tags['project_id'] = ''
+            tags["project_id"] = ""
 
         metrics = []
 
-        for vdur in vnfr['vdur']:
+        for vdur in vnfr["vdur"]:
             # This avoids errors when vdur records have not been completely filled
-            if 'name' not in vdur:
+            if "name" not in vdur:
                 continue
-            vdu = next(
-                filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
-            )
-            if 'monitoring-parameter' in vdu:
-                for param in vdu['monitoring-parameter']:
-                    metric_name = param['performance-metric']
+            vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
+            if "monitoring-parameter" in vdu:
+                for param in vdu["monitoring-parameter"]:
+                    metric_name = param["performance-metric"]
                     openstack_metric_name = METRIC_MAPPINGS[metric_name]
                     metric_type = self._get_metric_type(metric_name)
                     try:
-                        resource_id = self._get_resource_uuid(nsr_id, vnf_member_index, vdur['name'])
+                        resource_id = self._get_resource_uuid(
+                            nsr_id, vnf_member_index, vdur["name"]
+                        )
                     except ValueError:
                         log.warning(
                             "Could not find resource_uuid for vdur %s, vnf_member_index %s, nsr_id %s. "
                             "Was it recently deleted?",
-                            vdur['name'], vnf_member_index, nsr_id)
+                            vdur["name"],
+                            vnf_member_index,
+                            nsr_id,
+                        )
                         continue
                     try:
                         log.info(
                             "Collecting metric type: %s and metric_name: %s and resource_id %s and ",
                             metric_type,
                             metric_name,
-                            resource_id)
-                        value = self.backend.collect_metric(metric_type, openstack_metric_name, resource_id)
+                            resource_id,
+                        )
+                        value = self.backend.collect_metric(
+                            metric_type, openstack_metric_name, resource_id
+                        )
                         if value is not None:
                             log.info("value: %s", value)
-                            metric = VnfMetric(nsr_id, vnf_member_index, vdur['name'], metric_name, value, tags)
+                            metric = VnfMetric(
+                                nsr_id,
+                                vnf_member_index,
+                                vdur["name"],
+                                metric_name,
+                                value,
+                                tags,
+                            )
                             metrics.append(metric)
                         else:
                             log.info("metric value is empty")
                     except Exception as e:
-                        log.exception("Error collecting metric %s for vdu %s" % (metric_name, vdur['name']))
+                        log.exception(
+                            "Error collecting metric %s for vdu %s"
+                            % (metric_name, vdur["name"])
+                        )
                         log.info("Error in metric collection: %s" % e)
         return metrics
 
@@ -159,12 +178,13 @@ class OpenstackCollector(BaseVimCollector):
 
 
 class OpenstackBackend:
-    def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+    def collect_metric(
+        self, metric_type: MetricType, metric_name: str, resource_id: str
+    ):
         pass
 
 
 class GnocchiBackend(OpenstackBackend):
-
     def __init__(self, vim_account: dict):
         self.client = self._build_gnocchi_client(vim_account)
         self.neutron = self._build_neutron_client(vim_account)
@@ -177,7 +197,9 @@ class GnocchiBackend(OpenstackBackend):
         sess = OpenstackUtils.get_session(vim_account)
         return neutron_client.Client(session=sess)
 
-    def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+    def collect_metric(
+        self, metric_type: MetricType, metric_name: str, resource_id: str
+    ):
         if metric_type == MetricType.INTERFACE_ALL:
             return self._collect_interface_all_metric(metric_name, resource_id)
 
@@ -185,17 +207,19 @@ class GnocchiBackend(OpenstackBackend):
             return self._collect_instance_metric(metric_name, resource_id)
 
         else:
-            raise Exception('Unknown metric type %s' % metric_type.value)
+            raise Exception("Unknown metric type %s" % metric_type.value)
 
     def _collect_interface_all_metric(self, openstack_metric_name, resource_id):
         total_measure = None
-        interfaces = self.client.resource.search(resource_type='instance_network_interface',
-                                                 query={'=': {'instance_id': resource_id}})
+        interfaces = self.client.resource.search(
+            resource_type="instance_network_interface",
+            query={"=": {"instance_id": resource_id}},
+        )
         for interface in interfaces:
             try:
-                measures = self.client.metric.get_measures(openstack_metric_name,
-                                                           resource_id=interface['id'],
-                                                           limit=1)
+                measures = self.client.metric.get_measures(
+                    openstack_metric_name, resource_id=interface["id"], limit=1
+                )
                 if measures:
                     if not total_measure:
                         total_measure = 0.0
@@ -203,8 +227,12 @@ class GnocchiBackend(OpenstackBackend):
 
             except (gnocchiclient.exceptions.NotFound, TypeError) as e:
                 # Gnocchi in some Openstack versions raise TypeError instead of NotFound
-                log.debug("No metric %s found for interface %s: %s", openstack_metric_name,
-                          interface['id'], e)
+                log.debug(
+                    "No metric %s found for interface %s: %s",
+                    openstack_metric_name,
+                    interface["id"],
+                    e,
+                )
         return total_measure
 
     def _collect_instance_metric(self, openstack_metric_name, resource_id):
@@ -213,22 +241,36 @@ class GnocchiBackend(OpenstackBackend):
             aggregation = METRIC_AGGREGATORS.get(openstack_metric_name)
 
             try:
-                measures = self.client.metric.get_measures(openstack_metric_name,
-                                                           aggregation=aggregation,
-                                                           start=time.time() - 1200,
-                                                           resource_id=resource_id)
+                measures = self.client.metric.get_measures(
+                    openstack_metric_name,
+                    aggregation=aggregation,
+                    start=time.time() - 1200,
+                    resource_id=resource_id,
+                )
                 if measures:
                     value = measures[-1][2]
-            except (gnocchiclient.exceptions.NotFound, gnocchiclient.exceptions.BadRequest, TypeError) as e:
+            except (
+                gnocchiclient.exceptions.NotFound,
+                gnocchiclient.exceptions.BadRequest,
+                TypeError,
+            ) as e:
                 # CPU metric in previous Openstack versions do not support rate:mean aggregation method
                 # Gnocchi in some Openstack versions raise TypeError instead of NotFound or BadRequest
                 if openstack_metric_name == "cpu":
-                    log.debug("No metric %s found for instance %s: %s", openstack_metric_name, resource_id, e)
-                    log.info("Retrying to get metric %s for instance %s without aggregation",
-                             openstack_metric_name, resource_id)
-                    measures = self.client.metric.get_measures(openstack_metric_name,
-                                                               resource_id=resource_id,
-                                                               limit=1)
+                    log.debug(
+                        "No metric %s found for instance %s: %s",
+                        openstack_metric_name,
+                        resource_id,
+                        e,
+                    )
+                    log.info(
+                        "Retrying to get metric %s for instance %s without aggregation",
+                        openstack_metric_name,
+                        resource_id,
+                    )
+                    measures = self.client.metric.get_measures(
+                        openstack_metric_name, resource_id=resource_id, limit=1
+                    )
                 else:
                     raise e
                 # measures[-1] is the last measure
@@ -247,8 +289,12 @@ class GnocchiBackend(OpenstackBackend):
                 if openstack_metric_name in METRIC_MULTIPLIERS:
                     value = value * METRIC_MULTIPLIERS[openstack_metric_name]
         except gnocchiclient.exceptions.NotFound as e:
-            log.debug("No metric %s found for instance %s: %s", openstack_metric_name, resource_id,
-                      e)
+            log.debug(
+                "No metric %s found for instance %s: %s",
+                openstack_metric_name,
+                resource_id,
+                e,
+            )
         return value
 
 
@@ -260,9 +306,16 @@ class CeilometerBackend(OpenstackBackend):
         sess = OpenstackUtils.get_session(vim_account)
         return ceilometer_client.Client("2", session=sess)
 
-    def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+    def collect_metric(
+        self, metric_type: MetricType, metric_name: str, resource_id: str
+    ):
         if metric_type != MetricType.INSTANCE:
-            raise NotImplementedError('Ceilometer backend only support instance metrics')
-        measures = self.client.samples.list(meter_name=metric_name, limit=1, q=[
-            {'field': 'resource_id', 'op': 'eq', 'value': resource_id}])
+            raise NotImplementedError(
+                "Ceilometer backend only support instance metrics"
+            )
+        measures = self.client.samples.list(
+            meter_name=metric_name,
+            limit=1,
+            q=[{"field": "resource_id", "op": "eq", "value": resource_id}],
+        )
         return measures[0].counter_volume if measures else None
index e84f5e7..4f7d55a 100644 (file)
@@ -36,57 +36,63 @@ class VIOCollector(BaseVimCollector):
         super().__init__(config, vim_account_id)
         self.common_db = CommonDbClient(config)
         cfg = self.get_vim_account(vim_account_id)
-        self.vrops = vROPS_Helper(vrops_site=cfg['vrops_site'],
-                                  vrops_user=cfg['vrops_user'],
-                                  vrops_password=cfg['vrops_password'])
+        self.vrops = vROPS_Helper(
+            vrops_site=cfg["vrops_site"],
+            vrops_user=cfg["vrops_user"],
+            vrops_password=cfg["vrops_password"],
+        )
 
     def get_vim_account(self, vim_account_id: str):
         vim_account_info = self.common_db.get_vim_account(vim_account_id)
-        return vim_account_info['config']
+        return vim_account_info["config"]
 
     def collect(self, vnfr: dict):
-        vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+        vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
         vdu_mappings = {}
 
         # Populate extra tags for metrics
-        nsr_id = vnfr['nsr-id-ref']
+        nsr_id = vnfr["nsr-id-ref"]
         tags = {}
-        tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
-        if vnfr['_admin']['projects_read']:
-            tags['project_id'] = vnfr['_admin']['projects_read'][0]
+        tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+        if vnfr["_admin"]["projects_read"]:
+            tags["project_id"] = vnfr["_admin"]["projects_read"][0]
         else:
-            tags['project_id'] = ''
+            tags["project_id"] = ""
 
         # Fetch the list of all known resources from vROPS.
         resource_list = self.vrops.get_vm_resource_list_from_vrops()
 
-        for vdur in vnfr['vdur']:
+        for vdur in vnfr["vdur"]:
             # This avoids errors when vdur records have not been completely filled
-            if 'name' not in vdur:
+            if "name" not in vdur:
                 continue
 
-            vdu = next(
-                filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
-            )
-            if 'monitoring-parameter' not in vdu:
+            vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
+            if "monitoring-parameter" not in vdu:
                 continue
 
-            vim_id = vdur['vim-id']
-            vdu_mappings[vim_id] = {'name': vdur['name']}
+            vim_id = vdur["vim-id"]
+            vdu_mappings[vim_id] = {"name": vdur["name"]}
 
             # Map the vROPS instance id to the vim-id so we can look it up.
             for resource in resource_list:
-                for resourceIdentifier in resource['resourceKey']['resourceIdentifiers']:
-                    if resourceIdentifier['identifierType']['name'] == 'VMEntityInstanceUUID':
-                        if resourceIdentifier['value'] != vim_id:
+                for resourceIdentifier in resource["resourceKey"][
+                    "resourceIdentifiers"
+                ]:
+                    if (
+                        resourceIdentifier["identifierType"]["name"]
+                        == "VMEntityInstanceUUID"
+                    ):
+                        if resourceIdentifier["value"] != vim_id:
                             continue
-                        vdu_mappings[vim_id]['vrops_id'] = resource['identifier']
+                        vdu_mappings[vim_id]["vrops_id"] = resource["identifier"]
 
         if len(vdu_mappings) != 0:
-            return self.vrops.get_metrics(vdu_mappings=vdu_mappings,
-                                          monitoring_params=vdu['monitoring-parameter'],
-                                          vnfr=vnfr,
-                                          tags=tags
-                                          )
+            return self.vrops.get_metrics(
+                vdu_mappings=vdu_mappings,
+                monitoring_params=vdu["monitoring-parameter"],
+                vnfr=vnfr,
+                tags=tags,
+            )
         else:
             return []
index 1bc33eb..93592b7 100644 (file)
@@ -36,7 +36,7 @@ from osm_mon.core.config import Config
 
 log = logging.getLogger(__name__)
 
-API_VERSION = '27.0'
+API_VERSION = "27.0"
 
 
 class VMwareCollector(BaseVimCollector):
@@ -44,20 +44,22 @@ class VMwareCollector(BaseVimCollector):
         super().__init__(config, vim_account_id)
         self.common_db = CommonDbClient(config)
         vim_account = self.get_vim_account(vim_account_id)
-        self.vcloud_site = vim_account['vim_url']
-        self.admin_username = vim_account['admin_username']
-        self.admin_password = vim_account['admin_password']
-        self.vrops = vROPS_Helper(vrops_site=vim_account['vrops_site'],
-                                  vrops_user=vim_account['vrops_user'],
-                                  vrops_password=vim_account['vrops_password'])
+        self.vcloud_site = vim_account["vim_url"]
+        self.admin_username = vim_account["admin_username"]
+        self.admin_password = vim_account["admin_password"]
+        self.vrops = vROPS_Helper(
+            vrops_site=vim_account["vrops_site"],
+            vrops_user=vim_account["vrops_user"],
+            vrops_password=vim_account["vrops_password"],
+        )
 
     def connect_as_admin(self):
-        """ Method connect as pvdc admin user to vCloud director.
-            There are certain action that can be done only by provider vdc admin user.
-            Organization creation / provider network creation etc.
+        """Method connect as pvdc admin user to vCloud director.
+        There are certain actions that can be done only by the provider vdc admin user.
+        Organization creation / provider network creation etc.
 
-            Returns:
-                The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
+        Returns:
+            The returned client object that can later be used to connect to vCloud Director as admin for the provider vdc
         """
 
         log.debug("Logging into vCD org as admin.")
@@ -67,41 +69,44 @@ class VMwareCollector(BaseVimCollector):
             host = self.vcloud_site
             admin_user = self.admin_username
             admin_passwd = self.admin_password
-            org = 'System'
+            org = "System"
             client = Client(host, verify_ssl_certs=False)
             client.set_highest_supported_version()
-            client.set_credentials(BasicLoginCredentials(admin_user, org,
-                                                         admin_passwd))
+            client.set_credentials(BasicLoginCredentials(admin_user, org, admin_passwd))
             return client
 
         except Exception as e:
-            log.error("Can't connect to a vCloud director as: {} with exception {}".format(admin_user, e))
+            log.error(
+                "Can't connect to a vCloud director as: {} with exception {}".format(
+                    admin_user, e
+                )
+            )
 
     def get_vim_account(self, vim_account_id: str):
         """
-           Method to get VIM account details by its ID
-           arg - VIM ID
-           return - dict with vim account details
+        Method to get VIM account details by its ID
+        arg - VIM ID
+        return - dict with vim account details
         """
         vim_account = {}
         vim_account_info = self.common_db.get_vim_account(vim_account_id)
 
-        vim_account['vim_url'] = vim_account_info['vim_url']
+        vim_account["vim_url"] = vim_account_info["vim_url"]
 
-        vim_config = vim_account_info['config']
-        vim_account['admin_username'] = vim_config['admin_username']
-        vim_account['admin_password'] = vim_config['admin_password']
-        vim_account['vrops_site'] = vim_config['vrops_site']
-        vim_account['vrops_user'] = vim_config['vrops_user']
-        vim_account['vrops_password'] = vim_config['vrops_password']
+        vim_config = vim_account_info["config"]
+        vim_account["admin_username"] = vim_config["admin_username"]
+        vim_account["admin_password"] = vim_config["admin_password"]
+        vim_account["vrops_site"] = vim_config["vrops_site"]
+        vim_account["vrops_user"] = vim_config["vrops_user"]
+        vim_account["vrops_password"] = vim_config["vrops_password"]
 
         return vim_account
 
     def get_vm_moref_id(self, vapp_uuid):
         """
-           Method to get the moref_id of given VM
-           arg - vapp_uuid
-           return - VM mored_id
+        Method to get the moref_id of given VM
+        arg - vapp_uuid
+        return - VM mored_id
         """
         vm_moref_id = None
         try:
@@ -110,12 +115,24 @@ class VMwareCollector(BaseVimCollector):
 
                 if vm_details and "vm_vcenter_info" in vm_details:
                     vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-                    log.debug("Found vm_moref_id: {} for vApp UUID: {}".format(vm_moref_id, vapp_uuid))
+                    log.debug(
+                        "Found vm_moref_id: {} for vApp UUID: {}".format(
+                            vm_moref_id, vapp_uuid
+                        )
+                    )
                 else:
-                    log.error("Failed to find vm_moref_id from vApp UUID: {}".format(vapp_uuid))
+                    log.error(
+                        "Failed to find vm_moref_id from vApp UUID: {}".format(
+                            vapp_uuid
+                        )
+                    )
 
         except Exception as exp:
-            log.warning("Error occurred while getting VM moref ID for VM: {}\n{}".format(exp, traceback.format_exc()))
+            log.warning(
+                "Error occurred while getting VM moref ID for VM: {}\n{}".format(
+                    exp, traceback.format_exc()
+                )
+            )
 
         return vm_moref_id
 
@@ -136,95 +153,117 @@ class VMwareCollector(BaseVimCollector):
             log.error("Failed to connect to vCD")
             return parsed_respond
 
-        url_list = [self.vcloud_site, '/api/vApp/vapp-', vapp_uuid]
-        get_vapp_restcall = ''.join(url_list)
+        url_list = [self.vcloud_site, "/api/vApp/vapp-", vapp_uuid]
+        get_vapp_restcall = "".join(url_list)
 
         if vca._session:
-            headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
-                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
-            response = requests.get(get_vapp_restcall,
-                                    headers=headers,
-                                    verify=False)
+            headers = {
+                "Accept": "application/*+xml;version=" + API_VERSION,
+                "x-vcloud-authorization": vca._session.headers[
+                    "x-vcloud-authorization"
+                ],
+            }
+            response = requests.get(get_vapp_restcall, headers=headers, verify=False)
 
             if response.status_code != 200:
-                log.error("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
-                                                                                  response.content))
+                log.error(
+                    "REST API call {} failed. Return status code {}".format(
+                        get_vapp_restcall, response.content
+                    )
+                )
                 return parsed_respond
 
             try:
                 xmlroot_respond = XmlElementTree.fromstring(response.content)
 
-                namespaces = {'vm': 'http://www.vmware.com/vcloud/v1.5',
-                              "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
-                              "xmlns": "http://www.vmware.com/vcloud/v1.5"}
+                namespaces = {
+                    "vm": "http://www.vmware.com/vcloud/v1.5",
+                    "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
+                    "xmlns": "http://www.vmware.com/vcloud/v1.5",
+                }
 
                 # parse children section for other attrib
-                children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                children_section = xmlroot_respond.find("vm:Children/", namespaces)
                 if children_section is not None:
-                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                    vCloud_extension_section = children_section.find(
+                        "xmlns:VCloudExtension", namespaces
+                    )
                     if vCloud_extension_section is not None:
                         vm_vcenter_info = {}
-                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
-                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                        vim_info = vCloud_extension_section.find(
+                            "vmext:VmVimInfo", namespaces
+                        )
+                        vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
                         if vmext is not None:
-                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                            vm_vcenter_info["vm_moref_id"] = vmext.find(
+                                "vmext:MoRef", namespaces
+                            ).text
                         parsed_respond["vm_vcenter_info"] = vm_vcenter_info
 
             except Exception as exp:
-                log.warning("Error occurred for getting vApp details: {}\n{}".format(exp,
-                                                                                     traceback.format_exc()))
+                log.warning(
+                    "Error occurred for getting vApp details: {}\n{}".format(
+                        exp, traceback.format_exc()
+                    )
+                )
 
         return parsed_respond
 
     def collect(self, vnfr: dict):
-        vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+        vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
         vdu_mappings = {}
 
         # Populate extra tags for metrics
-        nsr_id = vnfr['nsr-id-ref']
+        nsr_id = vnfr["nsr-id-ref"]
         tags = {}
-        tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
-        if vnfr['_admin']['projects_read']:
-            tags['project_id'] = vnfr['_admin']['projects_read'][0]
+        tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+        if vnfr["_admin"]["projects_read"]:
+            tags["project_id"] = vnfr["_admin"]["projects_read"][0]
         else:
-            tags['project_id'] = ''
+            tags["project_id"] = ""
 
         # Fetch the list of all known resources from vROPS.
         resource_list = self.vrops.get_vm_resource_list_from_vrops()
 
-        for vdur in vnfr['vdur']:
+        for vdur in vnfr["vdur"]:
             # This avoids errors when vdur records have not been completely filled
-            if 'name' not in vdur:
+            if "name" not in vdur:
                 continue
-            vdu = next(
-                filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
-            )
+            vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
 
-            if 'monitoring-parameter' not in vdu:
+            if "monitoring-parameter" not in vdu:
                 continue
 
-            resource_uuid = vdur['vim-id']
+            resource_uuid = vdur["vim-id"]
             # Find vm_moref_id from vApp uuid in vCD
             vim_id = self.get_vm_moref_id(resource_uuid)
             if vim_id is None:
-                log.debug("Failed to find vROPS ID for vApp in vCD: {}".format(resource_uuid))
+                log.debug(
+                    "Failed to find vROPS ID for vApp in vCD: {}".format(resource_uuid)
+                )
                 continue
 
-            vdu_mappings[vim_id] = {'name': vdur['name']}
+            vdu_mappings[vim_id] = {"name": vdur["name"]}
 
             # Map the vROPS instance id to the vim-id so we can look it up.
             for resource in resource_list:
-                for resourceIdentifier in resource['resourceKey']['resourceIdentifiers']:
-                    if resourceIdentifier['identifierType']['name'] == 'VMEntityObjectID':
-                        if resourceIdentifier['value'] != vim_id:
+                for resourceIdentifier in resource["resourceKey"][
+                    "resourceIdentifiers"
+                ]:
+                    if (
+                        resourceIdentifier["identifierType"]["name"]
+                        == "VMEntityObjectID"
+                    ):
+                        if resourceIdentifier["value"] != vim_id:
                             continue
-                        vdu_mappings[vim_id]['vrops_id'] = resource['identifier']
+                        vdu_mappings[vim_id]["vrops_id"] = resource["identifier"]
 
         if len(vdu_mappings) != 0:
-            return self.vrops.get_metrics(vdu_mappings=vdu_mappings,
-                                          monitoring_params=vdu['monitoring-parameter'],
-                                          vnfr=vnfr,
-                                          tags=tags
-                                          )
+            return self.vrops.get_metrics(
+                vdu_mappings=vdu_mappings,
+                monitoring_params=vdu["monitoring-parameter"],
+                vnfr=vnfr,
+                tags=tags,
+            )
         else:
             return []
index 775aa10..2196326 100644 (file)
@@ -39,73 +39,75 @@ METRIC_MULTIPLIERS = {
     "disk_read_bytes": 1024,
     "disk_write_bytes": 1024,
     "packets_received": 1024,
-    "packets_sent": 1024
+    "packets_sent": 1024,
 }
 
 
-class vROPS_Helper():
-
-    def __init__(self,
-                 vrops_site='https://vrops',
-                 vrops_user='',
-                 vrops_password=''):
+class vROPS_Helper:
+    def __init__(self, vrops_site="https://vrops", vrops_user="", vrops_password=""):
         self.vrops_site = vrops_site
         self.vrops_user = vrops_user
         self.vrops_password = vrops_password
 
     def get_vrops_token(self):
-        """Fetches token from vrops
-        """
-        auth_url = '/suite-api/api/auth/token/acquire'
-        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+        """Fetches token from vrops"""
+        auth_url = "/suite-api/api/auth/token/acquire"
+        headers = {"Content-Type": "application/json", "Accept": "application/json"}
         req_body = {"username": self.vrops_user, "password": self.vrops_password}
-        resp = requests.post(self.vrops_site + auth_url,
-                             json=req_body, verify=False,
-                             headers=headers)
+        resp = requests.post(
+            self.vrops_site + auth_url, json=req_body, verify=False, headers=headers
+        )
         if resp.status_code != 200:
-            log.error("Failed to get token from vROPS: {} {}".format(resp.status_code,
-                                                                     resp.content))
+            log.error(
+                "Failed to get token from vROPS: {} {}".format(
+                    resp.status_code, resp.content
+                )
+            )
             return None
 
-        resp_data = json.loads(resp.content.decode('utf-8'))
-        return resp_data['token']
+        resp_data = json.loads(resp.content.decode("utf-8"))
+        return resp_data["token"]
 
     def get_vm_resource_list_from_vrops(self):
-        """ Find all known resource IDs in vROPs
-        """
+        """Find all known resource IDs in vROPs"""
         auth_token = self.get_vrops_token()
-        api_url = '/suite-api/api/resources?resourceKind=VirtualMachine'
-        headers = {'Accept': 'application/json', 'Authorization': 'vRealizeOpsToken {}'.format(auth_token)}
+        api_url = "/suite-api/api/resources?resourceKind=VirtualMachine"
+        headers = {
+            "Accept": "application/json",
+            "Authorization": "vRealizeOpsToken {}".format(auth_token),
+        }
         resource_list = []
 
-        resp = requests.get(self.vrops_site + api_url,
-                            verify=False, headers=headers)
+        resp = requests.get(self.vrops_site + api_url, verify=False, headers=headers)
 
         if resp.status_code != 200:
-            log.error("Failed to get resource list from vROPS: {} {}".format(resp.status_code,
-                                                                             resp.content))
+            log.error(
+                "Failed to get resource list from vROPS: {} {}".format(
+                    resp.status_code, resp.content
+                )
+            )
             return resource_list
 
         try:
-            resp_data = json.loads(resp.content.decode('utf-8'))
-            if resp_data.get('resourceList') is not None:
-                resource_list = resp_data.get('resourceList')
+            resp_data = json.loads(resp.content.decode("utf-8"))
+            if resp_data.get("resourceList") is not None:
+                resource_list = resp_data.get("resourceList")
 
         except Exception as exp:
-            log.error("get_vm_resource_id: Error in parsing {}\n{}".format(exp, traceback.format_exc()))
+            log.error(
+                "get_vm_resource_id: Error in parsing {}\n{}".format(
+                    exp, traceback.format_exc()
+                )
+            )
 
         return resource_list
 
-    def get_metrics(self,
-                    vdu_mappings={},
-                    monitoring_params={},
-                    vnfr=None,
-                    tags={}):
+    def get_metrics(self, vdu_mappings={}, monitoring_params={}, vnfr=None, tags={}):
 
         monitoring_keys = {}
         # Collect the names of all the metrics we need to query
         for metric_entry in monitoring_params:
-            metric_name = metric_entry['performance-metric']
+            metric_name = metric_entry["performance-metric"]
             if metric_name not in METRIC_MAPPINGS:
                 log.debug("Metric {} not supported, ignoring".format(metric_name))
                 continue
@@ -122,66 +124,81 @@ class vROPS_Helper():
         sanitized_vdu_mappings = copy.deepcopy(vdu_mappings)
         for key in vdu_mappings.keys():
             vdu = vdu_mappings[key]
-            if 'vrops_id' not in vdu:
+            if "vrops_id" not in vdu:
                 log.info("Could not find vROPS id for vdu {}".format(vdu))
                 del sanitized_vdu_mappings[key]
                 continue
-            resource_ids += "&resourceId={}".format(vdu['vrops_id'])
+            resource_ids += "&resourceId={}".format(vdu["vrops_id"])
         vdu_mappings = sanitized_vdu_mappings
 
         try:
 
             # Now we can make a single call to vROPS to collect all relevant metrics for resources we need to monitor
-            api_url = "/suite-api/api/resources/stats?IntervalType=MINUTES&IntervalCount=1"\
+            api_url = (
+                "/suite-api/api/resources/stats?IntervalType=MINUTES&IntervalCount=1"
                 "&rollUpType=MAX&currentOnly=true{}{}".format(stats_key, resource_ids)
+            )
 
             auth_token = self.get_vrops_token()
-            headers = {'Accept': 'application/json', 'Authorization': 'vRealizeOpsToken {}'.format(auth_token)}
+            headers = {
+                "Accept": "application/json",
+                "Authorization": "vRealizeOpsToken {}".format(auth_token),
+            }
 
-            resp = requests.get(self.vrops_site + api_url,
-                                verify=False, headers=headers)
+            resp = requests.get(
+                self.vrops_site + api_url, verify=False, headers=headers
+            )
 
             if resp.status_code != 200:
-                log.error("Failed to get Metrics data from vROPS for {} {}".format(resp.status_code,
-                                                                                   resp.content))
+                log.error(
+                    "Failed to get Metrics data from vROPS for {} {}".format(
+                        resp.status_code, resp.content
+                    )
+                )
                 return metrics
 
-            m_data = json.loads(resp.content.decode('utf-8'))
-            if 'values' not in m_data:
+            m_data = json.loads(resp.content.decode("utf-8"))
+            if "values" not in m_data:
                 return metrics
 
-            statistics = m_data['values']
+            statistics = m_data["values"]
             for vdu_stat in statistics:
-                vrops_id = vdu_stat['resourceId']
+                vrops_id = vdu_stat["resourceId"]
                 vdu_name = None
                 for vdu in vdu_mappings.values():
-                    if vdu['vrops_id'] == vrops_id:
-                        vdu_name = vdu['name']
+                    if vdu["vrops_id"] == vrops_id:
+                        vdu_name = vdu["name"]
                 if vdu_name is None:
                     continue
-                for item in vdu_stat['stat-list']['stat']:
-                    reported_metric = item['statKey']['key']
+                for item in vdu_stat["stat-list"]["stat"]:
+                    reported_metric = item["statKey"]["key"]
                     if reported_metric not in METRIC_MAPPINGS.values():
                         continue
 
                     # Convert the vROPS metric name back to OSM key
-                    metric_name = list(METRIC_MAPPINGS.keys())[list(METRIC_MAPPINGS.values()).
-                                                               index(reported_metric)]
+                    metric_name = list(METRIC_MAPPINGS.keys())[
+                        list(METRIC_MAPPINGS.values()).index(reported_metric)
+                    ]
                     if metric_name in monitoring_keys.keys():
-                        metric_value = item['data'][-1]
+                        metric_value = item["data"][-1]
                         if metric_name in METRIC_MULTIPLIERS:
                             metric_value *= METRIC_MULTIPLIERS[metric_name]
-                        metric = VnfMetric(vnfr['nsr-id-ref'],
-                                           vnfr['member-vnf-index-ref'],
-                                           vdu_name,
-                                           metric_name,
-                                           metric_value,
-                                           tags
-                                           )
+                        metric = VnfMetric(
+                            vnfr["nsr-id-ref"],
+                            vnfr["member-vnf-index-ref"],
+                            vdu_name,
+                            metric_name,
+                            metric_value,
+                            tags,
+                        )
 
                         metrics.append(metric)
 
         except Exception as exp:
-            log.error("Exception while parsing metrics data from vROPS {}\n{}".format(exp, traceback.format_exc()))
+            log.error(
+                "Exception while parsing metrics data from vROPS {}\n{}".format(
+                    exp, traceback.format_exc()
+                )
+            )
 
         return metrics
index 49e7842..961a4ef 100644 (file)
@@ -26,13 +26,15 @@ log = logging.getLogger(__name__)
 
 
 class VnfMetric(Metric):
-    def __init__(self, nsr_id, vnf_member_index, vdur_name, name, value, extra_tags: dict = None):
+    def __init__(
+        self, nsr_id, vnf_member_index, vdur_name, name, value, extra_tags: dict = None
+    ):
         tags = {
-            'ns_id': nsr_id,
-            'vnf_member_index': vnf_member_index,
-            'vdu_name': vdur_name
+            "ns_id": nsr_id,
+            "vnf_member_index": vnf_member_index,
+            "vdu_name": vdur_name,
         }
         if extra_tags:
             tags.update(extra_tags)
-        log.debug('Tags: %s', tags)
+        log.debug("Tags: %s", tags)
         super().__init__(tags, name, value)
index aa65388..df8db60 100644 (file)
@@ -31,35 +31,38 @@ from osm_mon.core.models import Alarm
 
 class CommonDbClient:
     def __init__(self, config: Config):
-        if config.get('database', 'driver') == "mongo":
+        if config.get("database", "driver") == "mongo":
             self.common_db = dbmongo.DbMongo()
-        elif config.get('database', 'driver') == "memory":
+        elif config.get("database", "driver") == "memory":
             self.common_db = dbmemory.DbMemory()
         else:
-            raise Exception("Unknown database driver {}".format(config.get('section', 'driver')))
+            raise Exception(
+                "Unknown database driver {}".format(config.get("section", "driver"))
+            )
         self.common_db.db_connect(config.get("database"))
 
     def get_vnfr(self, nsr_id: str, member_index: int):
-        vnfr = self.common_db.get_one("vnfrs",
-                                      {"nsr-id-ref": nsr_id, "member-vnf-index-ref": str(member_index)})
+        vnfr = self.common_db.get_one(
+            "vnfrs", {"nsr-id-ref": nsr_id, "member-vnf-index-ref": str(member_index)}
+        )
         return vnfr
 
     def get_vnfrs(self, nsr_id: str = None, vim_account_id: str = None):
         if nsr_id and vim_account_id:
             raise NotImplementedError("Only one filter is currently supported")
         if nsr_id:
-            vnfrs = [self.get_vnfr(nsr_id, member['member-vnf-index']) for member in
-                     self.get_nsr(nsr_id)['nsd']['constituent-vnfd']]
+            vnfrs = [
+                self.get_vnfr(nsr_id, member["member-vnf-index"])
+                for member in self.get_nsr(nsr_id)["nsd"]["constituent-vnfd"]
+            ]
         elif vim_account_id:
-            vnfrs = self.common_db.get_list("vnfrs",
-                                            {"vim-account-id": vim_account_id})
+            vnfrs = self.common_db.get_list("vnfrs", {"vim-account-id": vim_account_id})
         else:
-            vnfrs = self.common_db.get_list('vnfrs')
+            vnfrs = self.common_db.get_list("vnfrs")
         return vnfrs
 
     def get_vnfd(self, vnfd_id: str):
-        vnfd = self.common_db.get_one("vnfds",
-                                      {"_id": vnfd_id})
+        vnfd = self.common_db.get_one("vnfds", {"_id": vnfd_id})
         return vnfd
 
     def get_vnfd_by_id(self, vnfd_id: str, filter: dict = {}):
@@ -76,92 +79,104 @@ class CommonDbClient:
             return None
 
     def get_nsrs(self):
-        return self.common_db.get_list('nsrs')
+        return self.common_db.get_list("nsrs")
 
     def get_nsr(self, nsr_id: str):
-        nsr = self.common_db.get_one("nsrs",
-                                     {"id": nsr_id})
+        nsr = self.common_db.get_one("nsrs", {"id": nsr_id})
         return nsr
 
     def get_nslcmop(self, nslcmop_id):
-        nslcmop = self.common_db.get_one("nslcmops",
-                                         {"_id": nslcmop_id})
+        nslcmop = self.common_db.get_one("nslcmops", {"_id": nslcmop_id})
         return nslcmop
 
     def get_vdur(self, nsr_id, member_index, vdur_name):
         vnfr = self.get_vnfr(nsr_id, member_index)
-        for vdur in vnfr['vdur']:
-            if vdur['name'] == vdur_name:
+        for vdur in vnfr["vdur"]:
+            if vdur["name"] == vdur_name:
                 return vdur
-        raise ValueError('vdur not found for nsr-id {}, member_index {} and vdur_name {}'.format(nsr_id, member_index,
-                                                                                                 vdur_name))
+        raise ValueError(
+            "vdur not found for nsr-id {}, member_index {} and vdur_name {}".format(
+                nsr_id, member_index, vdur_name
+            )
+        )
 
     def decrypt_vim_password(self, vim_password: str, schema_version: str, vim_id: str):
         return self.common_db.decrypt(vim_password, schema_version, vim_id)
 
-    def decrypt_sdnc_password(self, sdnc_password: str, schema_version: str, sdnc_id: str):
+    def decrypt_sdnc_password(
+        self, sdnc_password: str, schema_version: str, sdnc_id: str
+    ):
         return self.common_db.decrypt(sdnc_password, schema_version, sdnc_id)
 
     def get_vim_account_id(self, nsr_id: str, vnf_member_index: int) -> str:
         vnfr = self.get_vnfr(nsr_id, vnf_member_index)
-        return vnfr['vim-account-id']
+        return vnfr["vim-account-id"]
 
     def get_vim_accounts(self):
-        return self.common_db.get_list('vim_accounts')
+        return self.common_db.get_list("vim_accounts")
 
     def get_vim_account(self, vim_account_id: str) -> dict:
-        vim_account = self.common_db.get_one('vim_accounts', {"_id": vim_account_id})
-        vim_account['vim_password'] = self.decrypt_vim_password(vim_account['vim_password'],
-                                                                vim_account['schema_version'],
-                                                                vim_account_id)
+        vim_account = self.common_db.get_one("vim_accounts", {"_id": vim_account_id})
+        vim_account["vim_password"] = self.decrypt_vim_password(
+            vim_account["vim_password"], vim_account["schema_version"], vim_account_id
+        )
         vim_config_encrypted_dict = {
             "1.1": ("admin_password", "nsx_password", "vcenter_password"),
-            "default": ("admin_password", "nsx_password", "vcenter_password", "vrops_password")
+            "default": (
+                "admin_password",
+                "nsx_password",
+                "vcenter_password",
+                "vrops_password",
+            ),
         }
-        vim_config_encrypted = vim_config_encrypted_dict['default']
-        if vim_account['schema_version'] in vim_config_encrypted_dict.keys():
-            vim_config_encrypted = vim_config_encrypted_dict[vim_account['schema_version']]
-        if 'config' in vim_account:
-            for key in vim_account['config']:
+        vim_config_encrypted = vim_config_encrypted_dict["default"]
+        if vim_account["schema_version"] in vim_config_encrypted_dict.keys():
+            vim_config_encrypted = vim_config_encrypted_dict[
+                vim_account["schema_version"]
+            ]
+        if "config" in vim_account:
+            for key in vim_account["config"]:
                 if key in vim_config_encrypted:
-                    vim_account['config'][key] = self.decrypt_vim_password(vim_account['config'][key],
-                                                                           vim_account['schema_version'],
-                                                                           vim_account_id)
+                    vim_account["config"][key] = self.decrypt_vim_password(
+                        vim_account["config"][key],
+                        vim_account["schema_version"],
+                        vim_account_id,
+                    )
         return vim_account
 
     def get_sdncs(self):
-        return self.common_db.get_list('sdns')
+        return self.common_db.get_list("sdns")
 
     def get_sdnc(self, sdnc_id: str):
-        return self.common_db.get_one('sdns', {'_id': sdnc_id})
+        return self.common_db.get_one("sdns", {"_id": sdnc_id})
 
     def get_projects(self):
-        return self.common_db.get_list('projects')
+        return self.common_db.get_list("projects")
 
     def get_project(self, project_id: str):
-        return self.common_db.get_one('projects', {'_id': project_id})
+        return self.common_db.get_one("projects", {"_id": project_id})
 
     def create_alarm(self, alarm: Alarm):
-        return self.common_db.create('alarms', alarm.to_dict())
+        return self.common_db.create("alarms", alarm.to_dict())
 
     def delete_alarm(self, alarm_uuid: str):
-        return self.common_db.del_one('alarms', {'uuid': alarm_uuid})
+        return self.common_db.del_one("alarms", {"uuid": alarm_uuid})
 
     def get_alarms(self) -> List[Alarm]:
         alarms = []
-        alarm_dicts = self.common_db.get_list('alarms')
+        alarm_dicts = self.common_db.get_list("alarms")
         for alarm_dict in alarm_dicts:
             alarms.append(Alarm.from_dict(alarm_dict))
         return alarms
 
     def get_user(self, username: str):
-        return self.common_db.get_one('users', {'username': username})
+        return self.common_db.get_one("users", {"username": username})
 
     def get_user_by_id(self, userid: str):
-        return self.common_db.get_one('users', {'_id': userid})
+        return self.common_db.get_one("users", {"_id": userid})
 
     def get_role_by_name(self, name: str):
-        return self.common_db.get_one('roles', {'name': name})
+        return self.common_db.get_one("roles", {"name": name})
 
     def get_role_by_id(self, role_id: str):
-        return self.common_db.get_one('roles', {'_id': role_id})
+        return self.common_db.get_one("roles", {"_id": role_id})
index cd99ffc..815e66e 100644 (file)
@@ -33,14 +33,14 @@ logger = logging.getLogger(__name__)
 
 
 class Config:
-    def __init__(self, config_file: str = ''):
+    def __init__(self, config_file: str = ""):
         self.conf = {}
         self._read_config_file(config_file)
         self._read_env()
 
     def _read_config_file(self, config_file):
         if not config_file:
-            path = 'mon.yaml'
+            path = "mon.yaml"
             config_file = pkg_resources.resource_filename(__name__, path)
         with open(config_file) as f:
             self.conf = yaml.load(f)
@@ -63,10 +63,12 @@ class Config:
             if len(elements) < 3:
                 logger.warning(
                     "Environment variable %s=%s does not comply with required format. Section and/or field missing.",
-                    env, os.getenv(env))
+                    env,
+                    os.getenv(env),
+                )
                 continue
             section = elements[1]
-            field = '_'.join(elements[2:])
+            field = "_".join(elements[2:])
             value = os.getenv(env)
             if section not in self.conf:
                 self.conf[section] = {}
index f389085..f5732bc 100644 (file)
@@ -31,13 +31,16 @@ class KeystoneConnection:
     Object representing a connection with keystone, it's main use is to collect
     projects and users from the OSM platform stored in keystone instead MongoDB
     """
+
     def __init__(self, config):
-        self.auth_url = config.get('keystone', 'url')
-        self.username = config.get('keystone', 'service_user')
-        self.project_name = config.get('keystone', 'service_project')
-        self.project_domain_name_list = config.get('keystone', 'service_project_domain_name').split(",")
-        self.password = config.get('keystone', 'service_password')
-        self.user_domain_name_list = config.get('keystone', 'domain_name').split(",")
+        self.auth_url = config.get("keystone", "url")
+        self.username = config.get("keystone", "service_user")
+        self.project_name = config.get("keystone", "service_project")
+        self.project_domain_name_list = config.get(
+            "keystone", "service_project_domain_name"
+        ).split(",")
+        self.password = config.get("keystone", "service_password")
+        self.user_domain_name_list = config.get("keystone", "domain_name").split(",")
 
         self.auth = v3.Password(
             auth_url=self.auth_url,
@@ -45,11 +48,13 @@ class KeystoneConnection:
             username=self.username,
             password=self.password,
             project_domain_name=self.project_domain_name_list[0],
-            project_name=self.project_name
+            project_name=self.project_name,
         )
 
         self.keystone_session = session.Session(auth=self.auth)
-        self.keystone_client = client.Client(session=self.keystone_session, endpoint_override=self.auth_url)
+        self.keystone_client = client.Client(
+            session=self.keystone_session, endpoint_override=self.auth_url
+        )
 
     def getProjects(self):
         """
index 6a7ef60..7d194c3 100644 (file)
@@ -31,13 +31,15 @@ from osm_mon.core.config import Config
 
 class MessageBusClient:
     def __init__(self, config: Config, loop=None):
-        if config.get('message', 'driver') == "local":
+        if config.get("message", "driver") == "local":
             self.msg_bus = msglocal.MsgLocal()
-        elif config.get('message', 'driver') == "kafka":
+        elif config.get("message", "driver") == "kafka":
             self.msg_bus = msgkafka.MsgKafka()
         else:
-            raise Exception("Unknown message bug driver {}".format(config.get('section', 'driver')))
-        self.msg_bus.connect(config.get('message'))
+            raise Exception(
+                "Unknown message bug driver {}".format(config.get("section", "driver"))
+            )
+        self.msg_bus.connect(config.get("message"))
         if not loop:
             loop = asyncio.get_event_loop()
         self.loop = loop
index ca25d8b..8fe3b6b 100644 (file)
@@ -24,9 +24,16 @@ import uuid
 
 
 class Alarm:
-
-    def __init__(self, name: str = None, severity: str = None, threshold: float = None, operation: str = None,
-                 statistic: str = None, metric: str = None, tags: dict = {}):
+    def __init__(
+        self,
+        name: str = None,
+        severity: str = None,
+        threshold: float = None,
+        operation: str = None,
+        statistic: str = None,
+        metric: str = None,
+        tags: dict = {},
+    ):
         self.uuid = str(uuid.uuid4())
         self.name = name
         self.severity = severity
@@ -38,26 +45,26 @@ class Alarm:
 
     def to_dict(self) -> dict:
         alarm = {
-            'uuid': self.uuid,
-            'name': self.name,
-            'severity': self.severity,
-            'threshold': self.threshold,
-            'statistic': self.statistic,
-            'metric': self.metric,
-            'tags': self.tags,
-            'operation': self.operation
+            "uuid": self.uuid,
+            "name": self.name,
+            "severity": self.severity,
+            "threshold": self.threshold,
+            "statistic": self.statistic,
+            "metric": self.metric,
+            "tags": self.tags,
+            "operation": self.operation,
         }
         return alarm
 
     @staticmethod
     def from_dict(data: dict):
         alarm = Alarm()
-        alarm.uuid = data.get('uuid', str(uuid.uuid4()))
-        alarm.name = data.get('name')
-        alarm.severity = data.get('severity')
-        alarm.threshold = float(data.get('threshold'))
-        alarm.statistic = data.get('statistic')
-        alarm.metric = data.get('metric')
-        alarm.tags = data.get('tags')
-        alarm.operation = data.get('operation')
+        alarm.uuid = data.get("uuid", str(uuid.uuid4()))
+        alarm.name = data.get("name")
+        alarm.severity = data.get("severity")
+        alarm.threshold = float(data.get("threshold"))
+        alarm.statistic = data.get("statistic")
+        alarm.metric = data.get("metric")
+        alarm.tags = data.get("tags")
+        alarm.operation = data.get("operation")
         return alarm
index 99b0b98..0879fcb 100644 (file)
@@ -50,35 +50,44 @@ class ResponseBuilder(object):
 
     def create_alarm_response(self, **kwargs) -> dict:
         """Generate a response for a create alarm request."""
-        create_alarm_resp = {"schema_version": schema_version,
-                             "schema_type": "create_alarm_response",
-                             "alarm_create_response": {
-                                 "correlation_id": kwargs['cor_id'],
-                                 "alarm_uuid": kwargs['alarm_id'],
-                                 "status": kwargs['status']}}
+        create_alarm_resp = {
+            "schema_version": schema_version,
+            "schema_type": "create_alarm_response",
+            "alarm_create_response": {
+                "correlation_id": kwargs["cor_id"],
+                "alarm_uuid": kwargs["alarm_id"],
+                "status": kwargs["status"],
+            },
+        }
         return create_alarm_resp
 
     def delete_alarm_response(self, **kwargs) -> dict:
         """Generate a response for a delete alarm request."""
-        delete_alarm_resp = {"schema_version": schema_version,
-                             "schema_type": "alarm_delete_response",
-                             "alarm_delete_response": {
-                                 "correlation_id": kwargs['cor_id'],
-                                 "alarm_uuid": kwargs['alarm_id'],
-                                 "status": kwargs['status']}}
+        delete_alarm_resp = {
+            "schema_version": schema_version,
+            "schema_type": "alarm_delete_response",
+            "alarm_delete_response": {
+                "correlation_id": kwargs["cor_id"],
+                "alarm_uuid": kwargs["alarm_id"],
+                "status": kwargs["status"],
+            },
+        }
         return delete_alarm_resp
 
     def notify_alarm(self, **kwargs) -> dict:
         """Generate a response to send alarm notifications."""
-        notify_alarm_resp = {"schema_version": schema_version,
-                             "schema_type": "notify_alarm",
-                             "notify_details": {
-                                 "alarm_uuid": kwargs['alarm_id'],
-                                 "metric_name": kwargs['metric_name'],
-                                 "threshold_value": kwargs['threshold_value'],
-                                 "operation": kwargs['operation'],
-                                 "severity": kwargs['sev'],
-                                 "status": kwargs['status'],
-                                 "start_date": kwargs['date'],
-                                 "tags": kwargs['tags']}}
+        notify_alarm_resp = {
+            "schema_version": schema_version,
+            "schema_type": "notify_alarm",
+            "notify_details": {
+                "alarm_uuid": kwargs["alarm_id"],
+                "metric_name": kwargs["metric_name"],
+                "threshold_value": kwargs["threshold_value"],
+                "operation": kwargs["operation"],
+                "severity": kwargs["sev"],
+                "status": kwargs["status"],
+                "start_date": kwargs["date"],
+                "tags": kwargs["tags"],
+            },
+        }
         return notify_alarm_resp
index a4be33c..c68d53d 100644 (file)
@@ -21,6 +21,7 @@
 # contact: fbravo@whitestack.com or agarcia@whitestack.com
 ##
 
+
 def find_in_list(the_list, condition_lambda):
     for item in the_list:
         if condition_lambda(item):
index 73459f7..acacf12 100644 (file)
@@ -32,27 +32,33 @@ log = logging.getLogger(__name__)
 class GrafanaBackend:
     def __init__(self, config: Config):
         self.conf = config
-        self.url = config.get('grafana', 'url')
+        self.url = config.get("grafana", "url")
         grafana_user = config.get("grafana", "user")
         grafana_password = config.get("grafana", "password")
         self.headers = {
-            'content-type': "application/json",
-            'authorization': "Basic %s" % base64.b64encode(
-                (grafana_user + ":" + grafana_password).encode("utf-8")).decode()
+            "content-type": "application/json",
+            "authorization": "Basic %s"
+            % base64.b64encode(
+                (grafana_user + ":" + grafana_password).encode("utf-8")
+            ).decode(),
         }
 
     def get_all_dashboard_uids(self):
         # Gets only dashboards that were automated by OSM (with tag 'osm_automated')
-        response = requests.request("GET", self.url + "/api/search?tag=osm_automated", headers=self.headers)
+        response = requests.request(
+            "GET", self.url + "/api/search?tag=osm_automated", headers=self.headers
+        )
         dashboards = response.json()
         dashboard_uids = []
         for dashboard in dashboards:
-            dashboard_uids.append(dashboard['uid'])
+            dashboard_uids.append(dashboard["uid"])
         log.debug("Searching for all dashboard uids: %s", dashboard_uids)
         return dashboard_uids
 
     def get_dashboard_status(self, uid):
-        response = requests.request("GET", self.url + "/api/dashboards/uid/" + uid, headers=self.headers)
+        response = requests.request(
+            "GET", self.url + "/api/dashboards/uid/" + uid, headers=self.headers
+        )
         log.debug("Searching for dashboard result: %s", response.text)
         return response
 
@@ -61,7 +67,9 @@ class GrafanaBackend:
             with open(json_file) as f:
                 dashboard_data = f.read()
 
-            dashboard_data = dashboard_data.replace('OSM_ID', uid).replace('OSM_NAME', name)
+            dashboard_data = dashboard_data.replace("OSM_ID", uid).replace(
+                "OSM_NAME", name
+            )
             dashboard_json_data = json.loads(dashboard_data)
             # Get folder id
             if project_name:
@@ -69,7 +77,10 @@ class GrafanaBackend:
             else:
                 folder_name = name
             response_folder_id = requests.request(
-                "GET", self.url + "/api/folders/{}".format(folder_name), headers=self.headers)
+                "GET",
+                self.url + "/api/folders/{}".format(folder_name),
+                headers=self.headers,
+            )
             if response_folder_id.status_code == 200:
                 folder_id = json.loads(response_folder_id.text)["id"]
                 dashboard_json_data["folderId"] = folder_id
@@ -80,9 +91,11 @@ class GrafanaBackend:
             # Admin dashboard will be created if already exists. Rest will remain same.
             if json.loads(response.text).get("status") == "name-exists":
                 # Delete any previous project-admin dashboard if it already exist.
-                if name == 'admin':
+                if name == "admin":
                     self.delete_admin_dashboard()
-                    response = self.send_request_for_creating_dashboard(dashboard_json_data)
+                    response = self.send_request_for_creating_dashboard(
+                        dashboard_json_data
+                    )
                 else:
                     return
 
@@ -90,15 +103,20 @@ class GrafanaBackend:
             if project_name is not None:
                 name = project_name
             response_team = requests.request(
-                "GET", self.url + "/api/teams/search?name={}".format(name), headers=self.headers)
+                "GET",
+                self.url + "/api/teams/search?name={}".format(name),
+                headers=self.headers,
+            )
 
             # Remove default permissions of admin user's dashboard so that it is not visible to non-admin users
             if len(json.loads(response_team.text)["teams"]) == 0:
                 # As team information is not available so it is admin user
                 dahboard_id = json.loads(response.text)["id"]
                 requests.request(
-                    "POST", self.url + "/api/dashboards/id/{}/permissions".format(dahboard_id),
-                    headers=self.headers)
+                    "POST",
+                    self.url + "/api/dashboards/id/{}/permissions".format(dahboard_id),
+                    headers=self.headers,
+                )
 
             log.info("Dashboard %s is created in Grafana", name)
             return response
@@ -107,17 +125,26 @@ class GrafanaBackend:
 
     def send_request_for_creating_dashboard(self, dashboard_data):
         response = requests.request(
-            "POST", self.url + "/api/dashboards/db/", data=json.dumps(dashboard_data), headers=self.headers)
+            "POST",
+            self.url + "/api/dashboards/db/",
+            data=json.dumps(dashboard_data),
+            headers=self.headers,
+        )
         return response
 
     def delete_dashboard(self, uid):
-        response = requests.request("DELETE", self.url + "/api/dashboards/uid/" + uid, headers=self.headers)
+        response = requests.request(
+            "DELETE", self.url + "/api/dashboards/uid/" + uid, headers=self.headers
+        )
         log.debug("Dashboard %s deleted from Grafana", uid)
         return response
 
     def delete_admin_dashboard(self):
         requests.request(
-            "DELETE", self.url + "/api/dashboards/db/osm-project-status-admin", headers=self.headers)
+            "DELETE",
+            self.url + "/api/dashboards/db/osm-project-status-admin",
+            headers=self.headers,
+        )
         log.debug("Dashboard osm-project-status-admin deleted from Grafana")
 
     def create_grafana_users(self, user):
@@ -128,20 +155,33 @@ class GrafanaBackend:
             "login": user,
             "password": user,
         }
-        response_users = requests.request("POST", self.url + "/api/admin/users/", json=user_payload,
-                                          headers=self.headers)
+        response_users = requests.request(
+            "POST",
+            self.url + "/api/admin/users/",
+            json=user_payload,
+            headers=self.headers,
+        )
         json_data = json.loads(response_users.text)
         url = "/api/org/users/{}/".format(json_data["id"])
-        permission_payload = {"role": "Editor", }
-        requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+        permission_payload = {
+            "role": "Editor",
+        }
+        requests.request(
+            "PATCH", self.url + url, json=permission_payload, headers=self.headers
+        )
         log.info("New user %s created in Grafana", user)
         return response_users
 
     # Create Grafana team with member
-    def create_grafana_teams_members(self, project_name, user_name, is_admin, proj_list):
+    def create_grafana_teams_members(
+        self, project_name, user_name, is_admin, proj_list
+    ):
         # Check if user exist in Grafana
-        user_response = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
-                                         headers=self.headers)
+        user_response = requests.request(
+            "GET",
+            self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+            headers=self.headers,
+        )
         user_obj = json.loads(user_response.text)
         if user_response.status_code != 200:
             user_response = self.create_grafana_users(user_name)
@@ -150,7 +190,11 @@ class GrafanaBackend:
         user_id = user_obj["id"]
 
         # Get teams for user
-        team_objs = requests.request("GET", self.url + "/api/users/{}/teams".format(user_id), headers=self.headers)
+        team_objs = requests.request(
+            "GET",
+            self.url + "/api/users/{}/teams".format(user_id),
+            headers=self.headers,
+        )
         team_obj = json.loads(team_objs.text)
         team_list = []
         if len(team_obj):
@@ -159,116 +203,192 @@ class GrafanaBackend:
 
         proj_unlink = set(team_list) - set(proj_list)
         for prj in proj_unlink:
-            response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(prj),
-                                             headers=self.headers)
+            response_team = requests.request(
+                "GET",
+                self.url + "/api/teams/search?name={}".format(prj),
+                headers=self.headers,
+            )
             team_id = json.loads(response_team.text)["teams"][0]["id"]
-            requests.request("DELETE", self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
-                             headers=self.headers)
+            requests.request(
+                "DELETE",
+                self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
+                headers=self.headers,
+            )
         if project_name != "admin":
             # Add member to team
-            response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
-                                             headers=self.headers)
+            response_team = requests.request(
+                "GET",
+                self.url + "/api/teams/search?name={}".format(project_name),
+                headers=self.headers,
+            )
 
             # Search if team in Grafana corresponding to the project already exists
             if not json.loads(response_team.text)["teams"]:
                 self.create_grafana_teams(project_name)
-                response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
-                                                 headers=self.headers)
+                response_team = requests.request(
+                    "GET",
+                    self.url + "/api/teams/search?name={}".format(project_name),
+                    headers=self.headers,
+                )
             team_id = json.loads(response_team.text)["teams"][0]["id"]
             if project_name not in team_list:
                 # Create a team in Grafana corresponding to the project as it doesn't exist
-                member_payload = {
-                    "userId": user_id
-                }
-                requests.request("POST", self.url + "/api/teams/{}/members".format(team_id), json=member_payload,
-                                 headers=self.headers)
+                member_payload = {"userId": user_id}
+                requests.request(
+                    "POST",
+                    self.url + "/api/teams/{}/members".format(team_id),
+                    json=member_payload,
+                    headers=self.headers,
+                )
         # Check if user role or project name is admin
-        if is_admin or project_name == 'admin':
+        if is_admin or project_name == "admin":
             # Give admin righsts to user
             url = "/api/org/users/{}/".format(user_id)
-            permission_payload = {"role": "Admin", }
-            requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+            permission_payload = {
+                "role": "Admin",
+            }
+            requests.request(
+                "PATCH", self.url + url, json=permission_payload, headers=self.headers
+            )
             log.info("User %s is assigned Admin permission", user_name)
         else:
             # Give editor rights to user
             url = "/api/org/users/{}/".format(user_id)
-            permission_payload = {"role": "Editor", }
-            requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+            permission_payload = {
+                "role": "Editor",
+            }
+            requests.request(
+                "PATCH", self.url + url, json=permission_payload, headers=self.headers
+            )
             log.info("User %s is assigned Editor permission", user_name)
 
     # Create team in Grafana
     def create_grafana_teams(self, team_name):
-        team_payload = {"name": team_name, }
-        requests.request("POST", self.url + "/api/teams", json=team_payload, headers=self.headers)
+        team_payload = {
+            "name": team_name,
+        }
+        requests.request(
+            "POST", self.url + "/api/teams", json=team_payload, headers=self.headers
+        )
         log.info("New team %s created in Grafana", team_name)
 
     # Create folder in Grafana
     def create_grafana_folders(self, folder_name):
         folder_payload = {"uid": folder_name, "title": folder_name}
-        requests.request("POST", self.url + "/api/folders", json=folder_payload, headers=self.headers)
+        requests.request(
+            "POST", self.url + "/api/folders", json=folder_payload, headers=self.headers
+        )
         log.info("Dashboard folder %s created", folder_name)
 
-        response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(folder_name),
-                                         headers=self.headers)
+        response_team = requests.request(
+            "GET",
+            self.url + "/api/teams/search?name={}".format(folder_name),
+            headers=self.headers,
+        )
         # Create team if it doesn't already exists
         if len(json.loads(response_team.text)["teams"]) == 0:
             self.create_grafana_teams(folder_name)
-            response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(folder_name),
-                                             headers=self.headers)
+            response_team = requests.request(
+                "GET",
+                self.url + "/api/teams/search?name={}".format(folder_name),
+                headers=self.headers,
+            )
         # Assign required permission to the team's folder
         team_id = json.loads(response_team.text)["teams"][0]["id"]
-        permission_data = {"items": [{"teamId": team_id, "permission": 2}, ]}
-        requests.request("POST", self.url + "/api/folders/{}/permissions".format(folder_name),
-                         json=permission_data, headers=self.headers)
+        permission_data = {
+            "items": [
+                {"teamId": team_id, "permission": 2},
+            ]
+        }
+        requests.request(
+            "POST",
+            self.url + "/api/folders/{}/permissions".format(folder_name),
+            json=permission_data,
+            headers=self.headers,
+        )
 
     # delete user from grafana
     def delete_grafana_users(self, user_name):
         # Get user id
-        response_id = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
-                                       headers=self.headers)
+        response_id = requests.request(
+            "GET",
+            self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+            headers=self.headers,
+        )
         try:
             user_id = json.loads(response_id.text)["id"]
         except Exception:
             log.exception("Exception processing message: ")
         # Delete user
-        response = requests.request("DELETE", self.url + "/api/admin/users/{}".format(user_id), headers=self.headers)
+        response = requests.request(
+            "DELETE",
+            self.url + "/api/admin/users/{}".format(user_id),
+            headers=self.headers,
+        )
         log.info("User %s deleted in Grafana", user_name)
         return response
 
     # delete team from grafana
     def delete_grafana_team(self, project_name):
         # Delete Grafana folder
-        requests.request("DELETE", self.url + "/api/folders/{}".format(project_name),
-                         headers=self.headers)
+        requests.request(
+            "DELETE",
+            self.url + "/api/folders/{}".format(project_name),
+            headers=self.headers,
+        )
         # Delete Grafana team
-        team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
-                                    headers=self.headers)
+        team_obj = requests.request(
+            "GET",
+            self.url + "/api/teams/search?name={}".format(project_name),
+            headers=self.headers,
+        )
         team_id = json.loads(team_obj.text)["teams"][0]["id"]
-        response = requests.request("DELETE", self.url + "/api/teams/{}".format(team_id), headers=self.headers)
+        response = requests.request(
+            "DELETE", self.url + "/api/teams/{}".format(team_id), headers=self.headers
+        )
         log.info("Team %s deleted in Grafana", project_name)
         return response
 
     # update grafana team
     def update_grafana_teams(self, project_new_name, project_old_name):
-        team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_old_name),
-                                    headers=self.headers)
+        team_obj = requests.request(
+            "GET",
+            self.url + "/api/teams/search?name={}".format(project_old_name),
+            headers=self.headers,
+        )
         team_id = json.loads(team_obj.text)["teams"][0]["id"]
-        data = {"name": project_new_name, }
-        response = requests.request("PUT", self.url + "/api/teams/{}".format(team_id), json=data, headers=self.headers)
+        data = {
+            "name": project_new_name,
+        }
+        response = requests.request(
+            "PUT",
+            self.url + "/api/teams/{}".format(team_id),
+            json=data,
+            headers=self.headers,
+        )
         log.info("Grafana team updated %s", response.text)
         return response
 
     # remove member from grafana team
     def remove_grafana_team_member(self, user_name, project_data):
         # Get user id
-        response_id = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
-                                       headers=self.headers)
+        response_id = requests.request(
+            "GET",
+            self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+            headers=self.headers,
+        )
         user_id = json.loads(response_id.text)["id"]
         for project in project_data:
             # Get team id
-            team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project['project']),
-                                        headers=self.headers)
+            team_obj = requests.request(
+                "GET",
+                self.url + "/api/teams/search?name={}".format(project["project"]),
+                headers=self.headers,
+            )
             team_id = json.loads(team_obj.text)["teams"][0]["id"]
-            response = requests.request("DELETE", self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
-                                        headers=self.headers)
+            response = requests.request(
+                "DELETE",
+                self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
+                headers=self.headers,
+            )
         return response
index 3b454b8..bed157a 100644 (file)
@@ -57,24 +57,31 @@ class Dashboarder:
             log.exception("Exception %s", str(e))
 
     async def _user_msg(self, topic, key, values):
-        log.debug("Message from kafka bus received: topic: %s and values: %s and key: %s", topic, values, key)
+        log.debug(
+            "Message from kafka bus received: topic: %s and values: %s and key: %s",
+            topic,
+            values,
+            key,
+        )
         try:
             if topic == "users" and key == "created":
                 log.debug("Received message from kafka for creating user")
-                if values.get('username'):
-                    user = values['username']
+                if values.get("username"):
+                    user = values["username"]
                 else:
-                    user = values['changes']['username']
+                    user = values["changes"]["username"]
                 self.service.create_grafana_user(user)
                 # user-created and mapping is done with osm cli
-                if values.get('changes'):
+                if values.get("changes"):
                     # user-project-role mapping is included in change
-                    if values['changes'].get('project_role_mappings'):
+                    if values["changes"].get("project_role_mappings"):
                         user_id = values["_id"]
                         project_data = values["changes"]["project_role_mappings"]
                         project_list = values["changes"].get("projects")
-                        self.service.create_grafana_team_member(project_data, user_id, project_list)
-                elif values.get('project_role_mappings'):
+                        self.service.create_grafana_team_member(
+                            project_data, user_id, project_list
+                        )
+                elif values.get("project_role_mappings"):
                     # for fresh project-role-mapping
                     user_id = values.get("_id")
                     project_data = values["project_role_mappings"]
@@ -82,19 +89,23 @@ class Dashboarder:
                         self.service.create_grafana_team_member(project_data, user_id)
                     else:
                         # for keystone we will get username
-                        self.service.create_grafana_team_member(project_data, user=values['username'])
+                        self.service.create_grafana_team_member(
+                            project_data, user=values["username"]
+                        )
             elif topic == "users" and key == "deleted":
                 log.debug("Received message from kafka for deleting user")
-                user = values['username']
+                user = values["username"]
                 self.service.delete_grafana_user(user)
             elif topic == "users" and key == "edited":
                 log.debug("Received message from kafka for associating user to team")
                 user_id = values["_id"]
-                if values["changes"].get("remove_project_role_mappings") and not \
-                        values["changes"].get("add_project_role_mappings"):
+                if values["changes"].get("remove_project_role_mappings") and not values[
+                    "changes"
+                ].get("add_project_role_mappings"):
                     # Removing user-project role mapping
-                    self.service.remove_grafana_team_members(user_id,
-                                                             values["changes"].get("remove_project_role_mappings"))
+                    self.service.remove_grafana_team_members(
+                        user_id, values["changes"].get("remove_project_role_mappings")
+                    )
                 else:
                     # Changing user project role mapping
                     if values["changes"].get("project_role_mappings"):
@@ -119,22 +130,22 @@ class Dashboarder:
             log.exception("Exception processing message: ")
 
     def dashboard_forever(self):
-        log.debug('dashboard_forever')
-        grafana_parsed_uri = urlparse(self.conf.get('grafana', 'url'))
+        log.debug("dashboard_forever")
+        grafana_parsed_uri = urlparse(self.conf.get("grafana", "url"))
         while True:
             try:
                 socket.gethostbyname(grafana_parsed_uri.hostname)
                 log.debug("Dashboard backend is running")
             except socket.error:
                 log.debug("Dashboard backend is not available")
-                time.sleep(int(self.conf.get('dashboarder', 'interval')))
+                time.sleep(int(self.conf.get("dashboarder", "interval")))
                 continue
             try:
                 self.create_dashboards()
-                time.sleep(int(self.conf.get('dashboarder', 'interval')))
+                time.sleep(int(self.conf.get("dashboarder", "interval")))
             except Exception:
                 log.exception("Error creating dashboards")
 
     def create_dashboards(self):
         self.service.create_dashboards()
-        log.debug('Dashboarder Service > create_dashboards called!')
+        log.debug("Dashboarder Service > create_dashboards called!")
index 441f881..48e8150 100644 (file)
@@ -38,7 +38,7 @@ class DashboarderService:
         self.common_db = CommonDbClient(self.conf)
         self.grafana = GrafanaBackend(self.conf)
 
-        if bool(self.conf.get('keystone', 'enabled')):
+        if bool(self.conf.get("keystone", "enabled")):
             self.keystone = KeystoneConnection(self.conf)
         else:
             self.keystone = None
@@ -54,50 +54,60 @@ class DashboarderService:
         if self.keystone:
             try:
                 projects.extend(
-                    map(lambda project: {'_id': project.id, 'name': project.name}, self.keystone.getProjects())
+                    map(
+                        lambda project: {"_id": project.id, "name": project.name},
+                        self.keystone.getProjects(),
+                    )
                 )
             except Exception:
-                log.error('Cannot retrieve projects from keystone')
+                log.error("Cannot retrieve projects from keystone")
         else:
             projects.extend(self.common_db.get_projects())
 
         # Reads existing project list and creates a dashboard for each
         for project in projects:
-            project_id = project['_id']
+            project_id = project["_id"]
             # Collect Project IDs for periodical dashboard clean-up
             osm_resource_uids.append(project_id)
-            dashboard_path = '{}/dashboarder/templates/project_scoped.json'.format(mon_path[0])
+            dashboard_path = "{}/dashboarder/templates/project_scoped.json".format(
+                mon_path[0]
+            )
             if project_id not in dashboard_uids:
-                project_name = project['name']
+                project_name = project["name"]
                 if project_name != "admin":
                     # Create project folder in Grafana only if user is not admin.
                     # Admin user's dashboard will be created in default folder
                     self.grafana.create_grafana_folders(project_name)
-                self.grafana.create_dashboard(project_id, project_name,
-                                              dashboard_path)
-                log.debug('Created dashboard for Project: %s', project_id)
+                self.grafana.create_dashboard(project_id, project_name, dashboard_path)
+                log.debug("Created dashboard for Project: %s", project_id)
             else:
-                log.debug('Dashboard already exists')
+                log.debug("Dashboard already exists")
 
         # Reads existing NS list and creates a dashboard for each
         # TODO lavado: only create for ACTIVE NSRs
         nsrs = self.common_db.get_nsrs()
         for nsr in nsrs:
-            nsr_id = nsr['_id']
-            dashboard_path = '{}/dashboarder/templates/ns_scoped.json'.format(mon_path[0])
+            nsr_id = nsr["_id"]
+            dashboard_path = "{}/dashboarder/templates/ns_scoped.json".format(
+                mon_path[0]
+            )
             # Collect NS IDs for periodical dashboard clean-up
             osm_resource_uids.append(nsr_id)
             # Check if the NSR's VNFDs contain metrics
             # Only one DF at the moment, support for this feature is comming in the future
-            vnfds_profiles = nsr['nsd']["df"][0]['vnf-profile']
+            vnfds_profiles = nsr["nsd"]["df"][0]["vnf-profile"]
             for vnf_profile in vnfds_profiles:
                 try:
-                    vnfd = self.common_db.get_vnfd_by_id(vnf_profile['vnfd-id'], create_filter_from_nsr(nsr))
+                    vnfd = self.common_db.get_vnfd_by_id(
+                        vnf_profile["vnfd-id"], create_filter_from_nsr(nsr)
+                    )
                     # If there are metrics, create dashboard (if exists)
-                    vdu_found = find_in_list(vnfd["vdu"], lambda a_vdu: "monitoring-parameter" in a_vdu)
+                    vdu_found = find_in_list(
+                        vnfd["vdu"], lambda a_vdu: "monitoring-parameter" in a_vdu
+                    )
                     if vdu_found:
                         if nsr_id not in dashboard_uids:
-                            nsr_name = nsr['name']
+                            nsr_name = nsr["name"]
                             project_id = nsr["_admin"]["projects_read"][0]
                             try:
                                 # Get project details from commondb
@@ -108,19 +118,20 @@ class DashboarderService:
                                 if self.keystone:
                                     # Serach project in keystone
                                     for project in projects:
-                                        if project_id == project['_id']:
+                                        if project_id == project["_id"]:
                                             project_name = project["name"]
                                 else:
-                                    log.info('Project %s not found', project_id)
-                                    log.debug('Exception %s' % e)
-                            self.grafana.create_dashboard(nsr_id, nsr_name,
-                                                          dashboard_path, project_name)
-                            log.debug('Created dashboard for NS: %s', nsr_id)
+                                    log.info("Project %s not found", project_id)
+                                    log.debug("Exception %s" % e)
+                            self.grafana.create_dashboard(
+                                nsr_id, nsr_name, dashboard_path, project_name
+                            )
+                            log.debug("Created dashboard for NS: %s", nsr_id)
                         else:
-                            log.debug('Dashboard already exists')
+                            log.debug("Dashboard already exists")
                         break
                     else:
-                        log.debug('NS does not has metrics')
+                        log.debug("NS does not has metrics")
                 except Exception:
                     log.exception("VNFD is not valid or has been renamed")
                     continue
@@ -129,14 +140,16 @@ class DashboarderService:
         for dashboard_uid in dashboard_uids:
             if dashboard_uid not in osm_resource_uids:
                 self.grafana.delete_dashboard(dashboard_uid)
-                log.debug('Deleted obsolete dashboard: %s', dashboard_uid)
+                log.debug("Deleted obsolete dashboard: %s", dashboard_uid)
             else:
-                log.debug('All dashboards in use')
+                log.debug("All dashboards in use")
 
     def create_grafana_user(self, user):
         self.grafana.create_grafana_users(user)
 
-    def create_grafana_team_member(self, project_data, userid=None, project_list=None, user=None):
+    def create_grafana_team_member(
+        self, project_data, userid=None, project_list=None, user=None
+    ):
         if user:
             user_name = user
         else:
@@ -151,16 +164,18 @@ class DashboarderService:
                     user = self.keystone.getUserById(userid)
                     user_name = user.name
                 else:
-                    log.info('User %s not found', userid)
-                    log.debug('Exception %s' % e)
+                    log.info("User %s not found", userid)
+                    log.debug("Exception %s" % e)
         if project_list:
             # user-project mapping is done by osm cli
             for proj in project_data:
                 project = self.common_db.get_project(proj["project"])
-                proj_name = project['name']
+                proj_name = project["name"]
                 role_obj = self.common_db.get_role_by_id(proj["role"])
                 is_admin = role_obj["permissions"].get("admin")
-                self.grafana.create_grafana_teams_members(proj_name, user_name, is_admin, project_list)
+                self.grafana.create_grafana_teams_members(
+                    proj_name, user_name, is_admin, project_list
+                )
         else:
             # user-project mapping is done by osm ui
             proj_list = []
@@ -172,22 +187,34 @@ class DashboarderService:
                 users_proj_list = user.get("project_role_mappings")
                 for project in users_proj_list:
                     proj_data = self.common_db.get_project(project["project"])
-                    proj_list.append(proj_data['name'])
+                    proj_list.append(proj_data["name"])
             for proj in project_data:
                 if self.keystone:
                     # Backend authentication type is keystone
                     try:
                         # Getting project and role objects from keystone using ids
                         role_obj = self.keystone.getRoleById(proj["role"])
-                        proj_data = self.keystone.getProjectsByProjectId(proj["project"])
-                        log.info('role object {} {}'.format(role_obj.permissions, proj_data.name))
-                        is_admin = role_obj.permissions['admin']
+                        proj_data = self.keystone.getProjectsByProjectId(
+                            proj["project"]
+                        )
+                        log.info(
+                            "role object {} {}".format(
+                                role_obj.permissions, proj_data.name
+                            )
+                        )
+                        is_admin = role_obj.permissions["admin"]
                     except Exception:
                         # Getting project and role objects from keystone using names
                         role_obj = self.keystone.getRoleByName(proj["role"])[0]
-                        proj_data = self.keystone.getProjectsByProjectName(proj["project"])[0]
-                        is_admin = role_obj.to_dict().get("permissions").get('admin')
-                        log.info('role object {} {}'.format(role_obj.to_dict(), proj_data.name))
+                        proj_data = self.keystone.getProjectsByProjectName(
+                            proj["project"]
+                        )[0]
+                        is_admin = role_obj.to_dict().get("permissions").get("admin")
+                        log.info(
+                            "role object {} {}".format(
+                                role_obj.to_dict(), proj_data.name
+                            )
+                        )
                     proj_name = proj_data.name
                 else:
                     # Backend authentication type is internal
@@ -199,9 +226,11 @@ class DashboarderService:
                         # Getting project and role object from commondb using ids
                         role_obj = self.common_db.get_role_by_id(proj["role"])
                         proj_data = self.common_db.get_project(proj["project"])
-                        proj_name = proj_data['name']
+                        proj_name = proj_data["name"]
                     is_admin = role_obj["permissions"].get("admin")
-                self.grafana.create_grafana_teams_members(proj_name, user_name, is_admin, proj_list)
+                self.grafana.create_grafana_teams_members(
+                    proj_name, user_name, is_admin, proj_list
+                )
 
     def create_grafana_team(self, team_name):
         self.grafana.create_grafana_teams(team_name)
@@ -227,6 +256,6 @@ class DashboarderService:
                 user = self.keystone.getUserById(user_id)
                 user_name = user.name
             else:
-                log.info('User %s not found', user_id)
-                log.debug('Exception %s' % e)
+                log.info("User %s not found", user_id)
+                log.debug("Exception %s" % e)
         self.grafana.remove_grafana_team_member(user_name, proj_data)
index 070cf69..94024d7 100644 (file)
@@ -29,11 +29,10 @@ from osm_mon.evaluator.backends.base import BaseBackend
 
 log = logging.getLogger(__name__)
 
-OSM_METRIC_PREFIX = 'osm_'
+OSM_METRIC_PREFIX = "osm_"
 
 
 class PrometheusBackend(BaseBackend):
-
     def __init__(self, config: Config):
         super().__init__(config)
         self.conf = config
@@ -42,31 +41,42 @@ class PrometheusBackend(BaseBackend):
         query = self._build_query(metric_name, tags)
         request_url = self._build_url(query)
         log.info("Querying Prometheus: %s", request_url)
-        r = requests.get(request_url, timeout=int(self.conf.get('global', 'request_timeout')))
+        r = requests.get(
+            request_url, timeout=int(self.conf.get("global", "request_timeout"))
+        )
         if r.status_code == 200:
             json_response = r.json()
-            if json_response['status'] == 'success':
+            if json_response["status"] == "success":
                 return self._get_metric_value_from_response(json_response)
             else:
-                log.warning("Prometheus response is not success. Got status %s", json_response['status'])
+                log.warning(
+                    "Prometheus response is not success. Got status %s",
+                    json_response["status"],
+                )
         else:
-            log.warning("Error contacting Prometheus. Got status code %s: %s", r.status_code, r.text)
+            log.warning(
+                "Error contacting Prometheus. Got status code %s: %s",
+                r.status_code,
+                r.text,
+            )
         return None
 
     def _build_query(self, metric_name: str, tags: dict) -> str:
         query_section_tags = []
         for k, v in tags.items():
-            query_section_tags.append(k + '=\"' + v + '\"')
-        query_section = "query={0}{{{1}}}".format(OSM_METRIC_PREFIX + metric_name, ','.join(query_section_tags))
+            query_section_tags.append(k + '="' + v + '"')
+        query_section = "query={0}{{{1}}}".format(
+            OSM_METRIC_PREFIX + metric_name, ",".join(query_section_tags)
+        )
         return query_section
 
     def _build_url(self, query: str):
-        return self.conf.get('prometheus', 'url') + "/api/v1/query?" + query
+        return self.conf.get("prometheus", "url") + "/api/v1/query?" + query
 
     def _get_metric_value_from_response(self, json_response):
-        result = json_response['data']['result']
+        result = json_response["data"]["result"]
         if len(result):
-            metric_value = float(result[0]['value'][1])
+            metric_value = float(result[0]["value"][1])
             log.info("Metric value: %s", metric_value)
             return metric_value
         else:
index 0b4efde..2d319c9 100644 (file)
@@ -35,7 +35,6 @@ log = logging.getLogger(__name__)
 
 
 class Evaluator:
-
     def __init__(self, config: Config, loop=None):
         self.conf = config
         if not loop:
@@ -45,33 +44,34 @@ class Evaluator:
         self.msg_bus = MessageBusClient(config)
 
     def evaluate_forever(self):
-        log.debug('evaluate_forever')
+        log.debug("evaluate_forever")
         while True:
             try:
                 self.evaluate()
-                time.sleep(int(self.conf.get('evaluator', 'interval')))
+                time.sleep(int(self.conf.get("evaluator", "interval")))
             except Exception:
                 log.exception("Error evaluating alarms")
 
     def evaluate(self):
-        log.debug('evaluate')
-        log.info('Starting alarm evaluation')
+        log.debug("evaluate")
+        log.info("Starting alarm evaluation")
         alarms_tuples = self.service.evaluate_alarms()
         processes = []
         for alarm, status in alarms_tuples:
-            p = multiprocessing.Process(target=self.notify_alarm,
-                                        args=(alarm, status))
+            p = multiprocessing.Process(target=self.notify_alarm, args=(alarm, status))
             p.start()
             processes.append(p)
         for process in processes:
             process.join(timeout=10)
-        log.info('Alarm evaluation is complete')
+        log.info("Alarm evaluation is complete")
 
     def notify_alarm(self, alarm: Alarm, status: AlarmStatus):
         log.debug("_notify_alarm")
         resp_message = self._build_alarm_response(alarm, status)
         log.info("Sent alarm notification: %s", resp_message)
-        self.loop.run_until_complete(self.msg_bus.aiowrite('alarm_response', 'notify_alarm', resp_message))
+        self.loop.run_until_complete(
+            self.msg_bus.aiowrite("alarm_response", "notify_alarm", resp_message)
+        )
 
     def _build_alarm_response(self, alarm: Alarm, status: AlarmStatus):
         log.debug("_build_alarm_response")
@@ -81,7 +81,7 @@ class Evaluator:
             tags[name] = value
         now = time.strftime("%d-%m-%Y") + " " + time.strftime("%X")
         return response.generate_response(
-            'notify_alarm',
+            "notify_alarm",
             alarm_id=alarm.uuid,
             metric_name=alarm.metric,
             operation=alarm.operation,
@@ -89,4 +89,5 @@ class Evaluator:
             sev=alarm.severity,
             status=status.value,
             date=now,
-            tags=tags)
+            tags=tags,
+        )
index b3b0d26..2ba0625 100644 (file)
@@ -32,54 +32,49 @@ from osm_mon.evaluator.backends.prometheus import PrometheusBackend
 
 log = logging.getLogger(__name__)
 
-BACKENDS = {
-    'prometheus': PrometheusBackend
-}
+BACKENDS = {"prometheus": PrometheusBackend}
 
 
 class AlarmStatus(Enum):
-    ALARM = 'alarm'
-    OK = 'ok'
-    INSUFFICIENT = 'insufficient-data'
+    ALARM = "alarm"
+    OK = "ok"
+    INSUFFICIENT = "insufficient-data"
 
 
 class EvaluatorService:
-
     def __init__(self, config: Config):
         self.conf = config
         self.common_db = CommonDbClient(self.conf)
         self.queue = multiprocessing.Queue()
 
-    def _get_metric_value(self,
-                          metric_name: str,
-                          tags: dict):
-        return BACKENDS[self.conf.get('evaluator', 'backend')](self.conf).get_metric_value(metric_name, tags)
+    def _get_metric_value(self, metric_name: str, tags: dict):
+        return BACKENDS[self.conf.get("evaluator", "backend")](
+            self.conf
+        ).get_metric_value(metric_name, tags)
 
-    def _evaluate_metric(self,
-                         alarm: Alarm):
+    def _evaluate_metric(self, alarm: Alarm):
         log.debug("_evaluate_metric")
         metric_value = self._get_metric_value(alarm.metric, alarm.tags)
         if metric_value is None:
             log.warning("No metric result for alarm %s", alarm.uuid)
             self.queue.put((alarm, AlarmStatus.INSUFFICIENT))
         else:
-            if alarm.operation.upper() == 'GT':
+            if alarm.operation.upper() == "GT":
                 if metric_value > alarm.threshold:
                     self.queue.put((alarm, AlarmStatus.ALARM))
                 else:
                     self.queue.put((alarm, AlarmStatus.OK))
-            elif alarm.operation.upper() == 'LT':
+            elif alarm.operation.upper() == "LT":
                 if metric_value < alarm.threshold:
                     self.queue.put((alarm, AlarmStatus.ALARM))
                 else:
                     self.queue.put((alarm, AlarmStatus.OK))
 
     def evaluate_alarms(self) -> List[Tuple[Alarm, AlarmStatus]]:
-        log.debug('evaluate_alarms')
+        log.debug("evaluate_alarms")
         processes = []
         for alarm in self.common_db.get_alarms():
-            p = multiprocessing.Process(target=self._evaluate_metric,
-                                        args=(alarm,))
+            p = multiprocessing.Process(target=self._evaluate_metric, args=(alarm,))
             processes.append(p)
             p.start()
 
index 117c054..ce6255c 100755 (executable)
@@ -36,7 +36,6 @@ log = logging.getLogger(__name__)
 
 
 class Server:
-
     def __init__(self, config: Config, loop=None):
         self.conf = config
         if not loop:
@@ -49,9 +48,7 @@ class Server:
         self.loop.run_until_complete(self.start())
 
     async def start(self):
-        topics = [
-            "alarm_request"
-        ]
+        topics = ["alarm_request"]
         try:
             await self.msg_bus.aioread(topics, self._process_msg)
         except Exception as e:
@@ -65,53 +62,71 @@ class Server:
 
             if topic == "alarm_request":
                 if key == "create_alarm_request":
-                    alarm_details = values['alarm_create_request']
-                    cor_id = alarm_details['correlation_id']
+                    alarm_details = values["alarm_create_request"]
+                    cor_id = alarm_details["correlation_id"]
                     response_builder = ResponseBuilder()
                     try:
                         alarm = self.service.create_alarm(
-                            alarm_details['alarm_name'],
-                            alarm_details['threshold_value'],
-                            alarm_details['operation'].lower(),
-                            alarm_details['severity'].lower(),
-                            alarm_details['statistic'].lower(),
-                            alarm_details['metric_name'],
-                            alarm_details['tags']
+                            alarm_details["alarm_name"],
+                            alarm_details["threshold_value"],
+                            alarm_details["operation"].lower(),
+                            alarm_details["severity"].lower(),
+                            alarm_details["statistic"].lower(),
+                            alarm_details["metric_name"],
+                            alarm_details["tags"],
+                        )
+                        response = response_builder.generate_response(
+                            "create_alarm_response",
+                            cor_id=cor_id,
+                            status=True,
+                            alarm_id=alarm.uuid,
                         )
-                        response = response_builder.generate_response('create_alarm_response',
-                                                                      cor_id=cor_id,
-                                                                      status=True,
-                                                                      alarm_id=alarm.uuid)
                     except Exception:
                         log.exception("Error creating alarm: ")
-                        response = response_builder.generate_response('create_alarm_response',
-                                                                      cor_id=cor_id,
-                                                                      status=False,
-                                                                      alarm_id=None)
-                    await self._publish_response('alarm_response_' + str(cor_id), 'create_alarm_response', response)
+                        response = response_builder.generate_response(
+                            "create_alarm_response",
+                            cor_id=cor_id,
+                            status=False,
+                            alarm_id=None,
+                        )
+                    await self._publish_response(
+                        "alarm_response_" + str(cor_id),
+                        "create_alarm_response",
+                        response,
+                    )
 
                 if key == "delete_alarm_request":
-                    alarm_details = values['alarm_delete_request']
-                    alarm_uuid = alarm_details['alarm_uuid']
+                    alarm_details = values["alarm_delete_request"]
+                    alarm_uuid = alarm_details["alarm_uuid"]
                     response_builder = ResponseBuilder()
-                    cor_id = alarm_details['correlation_id']
+                    cor_id = alarm_details["correlation_id"]
                     try:
                         self.service.delete_alarm(alarm_uuid)
-                        response = response_builder.generate_response('delete_alarm_response',
-                                                                      cor_id=cor_id,
-                                                                      status=True,
-                                                                      alarm_id=alarm_uuid)
+                        response = response_builder.generate_response(
+                            "delete_alarm_response",
+                            cor_id=cor_id,
+                            status=True,
+                            alarm_id=alarm_uuid,
+                        )
                     except Exception:
                         log.exception("Error deleting alarm: ")
-                        response = response_builder.generate_response('delete_alarm_response',
-                                                                      cor_id=cor_id,
-                                                                      status=False,
-                                                                      alarm_id=alarm_uuid)
-                    await self._publish_response('alarm_response_' + str(cor_id), 'delete_alarm_response', response)
+                        response = response_builder.generate_response(
+                            "delete_alarm_response",
+                            cor_id=cor_id,
+                            status=False,
+                            alarm_id=alarm_uuid,
+                        )
+                    await self._publish_response(
+                        "alarm_response_" + str(cor_id),
+                        "delete_alarm_response",
+                        response,
+                    )
 
         except Exception:
             log.exception("Exception processing message: ")
 
     async def _publish_response(self, topic: str, key: str, msg: dict):
-        log.info("Sending response %s to topic %s with key %s", json.dumps(msg), topic, key)
+        log.info(
+            "Sending response %s to topic %s with key %s", json.dumps(msg), topic, key
+        )
         await self.msg_bus.aiowrite(topic, key, msg)
index b68b367..298043e 100755 (executable)
@@ -30,26 +30,28 @@ log = logging.getLogger(__name__)
 
 
 class ServerService:
-
     def __init__(self, config: Config):
         self.common_db = CommonDbClient(config)
 
-    def create_alarm(self,
-                     name: str,
-                     threshold: float,
-                     operation: str,
-                     severity: str,
-                     statistic: str,
-                     metric_name: str,
-                     tags: dict) -> Alarm:
+    def create_alarm(
+        self,
+        name: str,
+        threshold: float,
+        operation: str,
+        severity: str,
+        statistic: str,
+        metric_name: str,
+        tags: dict,
+    ) -> Alarm:
         log.debug("create_alarm")
-        alarm = Alarm(name, severity, threshold, operation, statistic, metric_name, tags)
+        alarm = Alarm(
+            name, severity, threshold, operation, statistic, metric_name, tags
+        )
         self.common_db.create_alarm(alarm)
         log.info("Alarm %s created", alarm.name)
         return alarm
 
-    def delete_alarm(self,
-                     alarm_uuid: str) -> None:
+    def delete_alarm(self, alarm_uuid: str) -> None:
         log.debug("delete_alarm")
         self.common_db.delete_alarm(alarm_uuid)
         log.info("Alarm %s is deleted", alarm_uuid)
index fc2146c..63209ed 100644 (file)
@@ -39,36 +39,47 @@ class CollectorServiceTest(TestCase):
     @mock.patch.object(OpenstackCollector, "collect")
     @mock.patch.object(CommonDbClient, "get_vim_account")
     def test_init_vim_collector_and_collect_openstack(self, _get_vim_account, collect):
-        _get_vim_account.return_value = {'vim_type': 'openstack'}
+        _get_vim_account.return_value = {"vim_type": "openstack"}
         collector = CollectorService(self.config)
-        collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+        collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
         collect.assert_called_once_with({})
 
     @mock.patch.object(OpenstackCollector, "collect")
     @mock.patch.object(CommonDbClient, "get_vim_account")
-    def test_init_vim_collector_and_collect_unknown(self, _get_vim_account, openstack_collect):
-        _get_vim_account.return_value = {'vim_type': 'unknown'}
+    def test_init_vim_collector_and_collect_unknown(
+        self, _get_vim_account, openstack_collect
+    ):
+        _get_vim_account.return_value = {"vim_type": "unknown"}
         collector = CollectorService(self.config)
-        collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+        collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
         openstack_collect.assert_not_called()
 
     @mock.patch.object(OpenstackCollector, "__init__", lambda *args, **kwargs: None)
     @mock.patch.object(OpenstackCollector, "collect")
     @mock.patch.object(CommonDbClient, "get_vim_account")
-    def test_init_vim_collector_and_collect_vio_with_openstack_collector(self, _get_vim_account, openstack_collect):
-        _get_vim_account.return_value = {'vim_type': 'openstack', 'config': {'vim_type': 'VIO'}}
+    def test_init_vim_collector_and_collect_vio_with_openstack_collector(
+        self, _get_vim_account, openstack_collect
+    ):
+        _get_vim_account.return_value = {
+            "vim_type": "openstack",
+            "config": {"vim_type": "VIO"},
+        }
         collector = CollectorService(self.config)
-        collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+        collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
         openstack_collect.assert_called_once_with({})
 
     @mock.patch.object(VIOCollector, "__init__", lambda *args, **kwargs: None)
     @mock.patch.object(VIOCollector, "collect")
     @mock.patch.object(CommonDbClient, "get_vim_account")
-    def test_init_vim_collector_and_collect_vio_with_vrops_collector(self, _get_vim_account, vio_collect):
-        _get_vim_account.return_value = {'vim_type': 'openstack',
-                                         'config': {'vim_type': 'VIO', 'vrops_site': 'https://vrops'}}
+    def test_init_vim_collector_and_collect_vio_with_vrops_collector(
+        self, _get_vim_account, vio_collect
+    ):
+        _get_vim_account.return_value = {
+            "vim_type": "openstack",
+            "config": {"vim_type": "VIO", "vrops_site": "https://vrops"},
+        }
         collector = CollectorService(self.config)
-        collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+        collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
         vio_collect.assert_called_once_with({})
 
     @mock.patch("osm_mon.collector.service.VCACollector", autospec=True)
index 7cfa4bf..bf37076 100644 (file)
@@ -25,51 +25,48 @@ from unittest import TestCase, mock
 from osm_mon.collector.utils.openstack import OpenstackUtils
 
 
-@mock.patch('osm_mon.collector.utils.openstack.session')
+@mock.patch("osm_mon.collector.utils.openstack.session")
 class OpenstackUtilsTest(TestCase):
-
     def setUp(self):
         super().setUp()
 
     def test_session_without_insecure(self, mock_session):
         creds = {
-            'config': {
-            },
-            'vim_url': 'url',
-            'vim_user': 'user',
-            'vim_password': 'password',
-            'vim_tenant_name': 'tenant_name'
+            "config": {},
+            "vim_url": "url",
+            "vim_user": "user",
+            "vim_password": "password",
+            "vim_tenant_name": "tenant_name",
         }
         OpenstackUtils.get_session(creds)
 
         mock_session.Session.assert_called_once_with(
-            auth=mock.ANY, verify=True, timeout=10)
+            auth=mock.ANY, verify=True, timeout=10
+        )
 
     def test_session_with_insecure(self, mock_session):
         creds = {
-            'config': {
-                'insecure': True
-            },
-            'vim_url': 'url',
-            'vim_user': 'user',
-            'vim_password': 'password',
-            'vim_tenant_name': 'tenant_name'
+            "config": {"insecure": True},
+            "vim_url": "url",
+            "vim_user": "user",
+            "vim_password": "password",
+            "vim_tenant_name": "tenant_name",
         }
         OpenstackUtils.get_session(creds)
 
         mock_session.Session.assert_called_once_with(
-            auth=mock.ANY, verify=False, timeout=10)
+            auth=mock.ANY, verify=False, timeout=10
+        )
 
     def test_session_with_insecure_false(self, mock_session):
         creds = {
-            'config': {
-                'insecure': False
-            },
-            'vim_url': 'url',
-            'vim_user': 'user',
-            'vim_password': 'password',
-            'vim_tenant_name': 'tenant_name'
+            "config": {"insecure": False},
+            "vim_url": "url",
+            "vim_user": "user",
+            "vim_password": "password",
+            "vim_tenant_name": "tenant_name",
         }
         OpenstackUtils.get_session(creds)
         mock_session.Session.assert_called_once_with(
-            auth=mock.ANY, verify=True, timeout=10)
+            auth=mock.ANY, verify=True, timeout=10
+        )
index 9af1492..b42e0d4 100644 (file)
@@ -40,86 +40,157 @@ class CollectorTest(TestCase):
     def tearDown(self):
         super().tearDown()
 
-    @mock.patch.object(GnocchiBackend, '_build_neutron_client')
-    @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+    @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+    @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
     def test_collect_gnocchi_rate_instance(self, build_gnocchi_client, _):
         mock_gnocchi_client = mock.Mock()
         mock_gnocchi_client.metric = mock.Mock()
-        mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 0.0345442539),
-                                                                (datetime.datetime(2019, 4, 12, 15, 44,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 600000000)]
+        mock_gnocchi_client.metric.get_measures.return_value = [
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    43,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                0.0345442539,
+            ),
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    44,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                600000000,
+            ),
+        ]
         build_gnocchi_client.return_value = mock_gnocchi_client
 
-        backend = GnocchiBackend({'_id': 'test_uuid'})
-        value = backend._collect_instance_metric('cpu', 'test_resource_id')
+        backend = GnocchiBackend({"_id": "test_uuid"})
+        value = backend._collect_instance_metric("cpu", "test_resource_id")
         self.assertEqual(value, 1.0)
-        mock_gnocchi_client.metric.get_measures.assert_called_once_with('cpu',
-                                                                        aggregation="rate:mean",
-                                                                        start=mock.ANY,
-                                                                        resource_id='test_resource_id')
-
-    @mock.patch.object(GnocchiBackend, '_build_neutron_client')
-    @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+        mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+            "cpu",
+            aggregation="rate:mean",
+            start=mock.ANY,
+            resource_id="test_resource_id",
+        )
+
+    @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+    @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
     def test_collect_gnocchi_non_rate_instance(self, build_gnocchi_client, _):
         mock_gnocchi_client = mock.Mock()
-        mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 0.0345442539),
-                                                                (datetime.datetime(2019, 4, 12, 15, 44,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 128)]
+        mock_gnocchi_client.metric.get_measures.return_value = [
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    43,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                0.0345442539,
+            ),
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    44,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                128,
+            ),
+        ]
         build_gnocchi_client.return_value = mock_gnocchi_client
 
-        backend = GnocchiBackend({'_id': 'test_uuid'})
-        value = backend._collect_instance_metric('memory.usage', 'test_resource_id')
+        backend = GnocchiBackend({"_id": "test_uuid"})
+        value = backend._collect_instance_metric("memory.usage", "test_resource_id")
         self.assertEqual(value, 128)
-        mock_gnocchi_client.metric.get_measures.assert_called_once_with('memory.usage',
-                                                                        aggregation=None,
-                                                                        start=mock.ANY,
-                                                                        resource_id='test_resource_id')
-
-    @mock.patch.object(GnocchiBackend, '_build_neutron_client')
-    @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+        mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+            "memory.usage",
+            aggregation=None,
+            start=mock.ANY,
+            resource_id="test_resource_id",
+        )
+
+    @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+    @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
     def test_collect_gnocchi_no_metric(self, build_gnocchi_client, _):
         mock_gnocchi_client = mock.Mock()
-        mock_gnocchi_client.metric.get_measures.side_effect = gnocchiclient.exceptions.NotFound()
+        mock_gnocchi_client.metric.get_measures.side_effect = (
+            gnocchiclient.exceptions.NotFound()
+        )
         build_gnocchi_client.return_value = mock_gnocchi_client
 
-        backend = GnocchiBackend({'_id': 'test_uuid'})
-        value = backend._collect_instance_metric('memory.usage', 'test_resource_id')
+        backend = GnocchiBackend({"_id": "test_uuid"})
+        value = backend._collect_instance_metric("memory.usage", "test_resource_id")
         self.assertIsNone(value)
-        mock_gnocchi_client.metric.get_measures.assert_called_once_with('memory.usage',
-                                                                        aggregation=None,
-                                                                        start=mock.ANY,
-                                                                        resource_id='test_resource_id')
-
-    @mock.patch.object(GnocchiBackend, '_build_neutron_client')
-    @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
-    def test_collect_interface_all_metric(self, build_gnocchi_client, build_neutron_client):
+        mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+            "memory.usage",
+            aggregation=None,
+            start=mock.ANY,
+            resource_id="test_resource_id",
+        )
+
+    @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+    @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
+    def test_collect_interface_all_metric(
+        self, build_gnocchi_client, build_neutron_client
+    ):
         mock_gnocchi_client = mock.Mock()
-        mock_gnocchi_client.resource.search.return_value = [{'id': 'test_id_1'}, {'id': 'test_id_2'}]
-        mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 0.0345442539),
-                                                                (datetime.datetime(2019, 4, 12, 15, 44,
-                                                                                   tzinfo=datetime.timezone(
-                                                                                       datetime.timedelta(0),
-                                                                                       '+00:00')), 60.0, 0.0333070363)]
+        mock_gnocchi_client.resource.search.return_value = [
+            {"id": "test_id_1"},
+            {"id": "test_id_2"},
+        ]
+        mock_gnocchi_client.metric.get_measures.return_value = [
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    43,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                0.0345442539,
+            ),
+            (
+                datetime.datetime(
+                    2019,
+                    4,
+                    12,
+                    15,
+                    44,
+                    tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+                ),
+                60.0,
+                0.0333070363,
+            ),
+        ]
 
         build_gnocchi_client.return_value = mock_gnocchi_client
 
-        backend = GnocchiBackend({'_id': 'test_uuid'})
-        value = backend._collect_interface_all_metric('packets_received', 'test_resource_id')
+        backend = GnocchiBackend({"_id": "test_uuid"})
+        value = backend._collect_interface_all_metric(
+            "packets_received", "test_resource_id"
+        )
         self.assertEqual(value, 0.0666140726)
-        mock_gnocchi_client.metric.get_measures.assert_any_call('packets_received', resource_id='test_id_1',
-                                                                limit=1)
-        mock_gnocchi_client.metric.get_measures.assert_any_call('packets_received', resource_id='test_id_2',
-                                                                limit=1)
+        mock_gnocchi_client.metric.get_measures.assert_any_call(
+            "packets_received", resource_id="test_id_1", limit=1
+        )
+        mock_gnocchi_client.metric.get_measures.assert_any_call(
+            "packets_received", resource_id="test_id_2", limit=1
+        )
index 36ee361..21145be 100644 (file)
@@ -24,11 +24,20 @@ import os
 import re
 
 
-def mock_http_response(mocker, method='GET', site='https://vrops', url_pattern='', response_file='OK',
-                       status_code=200, exception=None):
-    '''Helper function to load a canned response from a file.'''
-    with open(os.path.join(os.path.dirname(__file__), 'vmware_mocks',
-                           '%s' % response_file), 'r') as f:
+def mock_http_response(
+    mocker,
+    method="GET",
+    site="https://vrops",
+    url_pattern="",
+    response_file="OK",
+    status_code=200,
+    exception=None,
+):
+    """Helper function to load a canned response from a file."""
+    with open(
+        os.path.join(os.path.dirname(__file__), "vmware_mocks", "%s" % response_file),
+        "r",
+    ) as f:
         response = f.read()
 
     matcher = re.compile(site + url_pattern)
index ccab0d2..3cac999 100644 (file)
 
 from osm_mon.collector.vnf_collectors.vmware import VMwareCollector
 from osm_mon.core.config import Config
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+    mock_http_response,
+)
 from unittest import TestCase, mock
 
 import json
 import os
 import requests_mock
 
-VIM_ACCOUNT = {"vrops_site": "https://vrops",
-               "vrops_user": "",
-               "vrops_password": "",
-               "vim_url": "https://vcd",
-               "admin_username": "",
-               "admin_password": "",
-               "vim_uuid": ""}
+VIM_ACCOUNT = {
+    "vrops_site": "https://vrops",
+    "vrops_user": "",
+    "vrops_password": "",
+    "vim_url": "https://vcd",
+    "admin_username": "",
+    "admin_password": "",
+    "vim_uuid": "",
+}
 
 
-@mock.patch.object(VMwareCollector, 'get_vm_moref_id',
-                   spec_set=True, autospec=True)
+@mock.patch.object(VMwareCollector, "get_vm_moref_id", spec_set=True, autospec=True)
 class CollectorTest(TestCase):
-
-    @mock.patch.object(VMwareCollector, 'get_vim_account',
-                       spec_set=True, autospec=True)
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+    @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
     def setUp(self, mock_db, mock_get_vim_account):
         super().setUp()
         mock_get_vim_account.return_value = VIM_ACCOUNT
-        self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+        self.collector = VMwareCollector(
+            Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+        )
         self.mock_db = mock_db
-        with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+        with open(
+            os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+        ) as f:
             self.vnfr = json.load(f)
-        with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFD.json'), 'r') as f:
+        with open(
+            os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFD.json"), "r"
+        ) as f:
             self.vnfd = json.load(f)
 
     def tearDown(self):
@@ -61,180 +68,244 @@ class CollectorTest(TestCase):
     def test_collect_cpu_and_memory(self, mock_vm_moref_id):
 
         mock_vm_moref_id.return_value = "VMWARE-OID-VM-1"
-        self.vnfd['vdu'][0]['monitoring-parameter'] = [
+        self.vnfd["vdu"][0]["monitoring-parameter"] = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
-            ]
+            {
+                "id": "ubuntu_vnf_average_memory_utilization",
+                "performance-metric": "average_memory_utilization",
+            },
+        ]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
         self.assertEqual(len(metrics), 2, "Number of metrics returned")
         self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
         self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
-        self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
+        self.assertEqual(
+            metrics[1].name, "average_memory_utilization", "Second metric name"
+        )
         self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
 
     def test_collect_no_moref(self, mock_vm_moref_id):
         mock_vm_moref_id.return_value = None
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='404.txt', status_code=404)
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="404.txt",
+                status_code=404,
+            )
             metrics = self.collector.collect(self.vnfr)
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_no_monitoring_param(self, _):
-        self.vnfd['vdu'][0]['monitoring-parameter'] = []
+        self.vnfd["vdu"][0]["monitoring-parameter"] = []
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_empty_monitoring_param(self, _):
-        del self.vnfd['vdu'][0]['monitoring-parameter']
+        del self.vnfd["vdu"][0]["monitoring-parameter"]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_no_name(self, _):
-        del self.vnfr['vdur'][0]['name']
-        del self.vnfr['vdur'][1]['name']
+        del self.vnfr["vdur"][0]["name"]
+        del self.vnfr["vdur"][1]["name"]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
 
 class VApp_Details_Test(TestCase):
-
-    @mock.patch.object(VMwareCollector, 'get_vim_account',
-                       spec_set=True, autospec=True)
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+    @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
     def setUp(self, mock_db, mock_get_vim_account):
         super().setUp()
         self.mock_db = mock_db
         mock_get_vim_account.return_value = VIM_ACCOUNT
-        self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+        self.collector = VMwareCollector(
+            Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+        )
 
     def tearDown(self):
         super().tearDown()
 
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
     def test_get_vapp_details(self, mock_vcd_client):
-        mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+        mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests,
-                               site='https://vcd',
-                               url_pattern='/api/vApp/.*',
-                               response_file='vcd_vapp_response.xml')
-            response = self.collector.get_vapp_details_rest('')
-        self.assertDictContainsSubset({'vm_vcenter_info': {'vm_moref_id': 'vm-4055'}},
-                                      response, 'Managed object reference id incorrect')
+            mock_http_response(
+                mock_requests,
+                site="https://vcd",
+                url_pattern="/api/vApp/.*",
+                response_file="vcd_vapp_response.xml",
+            )
+            response = self.collector.get_vapp_details_rest("")
+        self.assertDictContainsSubset(
+            {"vm_vcenter_info": {"vm_moref_id": "vm-4055"}},
+            response,
+            "Managed object reference id incorrect",
+        )
 
     def test_no_admin_connect(self):
-        response = self.collector.get_vapp_details_rest('')
-        self.assertDictEqual(response, {}, 'Failed to connect should return empty dictionary')
+        response = self.collector.get_vapp_details_rest("")
+        self.assertDictEqual(
+            response, {}, "Failed to connect should return empty dictionary"
+        )
 
     def test_no_id(self):
         response = self.collector.get_vapp_details_rest()
-        self.assertDictEqual(response, {}, 'No id supplied should return empty dictionary')
+        self.assertDictEqual(
+            response, {}, "No id supplied should return empty dictionary"
+        )
 
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
     def test_get_vapp_details_404(self, mock_vcd_client):
-        mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+        mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests,
-                               site='https://vcd',
-                               url_pattern='/api/vApp/.*',
-                               response_file='404.txt', status_code=404)
-            response = self.collector.get_vapp_details_rest('')
-        self.assertDictEqual(response, {}, 'HTTP error should return empty dictionary')
-
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+            mock_http_response(
+                mock_requests,
+                site="https://vcd",
+                url_pattern="/api/vApp/.*",
+                response_file="404.txt",
+                status_code=404,
+            )
+            response = self.collector.get_vapp_details_rest("")
+        self.assertDictEqual(response, {}, "HTTP error should return empty dictionary")
+
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
     def test_get_vapp_details_xml_parse_error(self, mock_vcd_client):
-        mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+        mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests,
-                               site='https://vcd',
-                               url_pattern='/api/vApp/.*',
-                               response_file='404.txt')
-            response = self.collector.get_vapp_details_rest('')
-        self.assertDictEqual(response, {}, 'XML parse error should return empty dictionary')
+            mock_http_response(
+                mock_requests,
+                site="https://vcd",
+                url_pattern="/api/vApp/.*",
+                response_file="404.txt",
+            )
+            response = self.collector.get_vapp_details_rest("")
+        self.assertDictEqual(
+            response, {}, "XML parse error should return empty dictionary"
+        )
 
 
 class Get_VM_Moref_Test(TestCase):
-
-    @mock.patch.object(VMwareCollector, 'get_vim_account',
-                       spec_set=True, autospec=True)
-    @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+    @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+    @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
     def setUp(self, mock_db, mock_get_vim_account):
         super().setUp()
         self.mock_db = mock_db
         mock_get_vim_account.return_value = VIM_ACCOUNT
-        self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+        self.collector = VMwareCollector(
+            Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+        )
 
     def tearDown(self):
         super().tearDown()
 
-    @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
-                       spec_set=True, autospec=True)
+    @mock.patch.object(
+        VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+    )
     def test_get_vm_moref_id(self, mock_vapp_details):
-        mock_vapp_details.return_value = {'vm_vcenter_info': {'vm_moref_id': 'vm-4055'}}
-        response = self.collector.get_vm_moref_id('1234')
-        self.assertEqual(response, 'vm-4055', 'Did not fetch correct ref id from dictionary')
+        mock_vapp_details.return_value = {"vm_vcenter_info": {"vm_moref_id": "vm-4055"}}
+        response = self.collector.get_vm_moref_id("1234")
+        self.assertEqual(
+            response, "vm-4055", "Did not fetch correct ref id from dictionary"
+        )
 
-    @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
-                       spec_set=True, autospec=True)
+    @mock.patch.object(
+        VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+    )
     def test_get_vm_moref_bad_content(self, mock_vapp_details):
         mock_vapp_details.return_value = {}
-        response = self.collector.get_vm_moref_id('1234')
-        self.assertEqual(response, None, 'Error fetching vapp details should return None')
+        response = self.collector.get_vm_moref_id("1234")
+        self.assertEqual(
+            response, None, "Error fetching vapp details should return None"
+        )
 
-    @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
-                       spec_set=True, autospec=True)
+    @mock.patch.object(
+        VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+    )
     def test_get_vm_moref_has_exception(self, mock_vapp_details):
-        mock_vapp_details.side_effect = Exception('Testing')
-        response = self.collector.get_vm_moref_id('1234')
-        self.assertEqual(response, None, 'Exception while fetching vapp details should return None')
+        mock_vapp_details.side_effect = Exception("Testing")
+        response = self.collector.get_vm_moref_id("1234")
+        self.assertEqual(
+            response, None, "Exception while fetching vapp details should return None"
+        )
index 2c72ea0..f838c5a 100644 (file)
 
 from osm_mon.collector.vnf_collectors.vio import VIOCollector
 from osm_mon.core.config import Config
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+    mock_http_response,
+)
 from unittest import TestCase, mock
 
 import json
 import os
 import requests_mock
 
-VIM_ACCOUNT = {"vrops_site": "https://vrops",
-               "vrops_user": "",
-               "vrops_password": "",
-               "vim_url": "",
-               "admin_username": "",
-               "admin_password": "",
-               "vim_uuid": ""}
+VIM_ACCOUNT = {
+    "vrops_site": "https://vrops",
+    "vrops_user": "",
+    "vrops_password": "",
+    "vim_url": "",
+    "admin_username": "",
+    "admin_password": "",
+    "vim_uuid": "",
+}
 
 
 class CollectorTest(TestCase):
-
-    @mock.patch.object(VIOCollector, 'get_vim_account',
-                       spec_set=True, autospec=True)
-    @mock.patch('osm_mon.collector.vnf_collectors.vio.CommonDbClient')
+    @mock.patch.object(VIOCollector, "get_vim_account", spec_set=True, autospec=True)
+    @mock.patch("osm_mon.collector.vnf_collectors.vio.CommonDbClient")
     def setUp(self, mock_db, mock_get_vim_account):
         super().setUp()
         self.mock_db = mock_db
         mock_get_vim_account.return_value = VIM_ACCOUNT
         self.collector = VIOCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
-        with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+        with open(
+            os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+        ) as f:
             self.vnfr = json.load(f)
-        with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFD.json'), 'r') as f:
+        with open(
+            os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFD.json"), "r"
+        ) as f:
             self.vnfd = json.load(f)
 
     def tearDown(self):
         super().tearDown()
 
     def test_collect_cpu_and_memory(self):
-        self.vnfd['vdu'][0]['monitoring-parameter'] = [
+        self.vnfd["vdu"][0]["monitoring-parameter"] = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
-            ]
+            {
+                "id": "ubuntu_vnf_average_memory_utilization",
+                "performance-metric": "average_memory_utilization",
+            },
+        ]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
             self.assertEqual(len(metrics), 4, "Number of metrics returned")
             self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
             self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
-            self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
-            self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
+            self.assertEqual(
+                metrics[1].name, "average_memory_utilization", "Second metric name"
+            )
+            self.assertEqual(
+                metrics[1].value, 20.515941619873047, "Memory metric value"
+            )
             self.assertEqual(metrics[2].name, "cpu_utilization", "First metric name")
             self.assertEqual(metrics[2].value, 0.05400000140070915, "CPU metric value")
-            self.assertEqual(metrics[3].name, "average_memory_utilization", "Second metric name")
+            self.assertEqual(
+                metrics[3].name, "average_memory_utilization", "Second metric name"
+            )
             self.assertEqual(metrics[3].value, 15.23439884185791, "Memory metric value")
 
     def test_collect_no_monitoring_param(self):
-        self.vnfd['vdu'][0]['monitoring-parameter'] = []
+        self.vnfd["vdu"][0]["monitoring-parameter"] = []
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
             self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_empty_monitoring_param(self):
-        del self.vnfd['vdu'][0]['monitoring-parameter']
+        del self.vnfd["vdu"][0]["monitoring-parameter"]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
             self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_no_name(self):
-        del self.vnfr['vdur'][0]['name']
-        del self.vnfr['vdur'][1]['name']
+        del self.vnfr["vdur"][0]["name"]
+        del self.vnfr["vdur"][1]["name"]
         self.mock_db.return_value.get_vnfd.return_value = self.vnfd
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.collector.collect(self.vnfr)
             self.assertEqual(len(metrics), 0, "Number of metrics returned")
index 3589d7d..db8b28a 100644 (file)
@@ -21,7 +21,9 @@
 # #
 
 from osm_mon.collector.vnf_collectors.vrops.vrops_helper import vROPS_Helper
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+    mock_http_response,
+)
 from unittest import TestCase
 
 import json
@@ -30,7 +32,6 @@ import requests_mock
 
 
 class vROPS_Helper_Resource_List_Test(TestCase):
-
     def setUp(self):
         super().setUp()
         self.vrops = vROPS_Helper()
@@ -40,64 +41,93 @@ class vROPS_Helper_Resource_List_Test(TestCase):
 
     def test_get_vm_resource_list_from_vrops(self):
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='vrops_resources.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="vrops_resources.json",
+            )
             resource_list = self.vrops.get_vm_resource_list_from_vrops()
-            self.assertEqual(len(resource_list), 3, "List of resources from canned vrops_resources.json")
+            self.assertEqual(
+                len(resource_list),
+                3,
+                "List of resources from canned vrops_resources.json",
+            )
 
     def test_get_vm_resource_list_from_vrops_http_404(self):
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='404.txt', status_code=404)
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="404.txt",
+                status_code=404,
+            )
             resource_list = self.vrops.get_vm_resource_list_from_vrops()
             self.assertEqual(len(resource_list), 0, "Should return an empty list")
 
     def test_get_vm_resource_list_from_vrops_bad_json(self):
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
-                               response_file='malformed.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+                response_file="malformed.json",
+            )
             resource_list = self.vrops.get_vm_resource_list_from_vrops()
             self.assertEqual(len(resource_list), 0, "Should return an empty list")
 
 
 class vROPS_Helper_Get_Metrics_Test(TestCase):
-
     def setUp(self):
         super().setUp()
         self.vrops = vROPS_Helper()
-        with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+        with open(
+            os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+        ) as f:
             self.vnfr = json.load(f)
 
     def tearDown(self):
         super().tearDown()
 
     def test_collect_one_metric_only(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            ]
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
         self.assertEqual(len(metrics), 1, "Number of metrics returned")
@@ -105,45 +135,66 @@ class vROPS_Helper_Get_Metrics_Test(TestCase):
         self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
 
     def test_collect_cpu_and_memory(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
-            ]
+            {
+                "id": "ubuntu_vnf_average_memory_utilization",
+                "performance-metric": "average_memory_utilization",
+            },
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 2, "Number of metrics returned")
         self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
         self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
-        self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
+        self.assertEqual(
+            metrics[1].name, "average_memory_utilization", "Second metric name"
+        )
         self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
 
     def test_collect_adjusted_metric(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
-            {'id': 'ubuntu_vnf_cpu_util', 'performance-metric': 'disk_read_bytes'}
-            ]
+            {"id": "ubuntu_vnf_cpu_util", "performance-metric": "disk_read_bytes"}
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
@@ -152,151 +203,232 @@ class vROPS_Helper_Get_Metrics_Test(TestCase):
         self.assertEqual(metrics[0].value, 10240.0, "Disk read bytes (not KB/s)")
 
     def test_collect_not_provided_metric(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
-            {'id': 'cirros_vnf_packets_sent', 'performance-metric': 'packets_in_dropped'},
-            ]
+            {
+                "id": "cirros_vnf_packets_sent",
+                "performance-metric": "packets_in_dropped",
+            },
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_unkown_metric(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
-            {'id': 'cirros_vnf-Unknown_Metric', 'performance-metric': 'unknown'},
-            ]
+            {"id": "cirros_vnf-Unknown_Metric", "performance-metric": "unknown"},
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_vrops_no_data(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            ]
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='OK.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="OK.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_vrops_unknown_vim_id(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {"name": "vmware-scaling-1-ubuntu_vnfd-VM-2"}
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            ]
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_vrops_http_error(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            ]
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='404.txt', status_code=404)
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="404.txt",
+                status_code=404,
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_vrops_json_parse_error(self):
-        vdu_mappings = {'VMWARE-OID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
-                         'vrops_id': 'VROPS-UUID-1'}}
+        vdu_mappings = {
+            "VMWARE-OID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-1",
+            }
+        }
         monitoring_params = [
             {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
-            ]
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='404.txt')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="404.txt",
+            )
 
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
         self.assertEqual(len(metrics), 0, "Number of metrics returned")
 
     def test_collect_multi_vdu(self):
-        vdu_mappings = {'VMWARE-UUID-VM-1':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-1', 'vrops_id': 'VROPS-UUID-1'},
-                        'VMWARE-UUID-VM-2':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2', 'vrops_id': 'VROPS-UUID-2'},
-                        'VMWARE-UUID-VM-3':
-                        {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2', 'vrops_id': 'VROPS-UUID-3'}
-                        }
+        vdu_mappings = {
+            "VMWARE-UUID-VM-1": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-1",
+                "vrops_id": "VROPS-UUID-1",
+            },
+            "VMWARE-UUID-VM-2": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-2",
+            },
+            "VMWARE-UUID-VM-3": {
+                "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+                "vrops_id": "VROPS-UUID-3",
+            },
+        }
         monitoring_params = [
-            {'id': 'ubuntu_vnf_cpu_util', 'performance-metric': 'cpu_utilization'},
-            {'id': 'ubuntu_vnf_average_memory_utilization', 'performance-metric': 'average_memory_utilization'},
-            {'id': 'ubuntu_vnf_disk_read_ops', 'performance-metric': 'disk_read_ops'},
-            {'id': 'ubuntu_vnf_disk_write_ops', 'performance-metric': 'disk_write_ops'},
-            {'id': 'ubuntu_vnf_disk_read_bytes', 'performance-metric': 'disk_read_bytes'},
-            {'id': 'ubuntu_vnf_disk_write_bytes', 'performance-metric': 'disk_write_bytes'},
-            {'id': 'ubuntu_vnf_packets_out_dropped', 'performance-metric': 'packets_out_dropped'},
-            {'id': 'ubuntu_vnf_packets_received', 'performance-metric': 'packets_received'},
-            {'id': 'ubuntu_vnf_packets_sent', 'performance-metric': 'packets_sent'}
-            ]
+            {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
+            {
+                "id": "ubuntu_vnf_average_memory_utilization",
+                "performance-metric": "average_memory_utilization",
+            },
+            {"id": "ubuntu_vnf_disk_read_ops", "performance-metric": "disk_read_ops"},
+            {"id": "ubuntu_vnf_disk_write_ops", "performance-metric": "disk_write_ops"},
+            {
+                "id": "ubuntu_vnf_disk_read_bytes",
+                "performance-metric": "disk_read_bytes",
+            },
+            {
+                "id": "ubuntu_vnf_disk_write_bytes",
+                "performance-metric": "disk_write_bytes",
+            },
+            {
+                "id": "ubuntu_vnf_packets_out_dropped",
+                "performance-metric": "packets_out_dropped",
+            },
+            {
+                "id": "ubuntu_vnf_packets_received",
+                "performance-metric": "packets_received",
+            },
+            {"id": "ubuntu_vnf_packets_sent", "performance-metric": "packets_sent"},
+        ]
 
         with requests_mock.Mocker() as mock_requests:
-            mock_http_response(mock_requests, method='POST',
-                               url_pattern='/suite-api/api/auth/token/acquire',
-                               response_file='vrops_token.json')
-            mock_http_response(mock_requests,
-                               url_pattern='/suite-api/api/resources/stats.*',
-                               response_file='vrops_multi.json')
+            mock_http_response(
+                mock_requests,
+                method="POST",
+                url_pattern="/suite-api/api/auth/token/acquire",
+                response_file="vrops_token.json",
+            )
+            mock_http_response(
+                mock_requests,
+                url_pattern="/suite-api/api/resources/stats.*",
+                response_file="vrops_multi.json",
+            )
             metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
 
-        self.assertEqual(len(metrics), len(monitoring_params) * len(vdu_mappings), "Number of metrics returned")
+        self.assertEqual(
+            len(metrics),
+            len(monitoring_params) * len(vdu_mappings),
+            "Number of metrics returned",
+        )
index b982bca..b639b7a 100644 (file)
@@ -38,63 +38,75 @@ class CommonDbClientTest(unittest.TestCase):
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(CommonDbClient, "get_vnfr")
     def test_get_vim_id(self, get_vnfr):
-        get_vnfr.return_value = {'_id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
-                                 '_admin': {
-                                     'projects_read': ['admin'], 'created': 1526044312.102287,
-                                     'modified': 1526044312.102287, 'projects_write': ['admin']
-                                 },
-                                 'vim-account-id': 'c1740601-7287-48c8-a2c9-bce8fee459eb',
-                                 'nsr-id-ref': '5ec3f571-d540-4cb0-9992-971d1b08312e',
-                                 'vdur': [
-                                     {
-                                         'internal-connection-point': [],
-                                         'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
-                                         'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
-                                         'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
-                                         'name': 'ubuntuvnf_vnfd-VM'
-                                     }
-                                 ],
-                                 'vnfd-ref': 'ubuntuvnf_vnfd',
-                                 'member-vnf-index-ref': '1',
-                                 'created-time': 1526044312.0999322,
-                                 'vnfd-id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
-                                 'id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01'}
+        get_vnfr.return_value = {
+            "_id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+            "_admin": {
+                "projects_read": ["admin"],
+                "created": 1526044312.102287,
+                "modified": 1526044312.102287,
+                "projects_write": ["admin"],
+            },
+            "vim-account-id": "c1740601-7287-48c8-a2c9-bce8fee459eb",
+            "nsr-id-ref": "5ec3f571-d540-4cb0-9992-971d1b08312e",
+            "vdur": [
+                {
+                    "internal-connection-point": [],
+                    "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+                    "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+                    "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+                    "name": "ubuntuvnf_vnfd-VM",
+                }
+            ],
+            "vnfd-ref": "ubuntuvnf_vnfd",
+            "member-vnf-index-ref": "1",
+            "created-time": 1526044312.0999322,
+            "vnfd-id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+            "id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+        }
         common_db_client = CommonDbClient(self.config)
-        vim_account_id = common_db_client.get_vim_account_id('5ec3f571-d540-4cb0-9992-971d1b08312e', 1)
-        self.assertEqual(vim_account_id, 'c1740601-7287-48c8-a2c9-bce8fee459eb')
+        vim_account_id = common_db_client.get_vim_account_id(
+            "5ec3f571-d540-4cb0-9992-971d1b08312e", 1
+        )
+        self.assertEqual(vim_account_id, "c1740601-7287-48c8-a2c9-bce8fee459eb")
 
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(dbmongo.DbMongo, "get_one")
     def test_get_vdur(self, get_one):
-        get_one.return_value = {'_id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
-                                '_admin': {
-                                    'projects_read': ['admin'], 'created': 1526044312.102287,
-                                    'modified': 1526044312.102287, 'projects_write': ['admin']
-                                },
-                                'vim-account-id': 'c1740601-7287-48c8-a2c9-bce8fee459eb',
-                                'nsr-id-ref': '5ec3f571-d540-4cb0-9992-971d1b08312e',
-                                'vdur': [
-                                    {
-                                        'internal-connection-point': [],
-                                        'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
-                                        'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
-                                        'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
-                                        'name': 'ubuntuvnf_vnfd-VM'
-                                    }
-                                ],
-                                'vnfd-ref': 'ubuntuvnf_vnfd',
-                                'member-vnf-index-ref': '1',
-                                'created-time': 1526044312.0999322,
-                                'vnfd-id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
-                                'id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01'}
+        get_one.return_value = {
+            "_id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+            "_admin": {
+                "projects_read": ["admin"],
+                "created": 1526044312.102287,
+                "modified": 1526044312.102287,
+                "projects_write": ["admin"],
+            },
+            "vim-account-id": "c1740601-7287-48c8-a2c9-bce8fee459eb",
+            "nsr-id-ref": "5ec3f571-d540-4cb0-9992-971d1b08312e",
+            "vdur": [
+                {
+                    "internal-connection-point": [],
+                    "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+                    "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+                    "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+                    "name": "ubuntuvnf_vnfd-VM",
+                }
+            ],
+            "vnfd-ref": "ubuntuvnf_vnfd",
+            "member-vnf-index-ref": "1",
+            "created-time": 1526044312.0999322,
+            "vnfd-id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+            "id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+        }
         common_db_client = CommonDbClient(self.config)
-        vdur = common_db_client.get_vdur('5ec3f571-d540-4cb0-9992-971d1b08312e', '1', 'ubuntuvnf_vnfd-VM')
+        vdur = common_db_client.get_vdur(
+            "5ec3f571-d540-4cb0-9992-971d1b08312e", "1", "ubuntuvnf_vnfd-VM"
+        )
         expected_vdur = {
-            'internal-connection-point': [],
-            'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
-            'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
-            'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
-            'name': 'ubuntuvnf_vnfd-VM'
+            "internal-connection-point": [],
+            "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+            "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+            "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+            "name": "ubuntuvnf_vnfd-VM",
         }
 
         self.assertDictEqual(vdur, expected_vdur)
@@ -103,82 +115,97 @@ class CommonDbClientTest(unittest.TestCase):
     @mock.patch.object(dbmongo.DbMongo, "get_one")
     @mock.patch.object(CommonDbClient, "decrypt_vim_password")
     def test_get_vim_account_default_schema(self, decrypt_vim_password, get_one):
-        schema_version = '10.0'
-        vim_id = '1'
+        schema_version = "10.0"
+        vim_id = "1"
         get_one.return_value = {
-            '_id': vim_id,
-            'vim_password': 'vim_password',
-            'schema_version': schema_version,
-            'config': {
-                'admin_password': 'admin_password',
-                'vrops_password': 'vrops_password',
-                'nsx_password': 'nsx_password',
-                'vcenter_password': 'vcenter_password'
-            }
+            "_id": vim_id,
+            "vim_password": "vim_password",
+            "schema_version": schema_version,
+            "config": {
+                "admin_password": "admin_password",
+                "vrops_password": "vrops_password",
+                "nsx_password": "nsx_password",
+                "vcenter_password": "vcenter_password",
+            },
         }
 
         common_db_client = CommonDbClient(self.config)
-        common_db_client.get_vim_account('1')
+        common_db_client.get_vim_account("1")
 
-        decrypt_vim_password.assert_any_call('vim_password', schema_version, vim_id)
-        decrypt_vim_password.assert_any_call('vrops_password', schema_version, vim_id)
-        decrypt_vim_password.assert_any_call('admin_password', schema_version, vim_id)
-        decrypt_vim_password.assert_any_call('nsx_password', schema_version, vim_id)
-        decrypt_vim_password.assert_any_call('vcenter_password', schema_version, vim_id)
+        decrypt_vim_password.assert_any_call("vim_password", schema_version, vim_id)
+        decrypt_vim_password.assert_any_call("vrops_password", schema_version, vim_id)
+        decrypt_vim_password.assert_any_call("admin_password", schema_version, vim_id)
+        decrypt_vim_password.assert_any_call("nsx_password", schema_version, vim_id)
+        decrypt_vim_password.assert_any_call("vcenter_password", schema_version, vim_id)
 
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(dbmongo.DbMongo, "get_one")
     @mock.patch.object(CommonDbClient, "decrypt_vim_password")
     def test_get_vim_account_1_1_schema(self, decrypt_vim_password, get_one):
-        schema_version = '1.1'
-        vim_id = '1'
+        schema_version = "1.1"
+        vim_id = "1"
         get_one.return_value = {
-            '_id': vim_id,
-            'vim_password': 'vim_password',
-            'schema_version': schema_version,
-            'config': {
-                'vrops_password': 'vrops_password'
-            }
+            "_id": vim_id,
+            "vim_password": "vim_password",
+            "schema_version": schema_version,
+            "config": {"vrops_password": "vrops_password"},
         }
 
         common_db_client = CommonDbClient(self.config)
-        common_db_client.get_vim_account('1')
+        common_db_client.get_vim_account("1")
 
-        decrypt_vim_password.assert_any_call('vim_password', schema_version, vim_id)
-        self.assertRaises(AssertionError, decrypt_vim_password.assert_any_call, 'vrops_password', schema_version,
-                          vim_id)
+        decrypt_vim_password.assert_any_call("vim_password", schema_version, vim_id)
+        self.assertRaises(
+            AssertionError,
+            decrypt_vim_password.assert_any_call,
+            "vrops_password",
+            schema_version,
+            vim_id,
+        )
 
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(dbmongo.DbMongo, "get_list")
     def test_get_alarms(self, get_list):
-        get_list.return_value = [{
-            'uuid': '1',
-            'name': 'name',
-            'severity': 'severity',
-            'threshold': 50,
-            'operation': 'operation',
-            'statistic': 'statistic',
-            'tags': {},
-        }]
+        get_list.return_value = [
+            {
+                "uuid": "1",
+                "name": "name",
+                "severity": "severity",
+                "threshold": 50,
+                "operation": "operation",
+                "statistic": "statistic",
+                "tags": {},
+            }
+        ]
 
         common_db_client = CommonDbClient(self.config)
         alarms = common_db_client.get_alarms()
-        self.assertEqual('1', alarms[0].uuid)
+        self.assertEqual("1", alarms[0].uuid)
 
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(dbmongo.DbMongo, "create")
     def test_create_alarm(self, create):
-        alarm = Alarm('name', 'severity', 50.0, 'operation', 'statistic', 'metric', {})
-        alarm.uuid = '1'
+        alarm = Alarm("name", "severity", 50.0, "operation", "statistic", "metric", {})
+        alarm.uuid = "1"
         common_db_client = CommonDbClient(self.config)
         common_db_client.create_alarm(alarm)
-        create.assert_called_with('alarms', {'tags': {}, 'threshold': 50.0, 'metric': 'metric',
-                                             'severity': 'severity', 'statistic': 'statistic',
-                                             'name': 'name', 'operation': 'operation', 'uuid': '1'})
+        create.assert_called_with(
+            "alarms",
+            {
+                "tags": {},
+                "threshold": 50.0,
+                "metric": "metric",
+                "severity": "severity",
+                "statistic": "statistic",
+                "name": "name",
+                "operation": "operation",
+                "uuid": "1",
+            },
+        )
 
     @mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
     @mock.patch.object(dbmongo.DbMongo, "del_one")
     def test_delete_alarm(self, delete):
         common_db_client = CommonDbClient(self.config)
-        common_db_client.delete_alarm('1')
-        delete.assert_called_with('alarms', {'uuid': '1'})
+        common_db_client.delete_alarm("1")
+        delete.assert_called_with("alarms", {"uuid": "1"})
index 292fbe3..126eb9f 100644 (file)
@@ -31,43 +31,42 @@ from osm_mon.core.config import Config
 
 
 class TestMessageBusClient(TestCase):
-
     def setUp(self):
         self.config = Config()
-        self.config.set('message', 'driver', 'kafka')
+        self.config.set("message", "driver", "kafka")
         self.loop = asyncio.new_event_loop()
 
-    @mock.patch.object(MsgKafka, 'aioread')
+    @mock.patch.object(MsgKafka, "aioread")
     def test_aioread(self, aioread):
         async def mock_callback():
             pass
 
         future = asyncio.Future(loop=self.loop)
-        future.set_result('mock')
+        future.set_result("mock")
         aioread.return_value = future
         msg_bus = MessageBusClient(self.config, loop=self.loop)
-        topic = 'test_topic'
+        topic = "test_topic"
         self.loop.run_until_complete(msg_bus.aioread([topic], mock_callback))
-        aioread.assert_called_with(['test_topic'], self.loop, aiocallback=mock_callback)
+        aioread.assert_called_with(["test_topic"], self.loop, aiocallback=mock_callback)
 
-    @mock.patch.object(MsgKafka, 'aiowrite')
+    @mock.patch.object(MsgKafka, "aiowrite")
     def test_aiowrite(self, aiowrite):
         future = asyncio.Future(loop=self.loop)
-        future.set_result('mock')
+        future.set_result("mock")
         aiowrite.return_value = future
         msg_bus = MessageBusClient(self.config, loop=self.loop)
-        topic = 'test_topic'
-        key = 'test_key'
-        msg = {'test': 'test_msg'}
+        topic = "test_topic"
+        key = "test_key"
+        msg = {"test": "test_msg"}
         self.loop.run_until_complete(msg_bus.aiowrite(topic, key, msg))
         aiowrite.assert_called_with(topic, key, msg, self.loop)
 
-    @mock.patch.object(MsgKafka, 'aioread')
+    @mock.patch.object(MsgKafka, "aioread")
     def test_aioread_once(self, aioread):
         future = asyncio.Future(loop=self.loop)
-        future.set_result('mock')
+        future.set_result("mock")
         aioread.return_value = future
         msg_bus = MessageBusClient(self.config, loop=self.loop)
-        topic = 'test_topic'
+        topic = "test_topic"
         self.loop.run_until_complete(msg_bus.aioread_once(topic))
-        aioread.assert_called_with('test_topic', self.loop)
+        aioread.assert_called_with("test_topic", self.loop)
index 43cec96..ce72d09 100644 (file)
@@ -36,16 +36,18 @@ class EvaluatorTest(TestCase):
         super().setUp()
         self.config = Config()
 
-    @mock.patch('multiprocessing.Process')
+    @mock.patch("multiprocessing.Process")
     @mock.patch.object(Evaluator, "notify_alarm")
     @mock.patch.object(EvaluatorService, "evaluate_alarms")
     def test_evaluate(self, evaluate_alarms, notify_alarm, process):
         mock_alarm = mock.Mock()
-        mock_alarm.operation = 'gt'
+        mock_alarm.operation = "gt"
         mock_alarm.threshold = 50.0
         evaluate_alarms.return_value = [(mock_alarm, AlarmStatus.ALARM)]
 
         evaluator = Evaluator(self.config)
         evaluator.evaluate()
 
-        process.assert_called_with(target=notify_alarm, args=(mock_alarm, AlarmStatus.ALARM))
+        process.assert_called_with(
+            target=notify_alarm, args=(mock_alarm, AlarmStatus.ALARM)
+        )
index a72dcaf..15af5b6 100644 (file)
@@ -40,7 +40,7 @@ vnfr_record_mock = {
                 {
                     "mac-address": "fa:16:3e:71:fd:b8",
                     "name": "eth0",
-                    "ip-address": "192.168.160.2"
+                    "ip-address": "192.168.160.2",
                 }
             ],
             "status": "ACTIVE",
@@ -48,7 +48,7 @@ vnfr_record_mock = {
             "name": "cirros_ns-1-cirros_vnfd-VM-1",
             "status-detailed": None,
             "ip-address": "192.168.160.2",
-            "vdu-id-ref": "cirros_vnfd-VM"
+            "vdu-id-ref": "cirros_vnfd-VM",
         }
     ],
     "id": "0d9d06ad-3fc2-418c-9934-465e815fafe2",
@@ -56,23 +56,13 @@ vnfr_record_mock = {
     "vnfd-id": "63f44c41-45ee-456b-b10d-5f08fb1796e0",
     "_admin": {
         "created": 1535392482.0067868,
-        "projects_read": [
-            "admin"
-        ],
+        "projects_read": ["admin"],
         "modified": 1535392482.0067868,
-        "projects_write": [
-            "admin"
-        ]
+        "projects_write": ["admin"],
     },
     "nsr-id-ref": "87776f33-b67c-417a-8119-cb08e4098951",
     "member-vnf-index-ref": "1",
-    "connection-point": [
-        {
-            "name": "eth0",
-            "id": None,
-            "connection-point-id": None
-        }
-    ]
+    "connection-point": [{"name": "eth0", "id": None, "connection-point-id": None}],
 }
 
 vnfd_record_mock = {
@@ -87,83 +77,51 @@ vnfd_record_mock = {
             "name": "cirros_vnfd-VM",
             "int-cpd": [
                 {
-                    "virtual-network-interface-requirement": [
-                        {
-                            "name": "vdu-eth0"
-                        }
-                    ],
-                    "id": "vdu-eth0-int"
+                    "virtual-network-interface-requirement": [{"name": "vdu-eth0"}],
+                    "id": "vdu-eth0-int",
                 }
             ],
             "virtual-compute-desc": "cirros_vnfd-VM-compute",
-            "virtual-storage-desc": [
-                "cirros_vnfd-VM-storage"
-            ],
+            "virtual-storage-desc": ["cirros_vnfd-VM-storage"],
             "sw-image-desc": "cirros034",
             "monitoring-parameter": [
                 {
                     "id": "cirros_vnfd-VM_memory_util",
                     "name": "cirros_vnfd-VM_memory_util",
-                    "performance-metric": "average_memory_utilization"
+                    "performance-metric": "average_memory_utilization",
                 }
-            ]
+            ],
         }
     ],
     "virtual-compute-desc": [
         {
             "id": "cirros_vnfd-VM-compute",
-            "virtual-cpu": {
-                "num-virtual-cpu": 1
-            },
-            "virtual-memory": {
-                "size": 1
-            }
-        }
-    ],
-    "virtual-storage-desc": [
-        {
-            "id": "cirros_vnfd-VM-storage",
-            "size-of-storage": 2
-        }
-    ],
-    "sw-image-desc": [
-        {
-            "id": "cirros034",
-            "name": "cirros034",
-            "image": "cirros034"
+            "virtual-cpu": {"num-virtual-cpu": 1},
+            "virtual-memory": {"size": 1},
         }
     ],
+    "virtual-storage-desc": [{"id": "cirros_vnfd-VM-storage", "size-of-storage": 2}],
+    "sw-image-desc": [{"id": "cirros034", "name": "cirros034", "image": "cirros034"}],
     "ext-cpd": [
         {
-            "int-cpd": {
-                "vdu-id": "cirros_vnfd-VM",
-                "cpd": "vdu-eth0-int"
-            },
-            "id": "vnf-cp0-ext"
+            "int-cpd": {"vdu-id": "cirros_vnfd-VM", "cpd": "vdu-eth0-int"},
+            "id": "vnf-cp0-ext",
         }
     ],
     "df": [
         {
             "id": "default-df",
-            "vdu-profile": [
-                {
-                    "id": "cirros_vnfd-VM"
-                }
-            ],
+            "vdu-profile": [{"id": "cirros_vnfd-VM"}],
             "instantiation-level": [
                 {
                     "id": "default-instantiation-level",
-                    "vdu-level": [
-                        {
-                            "vdu-id": "cirros_vnfd-VM"
-                        }
-                    ]
+                    "vdu-level": [{"vdu-id": "cirros_vnfd-VM"}],
                 }
-            ]
+            ],
         }
     ],
     "description": "Simple VNF example with a cirros and a scaling group descriptor",
-    "mgmt-cp": "vnf-cp0-ext"
+    "mgmt-cp": "vnf-cp0-ext",
 }
 
 
@@ -177,9 +135,9 @@ class EvaluatorTest(TestCase):
     @mock.patch.object(EvaluatorService, "_get_metric_value")
     def test_evaluate_metric(self, get_metric_value):
         mock_alarm = mock.Mock()
-        mock_alarm.operation = 'gt'
+        mock_alarm.operation = "gt"
         mock_alarm.threshold = 50.0
-        mock_alarm.metric = 'metric_name'
+        mock_alarm.metric = "metric_name"
         get_metric_value.return_value = 100.0
 
         service = EvaluatorService(self.config)
@@ -188,7 +146,7 @@ class EvaluatorTest(TestCase):
         service.queue.put.assert_called_with((mock_alarm, AlarmStatus.ALARM))
         service.queue.reset_mock()
 
-        mock_alarm.operation = 'lt'
+        mock_alarm.operation = "lt"
         service._evaluate_metric(mock_alarm)
         service.queue.put.assert_called_with((mock_alarm, AlarmStatus.OK))
         service.queue.reset_mock()
@@ -197,16 +155,18 @@ class EvaluatorTest(TestCase):
         service._evaluate_metric(mock_alarm)
         service.queue.put.assert_called_with((mock_alarm, AlarmStatus.INSUFFICIENT))
 
-    @mock.patch('multiprocessing.Process')
+    @mock.patch("multiprocessing.Process")
     @mock.patch.object(EvaluatorService, "_evaluate_metric")
     @mock.patch.object(CommonDbClient, "get_vnfd")
     @mock.patch.object(CommonDbClient, "get_vnfr")
     @mock.patch.object(CommonDbClient, "get_alarms")
-    def test_evaluate_alarms(self, alarm_list, get_vnfr, get_vnfd, evaluate_metric, process):
+    def test_evaluate_alarms(
+        self, alarm_list, get_vnfr, get_vnfd, evaluate_metric, process
+    ):
         mock_alarm = mock.Mock()
-        mock_alarm.vdur_name = 'cirros_ns-1-cirros_vnfd-VM-1'
-        mock_alarm.monitoring_param = 'cirros_vnf_memory_util'
-        mock_alarm.tags = {'name': 'value'}
+        mock_alarm.vdur_name = "cirros_ns-1-cirros_vnfd-VM-1"
+        mock_alarm.monitoring_param = "cirros_vnf_memory_util"
+        mock_alarm.tags = {"name": "value"}
         alarm_list.return_value = [mock_alarm]
         get_vnfr.return_value = vnfr_record_mock
         get_vnfd.return_value = vnfd_record_mock
@@ -218,8 +178,8 @@ class EvaluatorTest(TestCase):
 
     @mock.patch.object(PrometheusBackend, "get_metric_value")
     def test_get_metric_value_prometheus(self, get_metric_value):
-        self.config.set('evaluator', 'backend', 'prometheus')
+        self.config.set("evaluator", "backend", "prometheus")
         evaluator = EvaluatorService(self.config)
-        evaluator._get_metric_value('test', {})
+        evaluator._get_metric_value("test", {})
 
-        get_metric_value.assert_called_with('test', {})
+        get_metric_value.assert_called_with("test", {})
index 1d20c1e..6919517 100644 (file)
@@ -35,7 +35,9 @@ class EvaluatorTest(TestCase):
     def test_build_query(self):
         prometheus = PrometheusBackend(self.config)
         alarm_tags = collections.OrderedDict()
-        alarm_tags['tag_1'] = 'value_1'
-        alarm_tags['tag_2'] = 'value_2'
-        query = prometheus._build_query('metric_name', alarm_tags)
-        self.assertEqual(query, 'query=osm_metric_name{tag_1="value_1",tag_2="value_2"}')
+        alarm_tags["tag_1"] = "value_1"
+        alarm_tags["tag_2"] = "value_2"
+        query = prometheus._build_query("metric_name", alarm_tags)
+        self.assertEqual(
+            query, 'query=osm_metric_name{tag_1="value_1",tag_2="value_2"}'
+        )
index 04d35e3..3c91487 100644 (file)
--- a/setup.py
+++ b/setup.py
 
 from setuptools import setup
 
-_name = 'osm_mon'
-_version_command = ('git describe --match v* --tags --long --dirty', 'pep440-git-full')
-_description = 'OSM Monitoring Module'
+_name = "osm_mon"
+_version_command = ("git describe --match v* --tags --long --dirty", "pep440-git-full")
+_description = "OSM Monitoring Module"
 _author = "OSM Support"
-_author_email = 'osmsupport@etsi.org'
-_maintainer = 'OSM Support'
-_maintainer_email = 'osmsupport@etsi.org'
-_license = 'Apache 2.0'
-_url = 'https://osm.etsi.org/gitweb/?p=osm/MON.git;a=tree'
+_author_email = "osmsupport@etsi.org"
+_maintainer = "OSM Support"
+_maintainer_email = "osmsupport@etsi.org"
+_license = "Apache 2.0"
+_url = "https://osm.etsi.org/gitweb/?p=osm/MON.git;a=tree"
 
 setup(
     name=_name,
     version_command=_version_command,
     description=_description,
-    long_description=open('README.rst', encoding='utf-8').read(),
+    long_description=open("README.rst", encoding="utf-8").read(),
     author=_author,
     author_email=_author_email,
     maintainer=_maintainer,
@@ -54,5 +54,5 @@ setup(
             "osm-mon-healthcheck = osm_mon.cmd.mon_healthcheck:main",
         ]
     },
-    setup_requires=['setuptools-version-command']
+    setup_requires=["setuptools-version-command"],
 )
diff --git a/tox.ini b/tox.ini
index 478d1ab..db88368 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -34,6 +34,7 @@ deps = black
 skip_install = true
 commands =
         - black --check --diff osm_mon/
+        - black --check --diff setup.py
 
 
 #######################################################################################