def main():
- parser = argparse.ArgumentParser(prog='osm-mon-collector')
- parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+ parser = argparse.ArgumentParser(prog="osm-mon-collector")
+ parser.add_argument("--config-file", nargs="?", help="MON configuration file")
args = parser.parse_args()
cfg = Config(args.config_file)
root = logging.getLogger()
- root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+ root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+ ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+ )
ch.setFormatter(formatter)
root.addHandler(ch)
log.error("Failed to start MON Collector")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
def main():
- parser = argparse.ArgumentParser(prog='osm-policy-agent')
- parser.add_argument('--config-file', nargs='?', help='POL configuration file')
+ parser = argparse.ArgumentParser(prog="osm-policy-agent")
+ parser.add_argument("--config-file", nargs="?", help="POL configuration file")
args = parser.parse_args()
cfg = Config(args.config_file)
root = logging.getLogger()
- root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+ root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+ ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+ )
ch.setFormatter(formatter)
root.addHandler(ch)
log.error("Failed to start MON Dashboarder")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
def main():
- parser = argparse.ArgumentParser(prog='osm-mon-evaluator')
- parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+ parser = argparse.ArgumentParser(prog="osm-mon-evaluator")
+ parser.add_argument("--config-file", nargs="?", help="MON configuration file")
args = parser.parse_args()
cfg = Config(args.config_file)
root = logging.getLogger()
- root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+ root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+ ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+ )
ch.setFormatter(formatter)
root.addHandler(ch)
log.error("Failed to start MON Evaluator")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
def main():
- parser = argparse.ArgumentParser(prog='osm-mon-healthcheck')
- parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+ parser = argparse.ArgumentParser(prog="osm-mon-healthcheck")
+ parser.add_argument("--config-file", nargs="?", help="MON configuration file")
# args = parser.parse_args()
# cfg = Config(args.config_file)
return True
return False
- processes_to_check = ['osm-mon-collector', 'osm-mon-evaluator', 'osm-mon-server']
- ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE).communicate()[0]
- processes_running = ps.decode().split('\n')
+ processes_to_check = ["osm-mon-collector", "osm-mon-evaluator", "osm-mon-server"]
+ ps = subprocess.Popen(["ps", "aux"], stdout=subprocess.PIPE).communicate()[0]
+ processes_running = ps.decode().split("\n")
for p in processes_to_check:
if not _contains_process(processes_running, p):
log.error("Process %s not running!" % p)
def _is_prometheus_exporter_ok():
    """Check that the local MON Prometheus exporter answers on port 8000.

    Returns:
        bool: True if the exporter responds with a non-error HTTP status,
        False on any connection failure, timeout, or HTTP error status.
    """
    try:
        # Without a timeout this healthcheck could hang forever if the
        # exporter socket accepts but never answers; bound the wait instead.
        r = requests.get("http://localhost:8000", timeout=5)
        r.raise_for_status()
        return True
    except Exception:
        # Any failure (connection refused, timeout, 4xx/5xx) means "not ok".
        return False
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
def main():
- parser = argparse.ArgumentParser(prog='osm-mon-server')
- parser.add_argument('--config-file', nargs='?', help='MON configuration file')
+ parser = argparse.ArgumentParser(prog="osm-mon-server")
+ parser.add_argument("--config-file", nargs="?", help="MON configuration file")
args = parser.parse_args()
cfg = Config(args.config_file)
root = logging.getLogger()
- root.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
+ root.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
ch = logging.StreamHandler(sys.stdout)
- ch.setLevel(logging.getLevelName(cfg.get('global', 'loglevel')))
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%m/%d/%Y %I:%M:%S %p')
+ ch.setLevel(logging.getLevelName(cfg.get("global", "loglevel")))
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s", "%m/%d/%Y %I:%M:%S %p"
+ )
ch.setFormatter(formatter)
root.addHandler(ch)
log.error("Failed to start MON Server")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
logging.debug("wait_till_commondb_is_ready")
- while(True):
+ while True:
commondb_url = config.conf["database"].get("uri")
try:
commondb = pymongo.MongoClient(commondb_url)
commondb.server_info()
break
except Exception:
- logging.info("{} process is waiting for commondb to come up...".format(process_name))
+ logging.info(
+ "{} process is waiting for commondb to come up...".format(process_name)
+ )
time.sleep(commondb_wait_time)
logging.debug("wait_till_kafka_is_ready")
- while(True):
+ while True:
kafka_ready = False
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
== 0
):
# Get the list of topics. If kafka is not ready exception will be thrown.
- consumer = kafka.KafkaConsumer(group_id=config.conf["message"].get("group_id"),
- bootstrap_servers=[config.conf.get("message", {}).get("host",
- "kafka") + ":" + config.conf["message"]
- .get("port")])
+ consumer = kafka.KafkaConsumer(
+ group_id=config.conf["message"].get("group_id"),
+ bootstrap_servers=[
+ config.conf.get("message", {}).get("host", "kafka")
+ + ":"
+ + config.conf["message"].get("port")
+ ],
+ )
all_topics = consumer.topics()
logging.debug("Number of topics found: %s", len(all_topics))
# Send dummy message in kafka topics. If kafka is not ready exception will be thrown.
- producer = kafka.KafkaProducer(bootstrap_servers=[config.conf.get("message", {}).get("host",
- "kafka") + ":" + config.conf["message"]
- .get("port")])
+ producer = kafka.KafkaProducer(
+ bootstrap_servers=[
+ config.conf.get("message", {}).get("host", "kafka")
+ + ":"
+ + config.conf["message"].get("port")
+ ]
+ )
mon_topics = ["alarm_request", "users", "project"]
for mon_topic in mon_topics:
producer.send(mon_topic, key=b"echo", value=b"dummy message")
if kafka_ready:
break
else:
- logging.info("{} process is waiting for kafka to come up...".format(process_name))
+ logging.info(
+ "{} process is waiting for kafka to come up...".format(process_name)
+ )
time.sleep(kafka_wait_time)
-def wait_till_core_services_are_ready(config, process_name="osm-mon", commondb_wait_time=5, kafka_wait_time=5):
+def wait_till_core_services_are_ready(
+ config, process_name="osm-mon", commondb_wait_time=5, kafka_wait_time=5
+):
logging.debug("wait_till_core_services_are_ready")
log = logging.getLogger(__name__)
# Prefix prepended to every metric name exported to Prometheus.
OSM_METRIC_PREFIX = "osm_"
class PrometheusBackend(BaseBackend):
    """Metric backend that publishes collected metrics via Prometheus.

    On construction it registers a CustomCollector with the Prometheus
    registry and starts the exporter HTTP server on port 8000; every
    handle() call replaces the collector's gauge families with the
    latest batch of samples.
    """

    def __init__(self):
        self.custom_collector = CustomCollector()
        self._start_exporter(8000)

    def handle(self, metrics: List[Metric]):
        """Fold OSM metrics into Prometheus gauge families.

        Samples sharing a metric name go into one GaugeMetricFamily,
        labelled with that sample's tag keys.
        """
        log.debug("handle")
        log.debug("metrics: %s", metrics)
        gauges = {}
        for sample in metrics:
            gauge = gauges.get(sample.name)
            if gauge is None:
                # First sample seen for this name: create the family, using
                # this sample's tag keys as the label set.
                gauge = GaugeMetricFamily(
                    OSM_METRIC_PREFIX + sample.name,
                    "OSM metric",
                    labels=list(sample.tags.keys()),
                )
                gauges[sample.name] = gauge
            gauge.add_metric(list(sample.tags.values()), sample.value)
        self.custom_collector.metrics = gauges.values()

    def _start_exporter(self, port):
        """Register the custom collector and start the exporter HTTP server."""
        log.debug("_start_exporter")
        log.debug("port: %s", port)
        REGISTRY.register(self.custom_collector)
        log.info("Starting MON Prometheus exporter at port %s", port)
        start_http_server(port)
class CustomCollector(object):
-
def __init__(self):
self.metrics = []
def describe(self):
- log.debug('describe')
+ log.debug("describe")
return []
def collect(self):
log = logging.getLogger(__name__)
-METRIC_BACKENDS = [
- PrometheusBackend
-]
+METRIC_BACKENDS = [PrometheusBackend]
class Collector:
self._init_backends()
def collect_forever(self):
- log.debug('collect_forever')
+ log.debug("collect_forever")
while True:
try:
self.collect_metrics()
- time.sleep(int(self.conf.get('collector', 'interval')))
+ time.sleep(int(self.conf.get("collector", "interval")))
except Exception:
log.exception("Error collecting metrics")
class BaseInfraCollector:
-
def collect(self) -> List[Metric]:
pass
def collect(self) -> List[Metric]:
metrics = []
vim_status = self.is_vim_ok()
- if self.vim_account['_admin']['projects_read']:
- vim_project_id = self.vim_account['_admin']['projects_read'][0]
+ if self.vim_account["_admin"]["projects_read"]:
+ vim_project_id = self.vim_account["_admin"]["projects_read"][0]
else:
- vim_project_id = ''
+ vim_project_id = ""
vim_tags = {
- 'vim_account_id': self.vim_account['_id'],
- 'project_id': vim_project_id
+ "vim_account_id": self.vim_account["_id"],
+ "project_id": vim_project_id,
}
- vim_status_metric = Metric(vim_tags, 'vim_status', vim_status)
+ vim_status_metric = Metric(vim_tags, "vim_status", vim_status)
metrics.append(vim_status_metric)
- vnfrs = self.common_db.get_vnfrs(vim_account_id=self.vim_account['_id'])
+ vnfrs = self.common_db.get_vnfrs(vim_account_id=self.vim_account["_id"])
for vnfr in vnfrs:
- nsr_id = vnfr['nsr-id-ref']
- ns_name = self.common_db.get_nsr(nsr_id)['name']
- vnf_member_index = vnfr['member-vnf-index-ref']
- if vnfr['_admin']['projects_read']:
- vnfr_project_id = vnfr['_admin']['projects_read'][0]
+ nsr_id = vnfr["nsr-id-ref"]
+ ns_name = self.common_db.get_nsr(nsr_id)["name"]
+ vnf_member_index = vnfr["member-vnf-index-ref"]
+ if vnfr["_admin"]["projects_read"]:
+ vnfr_project_id = vnfr["_admin"]["projects_read"][0]
else:
- vnfr_project_id = ''
- for vdur in vnfr['vdur']:
- if 'vim-id' not in vdur:
+ vnfr_project_id = ""
+ for vdur in vnfr["vdur"]:
+ if "vim-id" not in vdur:
log.debug("Field vim-id is not present in vdur")
continue
- resource_uuid = vdur['vim-id']
+ resource_uuid = vdur["vim-id"]
tags = {
- 'vim_account_id': self.vim_account['_id'],
- 'resource_uuid': resource_uuid,
- 'nsr_id': nsr_id,
- 'ns_name': ns_name,
- 'vnf_member_index': vnf_member_index,
- 'vdur_name': vdur.get("name", ""),
- 'project_id': vnfr_project_id
+ "vim_account_id": self.vim_account["_id"],
+ "resource_uuid": resource_uuid,
+ "nsr_id": nsr_id,
+ "ns_name": ns_name,
+ "vnf_member_index": vnf_member_index,
+ "vdur_name": vdur.get("name", ""),
+ "project_id": vnfr_project_id,
}
try:
vm = self.nova.servers.get(resource_uuid)
- vm_status = (1 if vm.status == 'ACTIVE' else 0)
- vm_status_metric = Metric(tags, 'vm_status', vm_status)
+ vm_status = 1 if vm.status == "ACTIVE" else 0
+ vm_status_metric = Metric(tags, "vm_status", vm_status)
except Exception as e:
log.warning("VM status is not OK: %s" % e)
- vm_status_metric = Metric(tags, 'vm_status', 0)
+ vm_status_metric = Metric(tags, "vm_status", 0)
metrics.append(vm_status_metric)
return metrics
def collect(self) -> List[Metric]:
metrics = []
sdnc_status = self.is_sdnc_ok()
- if self.sdnc['_admin']['projects_read']:
- sdnc_project_id = self.sdnc['_admin']['projects_read'][0]
+ if self.sdnc["_admin"]["projects_read"]:
+ sdnc_project_id = self.sdnc["_admin"]["projects_read"][0]
else:
- sdnc_project_id = ''
- sdnc_tags = {
- 'sdnc_id': self.sdnc['_id'],
- 'project_id': sdnc_project_id
- }
- sdnc_status_metric = Metric(sdnc_tags, 'sdnc_status', sdnc_status)
+ sdnc_project_id = ""
+ sdnc_tags = {"sdnc_id": self.sdnc["_id"], "project_id": sdnc_project_id}
+ sdnc_status_metric = Metric(sdnc_tags, "sdnc_status", sdnc_status)
metrics.append(sdnc_status_metric)
return metrics
def is_sdnc_ok(self) -> bool:
try:
- ip = self.sdnc['ip']
- port = self.sdnc['port']
- user = self.sdnc['user']
- password = self.common_db.decrypt_sdnc_password(self.sdnc['password'],
- self.sdnc['schema_version'],
- self.sdnc['_id'])
+ ip = self.sdnc["ip"]
+ port = self.sdnc["port"]
+ user = self.sdnc["user"]
+ password = self.common_db.decrypt_sdnc_password(
+ self.sdnc["password"], self.sdnc["schema_version"], self.sdnc["_id"]
+ )
# TODO: Add support for https
- url = 'http://{}:{}/onos/v1/devices'.format(ip, port)
+ url = "http://{}:{}/onos/v1/devices".format(ip, port)
requests.get(url, auth=HTTPBasicAuth(user, password))
return True
except Exception:
from osm_mon.core.config import Config
log = logging.getLogger(__name__)
-API_VERSION = '27.0'
+API_VERSION = "27.0"
class VMwareInfraCollector(BaseVimInfraCollector):
-
def __init__(self, config: Config, vim_account_id: str):
super().__init__(config, vim_account_id)
self.vim_account_id = vim_account_id
self.common_db = CommonDbClient(config)
vim_account = self.get_vim_account(vim_account_id)
- self.vcloud_site = vim_account['vim_url']
- self.admin_username = vim_account['admin_username']
- self.admin_password = vim_account['admin_password']
- self.vim_uuid = vim_account['vim_uuid']
- self.org_name = vim_account['orgname']
- self.vim_project_id = vim_account['project_id']
+ self.vcloud_site = vim_account["vim_url"]
+ self.admin_username = vim_account["admin_username"]
+ self.admin_password = vim_account["admin_password"]
+ self.vim_uuid = vim_account["vim_uuid"]
+ self.org_name = vim_account["orgname"]
+ self.vim_project_id = vim_account["project_id"]
def connect_vim_as_admin(self):
- """ Method connect as pvdc admin user to vCloud director.
- There are certain action that can be done only by provider vdc admin user.
- Organization creation / provider network creation etc.
+ """Method connect as pvdc admin user to vCloud director.
+ There are certain action that can be done only by provider vdc admin user.
+ Organization creation / provider network creation etc.
- Returns:
- The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
+ Returns:
+ The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
"""
log.info("Logging into vCD org as admin.")
host = self.vcloud_site
admin_user = self.admin_username
admin_passwd = self.admin_password
- org = 'System'
+ org = "System"
client = Client(host, verify_ssl_certs=False)
client.set_highest_supported_version()
- client.set_credentials(BasicLoginCredentials(admin_user, org,
- admin_passwd))
+ client.set_credentials(BasicLoginCredentials(admin_user, org, admin_passwd))
return client
except Exception as e:
- log.info("Can't connect to a vCloud director as: {} with exception {}".format(admin_user, e))
+ log.info(
+ "Can't connect to a vCloud director as: {} with exception {}".format(
+ admin_user, e
+ )
+ )
def get_vim_account(self, vim_account_id: str):
"""
- Method to get VIM account details by its ID
- arg - VIM ID
- return - dict with vim account details
+ Method to get VIM account details by its ID
+ arg - VIM ID
+ return - dict with vim account details
"""
vim_account = {}
vim_account_info = self.common_db.get_vim_account(vim_account_id)
- vim_account['name'] = vim_account_info['name']
- vim_account['vim_tenant_name'] = vim_account_info['vim_tenant_name']
- vim_account['vim_type'] = vim_account_info['vim_type']
- vim_account['vim_url'] = vim_account_info['vim_url']
- vim_account['org_user'] = vim_account_info['vim_user']
- vim_account['vim_uuid'] = vim_account_info['_id']
- if vim_account_info['_admin']['projects_read']:
- vim_account['project_id'] = vim_account_info['_admin']['projects_read'][0]
+ vim_account["name"] = vim_account_info["name"]
+ vim_account["vim_tenant_name"] = vim_account_info["vim_tenant_name"]
+ vim_account["vim_type"] = vim_account_info["vim_type"]
+ vim_account["vim_url"] = vim_account_info["vim_url"]
+ vim_account["org_user"] = vim_account_info["vim_user"]
+ vim_account["vim_uuid"] = vim_account_info["_id"]
+ if vim_account_info["_admin"]["projects_read"]:
+ vim_account["project_id"] = vim_account_info["_admin"]["projects_read"][0]
else:
- vim_account['project_id'] = ''
+ vim_account["project_id"] = ""
- vim_config = vim_account_info['config']
- vim_account['admin_username'] = vim_config['admin_username']
- vim_account['admin_password'] = vim_config['admin_password']
+ vim_config = vim_account_info["config"]
+ vim_account["admin_username"] = vim_config["admin_username"]
+ vim_account["admin_password"] = vim_config["admin_password"]
- if vim_config['orgname'] is not None:
- vim_account['orgname'] = vim_config['orgname']
+ if vim_config["orgname"] is not None:
+ vim_account["orgname"] = vim_config["orgname"]
return vim_account
if client._session:
org_list = client.get_org_list()
for org in org_list.Org:
- if org.get('name') == self.org_name:
- org_uuid = org.get('href').split('/')[-1]
+ if org.get("name") == self.org_name:
+ org_uuid = org.get("href").split("/")[-1]
- url = '{}/api/org/{}'.format(self.vcloud_site, org_uuid)
+ url = "{}/api/org/{}".format(self.vcloud_site, org_uuid)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
- response = requests.get(url=url,
- headers=headers,
- verify=False)
+ response = requests.get(url=url, headers=headers, verify=False)
- if response.status_code != requests.codes.ok: # pylint: disable=no-member
+ if (
+ response.status_code != requests.codes.ok
+ ): # pylint: disable=no-member
log.info("check_vim_status(): failed to get org details")
else:
org_details = XmlElementTree.fromstring(response.content)
vdc_list = {}
for child in org_details:
- if 'type' in child.attrib:
- if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
- vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+ if "type" in child.attrib:
+ if (
+ child.attrib["type"]
+ == "application/vnd.vmware.vcloud.vdc+xml"
+ ):
+ vdc_list[
+ child.attrib["href"].split("/")[-1:][0]
+ ] = child.attrib["name"]
if vdc_list:
return True
try:
client = self.connect_vim_as_admin()
if client._session:
- url = '{}/api/vApp/vapp-{}'.format(self.vcloud_site, vapp_id)
+ url = "{}/api/vApp/vapp-{}".format(self.vcloud_site, vapp_id)
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
- response = requests.get(url=url,
- headers=headers,
- verify=False)
+ response = requests.get(url=url, headers=headers, verify=False)
- if response.status_code != requests.codes.ok: # pylint: disable=no-member
+ if (
+ response.status_code != requests.codes.ok
+ ): # pylint: disable=no-member
log.info("check_vm_status(): failed to get vApp details")
else:
vapp_details = XmlElementTree.fromstring(response.content)
vm_list = []
for child in vapp_details:
- if child.tag.split("}")[1] == 'Children':
+ if child.tag.split("}")[1] == "Children":
for item in child.getchildren():
vm_list.append(item.attrib)
return vm_list
vim_status = self.check_vim_status()
vim_account_id = self.vim_account_id
vim_project_id = self.vim_project_id
- vim_tags = {
- 'vim_account_id': vim_account_id,
- 'project_id': vim_project_id
- }
- vim_status_metric = Metric(vim_tags, 'vim_status', vim_status)
+ vim_tags = {"vim_account_id": vim_account_id, "project_id": vim_project_id}
+ vim_status_metric = Metric(vim_tags, "vim_status", vim_status)
metrics.append(vim_status_metric)
vnfrs = self.common_db.get_vnfrs(vim_account_id=vim_account_id)
for vnfr in vnfrs:
- nsr_id = vnfr['nsr-id-ref']
- ns_name = self.common_db.get_nsr(nsr_id)['name']
- vnf_member_index = vnfr['member-vnf-index-ref']
- if vnfr['_admin']['projects_read']:
- vnfr_project_id = vnfr['_admin']['projects_read'][0]
+ nsr_id = vnfr["nsr-id-ref"]
+ ns_name = self.common_db.get_nsr(nsr_id)["name"]
+ vnf_member_index = vnfr["member-vnf-index-ref"]
+ if vnfr["_admin"]["projects_read"]:
+ vnfr_project_id = vnfr["_admin"]["projects_read"][0]
else:
- vnfr_project_id = ''
- for vdur in vnfr['vdur']:
- resource_uuid = vdur['vim-id']
+ vnfr_project_id = ""
+ for vdur in vnfr["vdur"]:
+ resource_uuid = vdur["vim-id"]
tags = {
- 'vim_account_id': self.vim_account_id,
- 'resource_uuid': resource_uuid,
- 'nsr_id': nsr_id,
- 'ns_name': ns_name,
- 'vnf_member_index': vnf_member_index,
- 'vdur_name': vdur['name'],
- 'project_id': vnfr_project_id
+ "vim_account_id": self.vim_account_id,
+ "resource_uuid": resource_uuid,
+ "nsr_id": nsr_id,
+ "ns_name": ns_name,
+ "vnf_member_index": vnf_member_index,
+ "vdur_name": vdur["name"],
+ "project_id": vnfr_project_id,
}
try:
vm_list = self.check_vm_status(resource_uuid)
for vm in vm_list:
- if vm['status'] == '4' and vm['deployed'] == 'true':
+ if vm["status"] == "4" and vm["deployed"] == "true":
vm_status = 1
else:
vm_status = 0
- vm_status_metric = Metric(tags, 'vm_status', vm_status)
+ vm_status_metric = Metric(tags, "vm_status", vm_status)
except Exception:
log.exception("VM status is not OK!")
- vm_status_metric = Metric(tags, 'vm_status', 0)
+ vm_status_metric = Metric(tags, "vm_status", 0)
metrics.append(vm_status_metric)
return metrics
# VIM type identifier -> collector class used for per-VNF metric collection.
VIM_COLLECTORS = {
    "openstack": OpenstackCollector,
    "vmware": VMwareCollector,
    "vio": VIOCollector,
}

# VIM type identifier -> collector class used for VIM infrastructure status.
VIM_INFRA_COLLECTORS = {
    "openstack": OpenstackInfraCollector,
    "vmware": VMwareInfraCollector,
    "vio": VIOInfraCollector,
}

# Both supported ONOS-based SDN controller types share one infra collector.
SDN_INFRA_COLLECTORS = {
    "onosof": OnosInfraCollector,
    "onos_vpls": OnosInfraCollector,
}
class CollectorService:
-
def __init__(self, config: Config):
self.conf = config
self.common_db = CommonDbClient(self.conf)
def _get_vim_type(conf: Config, vim_account_id: str) -> str:
common_db = CommonDbClient(conf)
vim_account = common_db.get_vim_account(vim_account_id)
- vim_type = vim_account['vim_type']
- if 'config' in vim_account and 'vim_type' in vim_account['config']:
- vim_type = vim_account['config']['vim_type'].lower()
- if vim_type == 'vio' and 'vrops_site' not in vim_account['config']:
- vim_type = 'openstack'
+ vim_type = vim_account["vim_type"]
+ if "config" in vim_account and "vim_type" in vim_account["config"]:
+ vim_type = vim_account["config"]["vim_type"].lower()
+ if vim_type == "vio" and "vrops_site" not in vim_account["config"]:
+ vim_type = "openstack"
return vim_type
@staticmethod
log.info("Collecting sdnc metrics")
metrics = []
common_db = CommonDbClient(conf)
- sdn_type = common_db.get_sdnc(sdnc_id)['type']
+ sdn_type = common_db.get_sdnc(sdnc_id)["type"]
if sdn_type in SDN_INFRA_COLLECTORS:
collector = SDN_INFRA_COLLECTORS[sdn_type](conf, sdnc_id)
metrics = collector.collect()
@staticmethod
def _stop_process_pool(executor):
- log.info('Shutting down process pool')
+ log.info("Shutting down process pool")
try:
- log.debug('Stopping residual processes in the process pool')
+ log.debug("Stopping residual processes in the process pool")
for pid, process in executor._processes.items():
if process.is_alive():
process.terminate()
try:
# Shutting down executor
- log.debug('Shutting down process pool executor')
+ log.debug("Shutting down process pool executor")
executor.shutdown()
except RuntimeError as e:
- log.info('RuntimeError in shutting down executer')
- log.debug('RuntimeError %s' % (e))
+ log.info("RuntimeError in shutting down executer")
+ log.debug("RuntimeError %s" % (e))
return
def collect_metrics(self) -> List[Metric]:
start_time = time.time()
# Starting executor pool with pool size process_pool_size. Default process_pool_size is 20
- with concurrent.futures.ProcessPoolExecutor(self.conf.get('collector', 'process_pool_size')) as executor:
- log.info('Started metric collector process pool with pool size %s' % (self.conf.get('collector',
- 'process_pool_size')))
+ with concurrent.futures.ProcessPoolExecutor(
+ self.conf.get("collector", "process_pool_size")
+ ) as executor:
+ log.info(
+ "Started metric collector process pool with pool size %s"
+ % (self.conf.get("collector", "process_pool_size"))
+ )
futures = []
for vnfr in vnfrs:
- nsr_id = vnfr['nsr-id-ref']
- vnf_member_index = vnfr['member-vnf-index-ref']
- vim_account_id = self.common_db.get_vim_account_id(nsr_id, vnf_member_index)
- futures.append(executor.submit(CollectorService._collect_vim_metrics, self.conf, vnfr, vim_account_id))
- futures.append(executor.submit(CollectorService._collect_vca_metrics, self.conf, vnfr))
+ nsr_id = vnfr["nsr-id-ref"]
+ vnf_member_index = vnfr["member-vnf-index-ref"]
+ vim_account_id = self.common_db.get_vim_account_id(
+ nsr_id, vnf_member_index
+ )
+ futures.append(
+ executor.submit(
+ CollectorService._collect_vim_metrics,
+ self.conf,
+ vnfr,
+ vim_account_id,
+ )
+ )
+ futures.append(
+ executor.submit(
+ CollectorService._collect_vca_metrics, self.conf, vnfr
+ )
+ )
vims = self.common_db.get_vim_accounts()
for vim in vims:
- futures.append(executor.submit(CollectorService._collect_vim_infra_metrics, self.conf, vim['_id']))
+ futures.append(
+ executor.submit(
+ CollectorService._collect_vim_infra_metrics,
+ self.conf,
+ vim["_id"],
+ )
+ )
sdncs = self.common_db.get_sdncs()
for sdnc in sdncs:
- futures.append(executor.submit(CollectorService._collect_sdnc_infra_metrics, self.conf, sdnc['_id']))
+ futures.append(
+ executor.submit(
+ CollectorService._collect_sdnc_infra_metrics,
+ self.conf,
+ sdnc["_id"],
+ )
+ )
try:
# Wait for future calls to complete till process_execution_timeout. Default is 50 seconds
- for future in concurrent.futures.as_completed(futures, self.conf.get('collector',
- 'process_execution_timeout')):
- result = future.result(timeout=int(self.conf.get('collector',
- 'process_execution_timeout')))
+ for future in concurrent.futures.as_completed(
+ futures, self.conf.get("collector", "process_execution_timeout")
+ ):
+ result = future.result(
+ timeout=int(
+ self.conf.get("collector", "process_execution_timeout")
+ )
+ )
metrics.extend(result)
- log.debug('result = %s' % (result))
+ log.debug("result = %s" % (result))
except concurrent.futures.TimeoutError as e:
# Some processes have not completed due to timeout error
- log.info('Some processes have not finished due to TimeoutError exception')
- log.debug('concurrent.futures.TimeoutError exception %s' % (e))
+ log.info(
+ "Some processes have not finished due to TimeoutError exception"
+ )
+ log.debug("concurrent.futures.TimeoutError exception %s" % (e))
# Shutting down process pool executor
CollectorService._stop_process_pool(executor)
class OpenstackUtils:
    """Helpers shared by the Openstack collectors."""

    @staticmethod
    def get_session(creds: dict):
        """Build an authenticated keystone v3 session from a VIM account dict.

        The optional ``config`` section of the account may carry:
        ``insecure`` (disable TLS verification), ``ca_cert`` (verification
        value passed through in place of the boolean flag) and
        ``project_domain_name`` / ``user_domain_name`` overrides (both
        default to "Default").
        """
        verify = True
        project_domain = "Default"
        user_domain = "Default"
        if "config" in creds:
            vim_config = creds["config"]
            if vim_config.get("insecure"):
                verify = False
            # A ca_cert entry replaces the boolean verification flag.
            verify = vim_config.get("ca_cert", verify)
            project_domain = vim_config.get("project_domain_name", project_domain)
            user_domain = vim_config.get("user_domain_name", user_domain)
        auth = v3.Password(
            auth_url=creds["vim_url"],
            username=creds["vim_user"],
            password=creds["vim_password"],
            project_name=creds["vim_tenant_name"],
            project_domain_name=project_domain,
            user_domain_name=user_domain,
        )
        return session.Session(auth=auth, verify=verify, timeout=10)
"cpu_utilization": "cpu",
}
# Scaling factor applied to raw backend values for the given metric.
# NOTE(review): 0.0000001 presumably converts the raw cpu counter's unit —
# confirm against the backend's "cpu" metric definition.
METRIC_MULTIPLIERS = {"cpu": 0.0000001}

# Aggregation method requested from the backend for the given metric.
METRIC_AGGREGATORS = {"cpu": "rate:mean"}

# Metrics measured per network interface rather than per instance.
INTERFACE_METRICS = [
    "packets_in_dropped",
    "packets_out_dropped",
    "packets_received",
    "packets_sent",
]
class MetricType(Enum):
    """How a performance metric is resolved against the VIM backend."""

    # Metric queried on the compute instance resource itself.
    INSTANCE = "instance"
    # Metric accumulated across the instance's network interfaces.
    INTERFACE_ALL = "interface_all"
    # Single-interface variant (dispatch branch not visible in this chunk).
    INTERFACE_ONE = "interface_one"
class OpenstackCollector(BaseVimCollector):
sess = OpenstackUtils.get_session(vim_account)
return keystone_client.Client(session=sess)
- def _get_resource_uuid(self, nsr_id: str, vnf_member_index: str, vdur_name: str) -> str:
+ def _get_resource_uuid(
+ self, nsr_id: str, vnf_member_index: str, vdur_name: str
+ ) -> str:
vdur = self.common_db.get_vdur(nsr_id, vnf_member_index, vdur_name)
- return vdur['vim-id']
+ return vdur["vim-id"]
def collect(self, vnfr: dict) -> List[Metric]:
- nsr_id = vnfr['nsr-id-ref']
- vnf_member_index = vnfr['member-vnf-index-ref']
- vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+ nsr_id = vnfr["nsr-id-ref"]
+ vnf_member_index = vnfr["member-vnf-index-ref"]
+ vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
# Populate extra tags for metrics
tags = {}
- tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
- if vnfr['_admin']['projects_read']:
- tags['project_id'] = vnfr['_admin']['projects_read'][0]
+ tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+ if vnfr["_admin"]["projects_read"]:
+ tags["project_id"] = vnfr["_admin"]["projects_read"][0]
else:
- tags['project_id'] = ''
+ tags["project_id"] = ""
metrics = []
- for vdur in vnfr['vdur']:
+ for vdur in vnfr["vdur"]:
# This avoids errors when vdur records have not been completely filled
- if 'name' not in vdur:
+ if "name" not in vdur:
continue
- vdu = next(
- filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
- )
- if 'monitoring-parameter' in vdu:
- for param in vdu['monitoring-parameter']:
- metric_name = param['performance-metric']
+ vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
+ if "monitoring-parameter" in vdu:
+ for param in vdu["monitoring-parameter"]:
+ metric_name = param["performance-metric"]
openstack_metric_name = METRIC_MAPPINGS[metric_name]
metric_type = self._get_metric_type(metric_name)
try:
- resource_id = self._get_resource_uuid(nsr_id, vnf_member_index, vdur['name'])
+ resource_id = self._get_resource_uuid(
+ nsr_id, vnf_member_index, vdur["name"]
+ )
except ValueError:
log.warning(
"Could not find resource_uuid for vdur %s, vnf_member_index %s, nsr_id %s. "
"Was it recently deleted?",
- vdur['name'], vnf_member_index, nsr_id)
+ vdur["name"],
+ vnf_member_index,
+ nsr_id,
+ )
continue
try:
log.info(
"Collecting metric type: %s and metric_name: %s and resource_id %s and ",
metric_type,
metric_name,
- resource_id)
- value = self.backend.collect_metric(metric_type, openstack_metric_name, resource_id)
+ resource_id,
+ )
+ value = self.backend.collect_metric(
+ metric_type, openstack_metric_name, resource_id
+ )
if value is not None:
log.info("value: %s", value)
- metric = VnfMetric(nsr_id, vnf_member_index, vdur['name'], metric_name, value, tags)
+ metric = VnfMetric(
+ nsr_id,
+ vnf_member_index,
+ vdur["name"],
+ metric_name,
+ value,
+ tags,
+ )
metrics.append(metric)
else:
log.info("metric value is empty")
except Exception as e:
- log.exception("Error collecting metric %s for vdu %s" % (metric_name, vdur['name']))
+ log.exception(
+ "Error collecting metric %s for vdu %s"
+ % (metric_name, vdur["name"])
+ )
log.info("Error in metric collection: %s" % e)
return metrics
class OpenstackBackend:
- def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+ def collect_metric(
+ self, metric_type: MetricType, metric_name: str, resource_id: str
+ ):
pass
class GnocchiBackend(OpenstackBackend):
-
def __init__(self, vim_account: dict):
self.client = self._build_gnocchi_client(vim_account)
self.neutron = self._build_neutron_client(vim_account)
sess = OpenstackUtils.get_session(vim_account)
return neutron_client.Client(session=sess)
- def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+ def collect_metric(
+ self, metric_type: MetricType, metric_name: str, resource_id: str
+ ):
if metric_type == MetricType.INTERFACE_ALL:
return self._collect_interface_all_metric(metric_name, resource_id)
return self._collect_instance_metric(metric_name, resource_id)
else:
- raise Exception('Unknown metric type %s' % metric_type.value)
+ raise Exception("Unknown metric type %s" % metric_type.value)
def _collect_interface_all_metric(self, openstack_metric_name, resource_id):
total_measure = None
- interfaces = self.client.resource.search(resource_type='instance_network_interface',
- query={'=': {'instance_id': resource_id}})
+ interfaces = self.client.resource.search(
+ resource_type="instance_network_interface",
+ query={"=": {"instance_id": resource_id}},
+ )
for interface in interfaces:
try:
- measures = self.client.metric.get_measures(openstack_metric_name,
- resource_id=interface['id'],
- limit=1)
+ measures = self.client.metric.get_measures(
+ openstack_metric_name, resource_id=interface["id"], limit=1
+ )
if measures:
if not total_measure:
total_measure = 0.0
except (gnocchiclient.exceptions.NotFound, TypeError) as e:
# Gnocchi in some Openstack versions raise TypeError instead of NotFound
- log.debug("No metric %s found for interface %s: %s", openstack_metric_name,
- interface['id'], e)
+ log.debug(
+ "No metric %s found for interface %s: %s",
+ openstack_metric_name,
+ interface["id"],
+ e,
+ )
return total_measure
def _collect_instance_metric(self, openstack_metric_name, resource_id):
aggregation = METRIC_AGGREGATORS.get(openstack_metric_name)
try:
- measures = self.client.metric.get_measures(openstack_metric_name,
- aggregation=aggregation,
- start=time.time() - 1200,
- resource_id=resource_id)
+ measures = self.client.metric.get_measures(
+ openstack_metric_name,
+ aggregation=aggregation,
+ start=time.time() - 1200,
+ resource_id=resource_id,
+ )
if measures:
value = measures[-1][2]
- except (gnocchiclient.exceptions.NotFound, gnocchiclient.exceptions.BadRequest, TypeError) as e:
+ except (
+ gnocchiclient.exceptions.NotFound,
+ gnocchiclient.exceptions.BadRequest,
+ TypeError,
+ ) as e:
# CPU metric in previous Openstack versions do not support rate:mean aggregation method
# Gnocchi in some Openstack versions raise TypeError instead of NotFound or BadRequest
if openstack_metric_name == "cpu":
- log.debug("No metric %s found for instance %s: %s", openstack_metric_name, resource_id, e)
- log.info("Retrying to get metric %s for instance %s without aggregation",
- openstack_metric_name, resource_id)
- measures = self.client.metric.get_measures(openstack_metric_name,
- resource_id=resource_id,
- limit=1)
+ log.debug(
+ "No metric %s found for instance %s: %s",
+ openstack_metric_name,
+ resource_id,
+ e,
+ )
+ log.info(
+ "Retrying to get metric %s for instance %s without aggregation",
+ openstack_metric_name,
+ resource_id,
+ )
+ measures = self.client.metric.get_measures(
+ openstack_metric_name, resource_id=resource_id, limit=1
+ )
else:
raise e
# measures[-1] is the last measure
if openstack_metric_name in METRIC_MULTIPLIERS:
value = value * METRIC_MULTIPLIERS[openstack_metric_name]
except gnocchiclient.exceptions.NotFound as e:
- log.debug("No metric %s found for instance %s: %s", openstack_metric_name, resource_id,
- e)
+ log.debug(
+ "No metric %s found for instance %s: %s",
+ openstack_metric_name,
+ resource_id,
+ e,
+ )
return value
sess = OpenstackUtils.get_session(vim_account)
return ceilometer_client.Client("2", session=sess)
- def collect_metric(self, metric_type: MetricType, metric_name: str, resource_id: str):
+ def collect_metric(
+ self, metric_type: MetricType, metric_name: str, resource_id: str
+ ):
if metric_type != MetricType.INSTANCE:
- raise NotImplementedError('Ceilometer backend only support instance metrics')
- measures = self.client.samples.list(meter_name=metric_name, limit=1, q=[
- {'field': 'resource_id', 'op': 'eq', 'value': resource_id}])
+ raise NotImplementedError(
+ "Ceilometer backend only support instance metrics"
+ )
+ measures = self.client.samples.list(
+ meter_name=metric_name,
+ limit=1,
+ q=[{"field": "resource_id", "op": "eq", "value": resource_id}],
+ )
return measures[0].counter_volume if measures else None
super().__init__(config, vim_account_id)
self.common_db = CommonDbClient(config)
cfg = self.get_vim_account(vim_account_id)
- self.vrops = vROPS_Helper(vrops_site=cfg['vrops_site'],
- vrops_user=cfg['vrops_user'],
- vrops_password=cfg['vrops_password'])
+ self.vrops = vROPS_Helper(
+ vrops_site=cfg["vrops_site"],
+ vrops_user=cfg["vrops_user"],
+ vrops_password=cfg["vrops_password"],
+ )
def get_vim_account(self, vim_account_id: str):
vim_account_info = self.common_db.get_vim_account(vim_account_id)
- return vim_account_info['config']
+ return vim_account_info["config"]
def collect(self, vnfr: dict):
- vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+ vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
vdu_mappings = {}
# Populate extra tags for metrics
- nsr_id = vnfr['nsr-id-ref']
+ nsr_id = vnfr["nsr-id-ref"]
tags = {}
- tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
- if vnfr['_admin']['projects_read']:
- tags['project_id'] = vnfr['_admin']['projects_read'][0]
+ tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+ if vnfr["_admin"]["projects_read"]:
+ tags["project_id"] = vnfr["_admin"]["projects_read"][0]
else:
- tags['project_id'] = ''
+ tags["project_id"] = ""
# Fetch the list of all known resources from vROPS.
resource_list = self.vrops.get_vm_resource_list_from_vrops()
- for vdur in vnfr['vdur']:
+ for vdur in vnfr["vdur"]:
# This avoids errors when vdur records have not been completely filled
- if 'name' not in vdur:
+ if "name" not in vdur:
continue
- vdu = next(
- filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
- )
- if 'monitoring-parameter' not in vdu:
+ vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
+ if "monitoring-parameter" not in vdu:
continue
- vim_id = vdur['vim-id']
- vdu_mappings[vim_id] = {'name': vdur['name']}
+ vim_id = vdur["vim-id"]
+ vdu_mappings[vim_id] = {"name": vdur["name"]}
# Map the vROPS instance id to the vim-id so we can look it up.
for resource in resource_list:
- for resourceIdentifier in resource['resourceKey']['resourceIdentifiers']:
- if resourceIdentifier['identifierType']['name'] == 'VMEntityInstanceUUID':
- if resourceIdentifier['value'] != vim_id:
+ for resourceIdentifier in resource["resourceKey"][
+ "resourceIdentifiers"
+ ]:
+ if (
+ resourceIdentifier["identifierType"]["name"]
+ == "VMEntityInstanceUUID"
+ ):
+ if resourceIdentifier["value"] != vim_id:
continue
- vdu_mappings[vim_id]['vrops_id'] = resource['identifier']
+ vdu_mappings[vim_id]["vrops_id"] = resource["identifier"]
if len(vdu_mappings) != 0:
- return self.vrops.get_metrics(vdu_mappings=vdu_mappings,
- monitoring_params=vdu['monitoring-parameter'],
- vnfr=vnfr,
- tags=tags
- )
+ return self.vrops.get_metrics(
+ vdu_mappings=vdu_mappings,
+ monitoring_params=vdu["monitoring-parameter"],
+ vnfr=vnfr,
+ tags=tags,
+ )
else:
return []
log = logging.getLogger(__name__)
-API_VERSION = '27.0'
+API_VERSION = "27.0"
class VMwareCollector(BaseVimCollector):
super().__init__(config, vim_account_id)
self.common_db = CommonDbClient(config)
vim_account = self.get_vim_account(vim_account_id)
- self.vcloud_site = vim_account['vim_url']
- self.admin_username = vim_account['admin_username']
- self.admin_password = vim_account['admin_password']
- self.vrops = vROPS_Helper(vrops_site=vim_account['vrops_site'],
- vrops_user=vim_account['vrops_user'],
- vrops_password=vim_account['vrops_password'])
+ self.vcloud_site = vim_account["vim_url"]
+ self.admin_username = vim_account["admin_username"]
+ self.admin_password = vim_account["admin_password"]
+ self.vrops = vROPS_Helper(
+ vrops_site=vim_account["vrops_site"],
+ vrops_user=vim_account["vrops_user"],
+ vrops_password=vim_account["vrops_password"],
+ )
def connect_as_admin(self):
- """ Method connect as pvdc admin user to vCloud director.
- There are certain action that can be done only by provider vdc admin user.
- Organization creation / provider network creation etc.
+ """Method connect as pvdc admin user to vCloud director.
+ There are certain action that can be done only by provider vdc admin user.
+ Organization creation / provider network creation etc.
- Returns:
- The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
+ Returns:
+ The return client object that letter can be used to connect to vcloud direct as admin for provider vdc
"""
log.debug("Logging into vCD org as admin.")
host = self.vcloud_site
admin_user = self.admin_username
admin_passwd = self.admin_password
- org = 'System'
+ org = "System"
client = Client(host, verify_ssl_certs=False)
client.set_highest_supported_version()
- client.set_credentials(BasicLoginCredentials(admin_user, org,
- admin_passwd))
+ client.set_credentials(BasicLoginCredentials(admin_user, org, admin_passwd))
return client
except Exception as e:
- log.error("Can't connect to a vCloud director as: {} with exception {}".format(admin_user, e))
+ log.error(
+ "Can't connect to a vCloud director as: {} with exception {}".format(
+ admin_user, e
+ )
+ )
def get_vim_account(self, vim_account_id: str):
"""
- Method to get VIM account details by its ID
- arg - VIM ID
- return - dict with vim account details
+ Method to get VIM account details by its ID
+ arg - VIM ID
+ return - dict with vim account details
"""
vim_account = {}
vim_account_info = self.common_db.get_vim_account(vim_account_id)
- vim_account['vim_url'] = vim_account_info['vim_url']
+ vim_account["vim_url"] = vim_account_info["vim_url"]
- vim_config = vim_account_info['config']
- vim_account['admin_username'] = vim_config['admin_username']
- vim_account['admin_password'] = vim_config['admin_password']
- vim_account['vrops_site'] = vim_config['vrops_site']
- vim_account['vrops_user'] = vim_config['vrops_user']
- vim_account['vrops_password'] = vim_config['vrops_password']
+ vim_config = vim_account_info["config"]
+ vim_account["admin_username"] = vim_config["admin_username"]
+ vim_account["admin_password"] = vim_config["admin_password"]
+ vim_account["vrops_site"] = vim_config["vrops_site"]
+ vim_account["vrops_user"] = vim_config["vrops_user"]
+ vim_account["vrops_password"] = vim_config["vrops_password"]
return vim_account
def get_vm_moref_id(self, vapp_uuid):
"""
- Method to get the moref_id of given VM
- arg - vapp_uuid
- return - VM mored_id
+ Method to get the moref_id of given VM
+ arg - vapp_uuid
+ return - VM mored_id
"""
vm_moref_id = None
try:
if vm_details and "vm_vcenter_info" in vm_details:
vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
- log.debug("Found vm_moref_id: {} for vApp UUID: {}".format(vm_moref_id, vapp_uuid))
+ log.debug(
+ "Found vm_moref_id: {} for vApp UUID: {}".format(
+ vm_moref_id, vapp_uuid
+ )
+ )
else:
- log.error("Failed to find vm_moref_id from vApp UUID: {}".format(vapp_uuid))
+ log.error(
+ "Failed to find vm_moref_id from vApp UUID: {}".format(
+ vapp_uuid
+ )
+ )
except Exception as exp:
- log.warning("Error occurred while getting VM moref ID for VM: {}\n{}".format(exp, traceback.format_exc()))
+ log.warning(
+ "Error occurred while getting VM moref ID for VM: {}\n{}".format(
+ exp, traceback.format_exc()
+ )
+ )
return vm_moref_id
log.error("Failed to connect to vCD")
return parsed_respond
- url_list = [self.vcloud_site, '/api/vApp/vapp-', vapp_uuid]
- get_vapp_restcall = ''.join(url_list)
+ url_list = [self.vcloud_site, "/api/vApp/vapp-", vapp_uuid]
+ get_vapp_restcall = "".join(url_list)
if vca._session:
- headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
- 'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
- response = requests.get(get_vapp_restcall,
- headers=headers,
- verify=False)
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = requests.get(get_vapp_restcall, headers=headers, verify=False)
if response.status_code != 200:
- log.error("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
- response.content))
+ log.error(
+ "REST API call {} failed. Return status code {}".format(
+ get_vapp_restcall, response.content
+ )
+ )
return parsed_respond
try:
xmlroot_respond = XmlElementTree.fromstring(response.content)
- namespaces = {'vm': 'http://www.vmware.com/vcloud/v1.5',
- "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
- "xmlns": "http://www.vmware.com/vcloud/v1.5"}
+ namespaces = {
+ "vm": "http://www.vmware.com/vcloud/v1.5",
+ "vmext": "http://www.vmware.com/vcloud/extension/v1.5",
+ "xmlns": "http://www.vmware.com/vcloud/v1.5",
+ }
# parse children section for other attrib
- children_section = xmlroot_respond.find('vm:Children/', namespaces)
+ children_section = xmlroot_respond.find("vm:Children/", namespaces)
if children_section is not None:
- vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+ vCloud_extension_section = children_section.find(
+ "xmlns:VCloudExtension", namespaces
+ )
if vCloud_extension_section is not None:
vm_vcenter_info = {}
- vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
- vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+ vim_info = vCloud_extension_section.find(
+ "vmext:VmVimInfo", namespaces
+ )
+ vmext = vim_info.find("vmext:VmVimObjectRef", namespaces)
if vmext is not None:
- vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+ vm_vcenter_info["vm_moref_id"] = vmext.find(
+ "vmext:MoRef", namespaces
+ ).text
parsed_respond["vm_vcenter_info"] = vm_vcenter_info
except Exception as exp:
- log.warning("Error occurred for getting vApp details: {}\n{}".format(exp,
- traceback.format_exc()))
+ log.warning(
+ "Error occurred for getting vApp details: {}\n{}".format(
+ exp, traceback.format_exc()
+ )
+ )
return parsed_respond
def collect(self, vnfr: dict):
- vnfd = self.common_db.get_vnfd(vnfr['vnfd-id'])
+ vnfd = self.common_db.get_vnfd(vnfr["vnfd-id"])
vdu_mappings = {}
# Populate extra tags for metrics
- nsr_id = vnfr['nsr-id-ref']
+ nsr_id = vnfr["nsr-id-ref"]
tags = {}
- tags['ns_name'] = self.common_db.get_nsr(nsr_id)['name']
- if vnfr['_admin']['projects_read']:
- tags['project_id'] = vnfr['_admin']['projects_read'][0]
+ tags["ns_name"] = self.common_db.get_nsr(nsr_id)["name"]
+ if vnfr["_admin"]["projects_read"]:
+ tags["project_id"] = vnfr["_admin"]["projects_read"][0]
else:
- tags['project_id'] = ''
+ tags["project_id"] = ""
# Fetch the list of all known resources from vROPS.
resource_list = self.vrops.get_vm_resource_list_from_vrops()
- for vdur in vnfr['vdur']:
+ for vdur in vnfr["vdur"]:
# This avoids errors when vdur records have not been completely filled
- if 'name' not in vdur:
+ if "name" not in vdur:
continue
- vdu = next(
- filter(lambda vdu: vdu['id'] == vdur['vdu-id-ref'], vnfd['vdu'])
- )
+ vdu = next(filter(lambda vdu: vdu["id"] == vdur["vdu-id-ref"], vnfd["vdu"]))
- if 'monitoring-parameter' not in vdu:
+ if "monitoring-parameter" not in vdu:
continue
- resource_uuid = vdur['vim-id']
+ resource_uuid = vdur["vim-id"]
# Find vm_moref_id from vApp uuid in vCD
vim_id = self.get_vm_moref_id(resource_uuid)
if vim_id is None:
- log.debug("Failed to find vROPS ID for vApp in vCD: {}".format(resource_uuid))
+ log.debug(
+ "Failed to find vROPS ID for vApp in vCD: {}".format(resource_uuid)
+ )
continue
- vdu_mappings[vim_id] = {'name': vdur['name']}
+ vdu_mappings[vim_id] = {"name": vdur["name"]}
# Map the vROPS instance id to the vim-id so we can look it up.
for resource in resource_list:
- for resourceIdentifier in resource['resourceKey']['resourceIdentifiers']:
- if resourceIdentifier['identifierType']['name'] == 'VMEntityObjectID':
- if resourceIdentifier['value'] != vim_id:
+ for resourceIdentifier in resource["resourceKey"][
+ "resourceIdentifiers"
+ ]:
+ if (
+ resourceIdentifier["identifierType"]["name"]
+ == "VMEntityObjectID"
+ ):
+ if resourceIdentifier["value"] != vim_id:
continue
- vdu_mappings[vim_id]['vrops_id'] = resource['identifier']
+ vdu_mappings[vim_id]["vrops_id"] = resource["identifier"]
if len(vdu_mappings) != 0:
- return self.vrops.get_metrics(vdu_mappings=vdu_mappings,
- monitoring_params=vdu['monitoring-parameter'],
- vnfr=vnfr,
- tags=tags
- )
+ return self.vrops.get_metrics(
+ vdu_mappings=vdu_mappings,
+ monitoring_params=vdu["monitoring-parameter"],
+ vnfr=vnfr,
+ tags=tags,
+ )
else:
return []
"disk_read_bytes": 1024,
"disk_write_bytes": 1024,
"packets_received": 1024,
- "packets_sent": 1024
+ "packets_sent": 1024,
}
-class vROPS_Helper():
-
- def __init__(self,
- vrops_site='https://vrops',
- vrops_user='',
- vrops_password=''):
+class vROPS_Helper:
+ def __init__(self, vrops_site="https://vrops", vrops_user="", vrops_password=""):
self.vrops_site = vrops_site
self.vrops_user = vrops_user
self.vrops_password = vrops_password
def get_vrops_token(self):
- """Fetches token from vrops
- """
- auth_url = '/suite-api/api/auth/token/acquire'
- headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ """Fetches token from vrops"""
+ auth_url = "/suite-api/api/auth/token/acquire"
+ headers = {"Content-Type": "application/json", "Accept": "application/json"}
req_body = {"username": self.vrops_user, "password": self.vrops_password}
- resp = requests.post(self.vrops_site + auth_url,
- json=req_body, verify=False,
- headers=headers)
+ resp = requests.post(
+ self.vrops_site + auth_url, json=req_body, verify=False, headers=headers
+ )
if resp.status_code != 200:
- log.error("Failed to get token from vROPS: {} {}".format(resp.status_code,
- resp.content))
+ log.error(
+ "Failed to get token from vROPS: {} {}".format(
+ resp.status_code, resp.content
+ )
+ )
return None
- resp_data = json.loads(resp.content.decode('utf-8'))
- return resp_data['token']
+ resp_data = json.loads(resp.content.decode("utf-8"))
+ return resp_data["token"]
def get_vm_resource_list_from_vrops(self):
- """ Find all known resource IDs in vROPs
- """
+ """Find all known resource IDs in vROPs"""
auth_token = self.get_vrops_token()
- api_url = '/suite-api/api/resources?resourceKind=VirtualMachine'
- headers = {'Accept': 'application/json', 'Authorization': 'vRealizeOpsToken {}'.format(auth_token)}
+ api_url = "/suite-api/api/resources?resourceKind=VirtualMachine"
+ headers = {
+ "Accept": "application/json",
+ "Authorization": "vRealizeOpsToken {}".format(auth_token),
+ }
resource_list = []
- resp = requests.get(self.vrops_site + api_url,
- verify=False, headers=headers)
+ resp = requests.get(self.vrops_site + api_url, verify=False, headers=headers)
if resp.status_code != 200:
- log.error("Failed to get resource list from vROPS: {} {}".format(resp.status_code,
- resp.content))
+ log.error(
+ "Failed to get resource list from vROPS: {} {}".format(
+ resp.status_code, resp.content
+ )
+ )
return resource_list
try:
- resp_data = json.loads(resp.content.decode('utf-8'))
- if resp_data.get('resourceList') is not None:
- resource_list = resp_data.get('resourceList')
+ resp_data = json.loads(resp.content.decode("utf-8"))
+ if resp_data.get("resourceList") is not None:
+ resource_list = resp_data.get("resourceList")
except Exception as exp:
- log.error("get_vm_resource_id: Error in parsing {}\n{}".format(exp, traceback.format_exc()))
+ log.error(
+ "get_vm_resource_id: Error in parsing {}\n{}".format(
+ exp, traceback.format_exc()
+ )
+ )
return resource_list
- def get_metrics(self,
- vdu_mappings={},
- monitoring_params={},
- vnfr=None,
- tags={}):
+ def get_metrics(self, vdu_mappings={}, monitoring_params={}, vnfr=None, tags={}):
monitoring_keys = {}
# Collect the names of all the metrics we need to query
for metric_entry in monitoring_params:
- metric_name = metric_entry['performance-metric']
+ metric_name = metric_entry["performance-metric"]
if metric_name not in METRIC_MAPPINGS:
log.debug("Metric {} not supported, ignoring".format(metric_name))
continue
sanitized_vdu_mappings = copy.deepcopy(vdu_mappings)
for key in vdu_mappings.keys():
vdu = vdu_mappings[key]
- if 'vrops_id' not in vdu:
+ if "vrops_id" not in vdu:
log.info("Could not find vROPS id for vdu {}".format(vdu))
del sanitized_vdu_mappings[key]
continue
- resource_ids += "&resourceId={}".format(vdu['vrops_id'])
+ resource_ids += "&resourceId={}".format(vdu["vrops_id"])
vdu_mappings = sanitized_vdu_mappings
try:
# Now we can make a single call to vROPS to collect all relevant metrics for resources we need to monitor
- api_url = "/suite-api/api/resources/stats?IntervalType=MINUTES&IntervalCount=1"\
+ api_url = (
+ "/suite-api/api/resources/stats?IntervalType=MINUTES&IntervalCount=1"
                "&rollUpType=MAX&currentOnly=true{}{}".format(stats_key, resource_ids)
+ )
auth_token = self.get_vrops_token()
- headers = {'Accept': 'application/json', 'Authorization': 'vRealizeOpsToken {}'.format(auth_token)}
+ headers = {
+ "Accept": "application/json",
+ "Authorization": "vRealizeOpsToken {}".format(auth_token),
+ }
- resp = requests.get(self.vrops_site + api_url,
- verify=False, headers=headers)
+ resp = requests.get(
+ self.vrops_site + api_url, verify=False, headers=headers
+ )
if resp.status_code != 200:
- log.error("Failed to get Metrics data from vROPS for {} {}".format(resp.status_code,
- resp.content))
+ log.error(
+ "Failed to get Metrics data from vROPS for {} {}".format(
+ resp.status_code, resp.content
+ )
+ )
return metrics
- m_data = json.loads(resp.content.decode('utf-8'))
- if 'values' not in m_data:
+ m_data = json.loads(resp.content.decode("utf-8"))
+ if "values" not in m_data:
return metrics
- statistics = m_data['values']
+ statistics = m_data["values"]
for vdu_stat in statistics:
- vrops_id = vdu_stat['resourceId']
+ vrops_id = vdu_stat["resourceId"]
vdu_name = None
for vdu in vdu_mappings.values():
- if vdu['vrops_id'] == vrops_id:
- vdu_name = vdu['name']
+ if vdu["vrops_id"] == vrops_id:
+ vdu_name = vdu["name"]
if vdu_name is None:
continue
- for item in vdu_stat['stat-list']['stat']:
- reported_metric = item['statKey']['key']
+ for item in vdu_stat["stat-list"]["stat"]:
+ reported_metric = item["statKey"]["key"]
if reported_metric not in METRIC_MAPPINGS.values():
continue
# Convert the vROPS metric name back to OSM key
- metric_name = list(METRIC_MAPPINGS.keys())[list(METRIC_MAPPINGS.values()).
- index(reported_metric)]
+ metric_name = list(METRIC_MAPPINGS.keys())[
+ list(METRIC_MAPPINGS.values()).index(reported_metric)
+ ]
if metric_name in monitoring_keys.keys():
- metric_value = item['data'][-1]
+ metric_value = item["data"][-1]
if metric_name in METRIC_MULTIPLIERS:
metric_value *= METRIC_MULTIPLIERS[metric_name]
- metric = VnfMetric(vnfr['nsr-id-ref'],
- vnfr['member-vnf-index-ref'],
- vdu_name,
- metric_name,
- metric_value,
- tags
- )
+ metric = VnfMetric(
+ vnfr["nsr-id-ref"],
+ vnfr["member-vnf-index-ref"],
+ vdu_name,
+ metric_name,
+ metric_value,
+ tags,
+ )
metrics.append(metric)
except Exception as exp:
- log.error("Exception while parsing metrics data from vROPS {}\n{}".format(exp, traceback.format_exc()))
+ log.error(
+ "Exception while parsing metrics data from vROPS {}\n{}".format(
+ exp, traceback.format_exc()
+ )
+ )
return metrics
class VnfMetric(Metric):
- def __init__(self, nsr_id, vnf_member_index, vdur_name, name, value, extra_tags: dict = None):
+ def __init__(
+ self, nsr_id, vnf_member_index, vdur_name, name, value, extra_tags: dict = None
+ ):
tags = {
- 'ns_id': nsr_id,
- 'vnf_member_index': vnf_member_index,
- 'vdu_name': vdur_name
+ "ns_id": nsr_id,
+ "vnf_member_index": vnf_member_index,
+ "vdu_name": vdur_name,
}
if extra_tags:
tags.update(extra_tags)
- log.debug('Tags: %s', tags)
+ log.debug("Tags: %s", tags)
super().__init__(tags, name, value)
class CommonDbClient:
def __init__(self, config: Config):
- if config.get('database', 'driver') == "mongo":
+ if config.get("database", "driver") == "mongo":
self.common_db = dbmongo.DbMongo()
- elif config.get('database', 'driver') == "memory":
+ elif config.get("database", "driver") == "memory":
self.common_db = dbmemory.DbMemory()
else:
- raise Exception("Unknown database driver {}".format(config.get('section', 'driver')))
+ raise Exception(
+ "Unknown database driver {}".format(config.get("section", "driver"))
+ )
self.common_db.db_connect(config.get("database"))
def get_vnfr(self, nsr_id: str, member_index: int):
- vnfr = self.common_db.get_one("vnfrs",
- {"nsr-id-ref": nsr_id, "member-vnf-index-ref": str(member_index)})
+ vnfr = self.common_db.get_one(
+ "vnfrs", {"nsr-id-ref": nsr_id, "member-vnf-index-ref": str(member_index)}
+ )
return vnfr
def get_vnfrs(self, nsr_id: str = None, vim_account_id: str = None):
if nsr_id and vim_account_id:
raise NotImplementedError("Only one filter is currently supported")
if nsr_id:
- vnfrs = [self.get_vnfr(nsr_id, member['member-vnf-index']) for member in
- self.get_nsr(nsr_id)['nsd']['constituent-vnfd']]
+ vnfrs = [
+ self.get_vnfr(nsr_id, member["member-vnf-index"])
+ for member in self.get_nsr(nsr_id)["nsd"]["constituent-vnfd"]
+ ]
elif vim_account_id:
- vnfrs = self.common_db.get_list("vnfrs",
- {"vim-account-id": vim_account_id})
+ vnfrs = self.common_db.get_list("vnfrs", {"vim-account-id": vim_account_id})
else:
- vnfrs = self.common_db.get_list('vnfrs')
+ vnfrs = self.common_db.get_list("vnfrs")
return vnfrs
def get_vnfd(self, vnfd_id: str):
- vnfd = self.common_db.get_one("vnfds",
- {"_id": vnfd_id})
+ vnfd = self.common_db.get_one("vnfds", {"_id": vnfd_id})
return vnfd
def get_vnfd_by_id(self, vnfd_id: str, filter: dict = {}):
return None
def get_nsrs(self):
- return self.common_db.get_list('nsrs')
+ return self.common_db.get_list("nsrs")
def get_nsr(self, nsr_id: str):
- nsr = self.common_db.get_one("nsrs",
- {"id": nsr_id})
+ nsr = self.common_db.get_one("nsrs", {"id": nsr_id})
return nsr
def get_nslcmop(self, nslcmop_id):
- nslcmop = self.common_db.get_one("nslcmops",
- {"_id": nslcmop_id})
+ nslcmop = self.common_db.get_one("nslcmops", {"_id": nslcmop_id})
return nslcmop
def get_vdur(self, nsr_id, member_index, vdur_name):
vnfr = self.get_vnfr(nsr_id, member_index)
- for vdur in vnfr['vdur']:
- if vdur['name'] == vdur_name:
+ for vdur in vnfr["vdur"]:
+ if vdur["name"] == vdur_name:
return vdur
- raise ValueError('vdur not found for nsr-id {}, member_index {} and vdur_name {}'.format(nsr_id, member_index,
- vdur_name))
+ raise ValueError(
+ "vdur not found for nsr-id {}, member_index {} and vdur_name {}".format(
+ nsr_id, member_index, vdur_name
+ )
+ )
def decrypt_vim_password(self, vim_password: str, schema_version: str, vim_id: str):
return self.common_db.decrypt(vim_password, schema_version, vim_id)
- def decrypt_sdnc_password(self, sdnc_password: str, schema_version: str, sdnc_id: str):
+ def decrypt_sdnc_password(
+ self, sdnc_password: str, schema_version: str, sdnc_id: str
+ ):
return self.common_db.decrypt(sdnc_password, schema_version, sdnc_id)
def get_vim_account_id(self, nsr_id: str, vnf_member_index: int) -> str:
vnfr = self.get_vnfr(nsr_id, vnf_member_index)
- return vnfr['vim-account-id']
+ return vnfr["vim-account-id"]
def get_vim_accounts(self):
- return self.common_db.get_list('vim_accounts')
+ return self.common_db.get_list("vim_accounts")
def get_vim_account(self, vim_account_id: str) -> dict:
- vim_account = self.common_db.get_one('vim_accounts', {"_id": vim_account_id})
- vim_account['vim_password'] = self.decrypt_vim_password(vim_account['vim_password'],
- vim_account['schema_version'],
- vim_account_id)
+ vim_account = self.common_db.get_one("vim_accounts", {"_id": vim_account_id})
+ vim_account["vim_password"] = self.decrypt_vim_password(
+ vim_account["vim_password"], vim_account["schema_version"], vim_account_id
+ )
vim_config_encrypted_dict = {
"1.1": ("admin_password", "nsx_password", "vcenter_password"),
- "default": ("admin_password", "nsx_password", "vcenter_password", "vrops_password")
+ "default": (
+ "admin_password",
+ "nsx_password",
+ "vcenter_password",
+ "vrops_password",
+ ),
}
- vim_config_encrypted = vim_config_encrypted_dict['default']
- if vim_account['schema_version'] in vim_config_encrypted_dict.keys():
- vim_config_encrypted = vim_config_encrypted_dict[vim_account['schema_version']]
- if 'config' in vim_account:
- for key in vim_account['config']:
+ vim_config_encrypted = vim_config_encrypted_dict["default"]
+ if vim_account["schema_version"] in vim_config_encrypted_dict.keys():
+ vim_config_encrypted = vim_config_encrypted_dict[
+ vim_account["schema_version"]
+ ]
+ if "config" in vim_account:
+ for key in vim_account["config"]:
if key in vim_config_encrypted:
- vim_account['config'][key] = self.decrypt_vim_password(vim_account['config'][key],
- vim_account['schema_version'],
- vim_account_id)
+ vim_account["config"][key] = self.decrypt_vim_password(
+ vim_account["config"][key],
+ vim_account["schema_version"],
+ vim_account_id,
+ )
return vim_account
def get_sdncs(self):
- return self.common_db.get_list('sdns')
+ return self.common_db.get_list("sdns")
def get_sdnc(self, sdnc_id: str):
- return self.common_db.get_one('sdns', {'_id': sdnc_id})
+ return self.common_db.get_one("sdns", {"_id": sdnc_id})
def get_projects(self):
- return self.common_db.get_list('projects')
+ return self.common_db.get_list("projects")
def get_project(self, project_id: str):
- return self.common_db.get_one('projects', {'_id': project_id})
+ return self.common_db.get_one("projects", {"_id": project_id})
def create_alarm(self, alarm: Alarm):
- return self.common_db.create('alarms', alarm.to_dict())
+ return self.common_db.create("alarms", alarm.to_dict())
def delete_alarm(self, alarm_uuid: str):
- return self.common_db.del_one('alarms', {'uuid': alarm_uuid})
+ return self.common_db.del_one("alarms", {"uuid": alarm_uuid})
def get_alarms(self) -> List[Alarm]:
alarms = []
- alarm_dicts = self.common_db.get_list('alarms')
+ alarm_dicts = self.common_db.get_list("alarms")
for alarm_dict in alarm_dicts:
alarms.append(Alarm.from_dict(alarm_dict))
return alarms
def get_user(self, username: str):
- return self.common_db.get_one('users', {'username': username})
+ return self.common_db.get_one("users", {"username": username})
def get_user_by_id(self, userid: str):
- return self.common_db.get_one('users', {'_id': userid})
+ return self.common_db.get_one("users", {"_id": userid})
def get_role_by_name(self, name: str):
- return self.common_db.get_one('roles', {'name': name})
+ return self.common_db.get_one("roles", {"name": name})
def get_role_by_id(self, role_id: str):
- return self.common_db.get_one('roles', {'_id': role_id})
+ return self.common_db.get_one("roles", {"_id": role_id})
class Config:
- def __init__(self, config_file: str = ''):
+ def __init__(self, config_file: str = ""):
self.conf = {}
self._read_config_file(config_file)
self._read_env()
def _read_config_file(self, config_file):
if not config_file:
- path = 'mon.yaml'
+ path = "mon.yaml"
config_file = pkg_resources.resource_filename(__name__, path)
with open(config_file) as f:
self.conf = yaml.load(f)
if len(elements) < 3:
logger.warning(
"Environment variable %s=%s does not comply with required format. Section and/or field missing.",
- env, os.getenv(env))
+ env,
+ os.getenv(env),
+ )
continue
section = elements[1]
- field = '_'.join(elements[2:])
+ field = "_".join(elements[2:])
value = os.getenv(env)
if section not in self.conf:
self.conf[section] = {}
Object representing a connection with keystone, it's main use is to collect
projects and users from the OSM platform stored in keystone instead MongoDB
"""
+
def __init__(self, config):
- self.auth_url = config.get('keystone', 'url')
- self.username = config.get('keystone', 'service_user')
- self.project_name = config.get('keystone', 'service_project')
- self.project_domain_name_list = config.get('keystone', 'service_project_domain_name').split(",")
- self.password = config.get('keystone', 'service_password')
- self.user_domain_name_list = config.get('keystone', 'domain_name').split(",")
+ self.auth_url = config.get("keystone", "url")
+ self.username = config.get("keystone", "service_user")
+ self.project_name = config.get("keystone", "service_project")
+ self.project_domain_name_list = config.get(
+ "keystone", "service_project_domain_name"
+ ).split(",")
+ self.password = config.get("keystone", "service_password")
+ self.user_domain_name_list = config.get("keystone", "domain_name").split(",")
self.auth = v3.Password(
auth_url=self.auth_url,
username=self.username,
password=self.password,
project_domain_name=self.project_domain_name_list[0],
- project_name=self.project_name
+ project_name=self.project_name,
)
self.keystone_session = session.Session(auth=self.auth)
- self.keystone_client = client.Client(session=self.keystone_session, endpoint_override=self.auth_url)
+ self.keystone_client = client.Client(
+ session=self.keystone_session, endpoint_override=self.auth_url
+ )
def getProjects(self):
"""
class MessageBusClient:
def __init__(self, config: Config, loop=None):
- if config.get('message', 'driver') == "local":
+ if config.get("message", "driver") == "local":
self.msg_bus = msglocal.MsgLocal()
- elif config.get('message', 'driver') == "kafka":
+ elif config.get("message", "driver") == "kafka":
self.msg_bus = msgkafka.MsgKafka()
else:
- raise Exception("Unknown message bug driver {}".format(config.get('section', 'driver')))
- self.msg_bus.connect(config.get('message'))
+ raise Exception(
+ "Unknown message bug driver {}".format(config.get("section", "driver"))
+ )
+ self.msg_bus.connect(config.get("message"))
if not loop:
loop = asyncio.get_event_loop()
self.loop = loop
class Alarm:
-
- def __init__(self, name: str = None, severity: str = None, threshold: float = None, operation: str = None,
- statistic: str = None, metric: str = None, tags: dict = {}):
+ def __init__(
+ self,
+ name: str = None,
+ severity: str = None,
+ threshold: float = None,
+ operation: str = None,
+ statistic: str = None,
+ metric: str = None,
+ tags: dict = {},
+ ):
self.uuid = str(uuid.uuid4())
self.name = name
self.severity = severity
def to_dict(self) -> dict:
alarm = {
- 'uuid': self.uuid,
- 'name': self.name,
- 'severity': self.severity,
- 'threshold': self.threshold,
- 'statistic': self.statistic,
- 'metric': self.metric,
- 'tags': self.tags,
- 'operation': self.operation
+ "uuid": self.uuid,
+ "name": self.name,
+ "severity": self.severity,
+ "threshold": self.threshold,
+ "statistic": self.statistic,
+ "metric": self.metric,
+ "tags": self.tags,
+ "operation": self.operation,
}
return alarm
@staticmethod
def from_dict(data: dict):
alarm = Alarm()
- alarm.uuid = data.get('uuid', str(uuid.uuid4()))
- alarm.name = data.get('name')
- alarm.severity = data.get('severity')
- alarm.threshold = float(data.get('threshold'))
- alarm.statistic = data.get('statistic')
- alarm.metric = data.get('metric')
- alarm.tags = data.get('tags')
- alarm.operation = data.get('operation')
+ alarm.uuid = data.get("uuid", str(uuid.uuid4()))
+ alarm.name = data.get("name")
+ alarm.severity = data.get("severity")
+ alarm.threshold = float(data.get("threshold"))
+ alarm.statistic = data.get("statistic")
+ alarm.metric = data.get("metric")
+ alarm.tags = data.get("tags")
+ alarm.operation = data.get("operation")
return alarm
def create_alarm_response(self, **kwargs) -> dict:
"""Generate a response for a create alarm request."""
- create_alarm_resp = {"schema_version": schema_version,
- "schema_type": "create_alarm_response",
- "alarm_create_response": {
- "correlation_id": kwargs['cor_id'],
- "alarm_uuid": kwargs['alarm_id'],
- "status": kwargs['status']}}
+ create_alarm_resp = {
+ "schema_version": schema_version,
+ "schema_type": "create_alarm_response",
+ "alarm_create_response": {
+ "correlation_id": kwargs["cor_id"],
+ "alarm_uuid": kwargs["alarm_id"],
+ "status": kwargs["status"],
+ },
+ }
return create_alarm_resp
def delete_alarm_response(self, **kwargs) -> dict:
"""Generate a response for a delete alarm request."""
- delete_alarm_resp = {"schema_version": schema_version,
- "schema_type": "alarm_delete_response",
- "alarm_delete_response": {
- "correlation_id": kwargs['cor_id'],
- "alarm_uuid": kwargs['alarm_id'],
- "status": kwargs['status']}}
+ delete_alarm_resp = {
+ "schema_version": schema_version,
+ "schema_type": "alarm_delete_response",
+ "alarm_delete_response": {
+ "correlation_id": kwargs["cor_id"],
+ "alarm_uuid": kwargs["alarm_id"],
+ "status": kwargs["status"],
+ },
+ }
return delete_alarm_resp
def notify_alarm(self, **kwargs) -> dict:
"""Generate a response to send alarm notifications."""
- notify_alarm_resp = {"schema_version": schema_version,
- "schema_type": "notify_alarm",
- "notify_details": {
- "alarm_uuid": kwargs['alarm_id'],
- "metric_name": kwargs['metric_name'],
- "threshold_value": kwargs['threshold_value'],
- "operation": kwargs['operation'],
- "severity": kwargs['sev'],
- "status": kwargs['status'],
- "start_date": kwargs['date'],
- "tags": kwargs['tags']}}
+ notify_alarm_resp = {
+ "schema_version": schema_version,
+ "schema_type": "notify_alarm",
+ "notify_details": {
+ "alarm_uuid": kwargs["alarm_id"],
+ "metric_name": kwargs["metric_name"],
+ "threshold_value": kwargs["threshold_value"],
+ "operation": kwargs["operation"],
+ "severity": kwargs["sev"],
+ "status": kwargs["status"],
+ "start_date": kwargs["date"],
+ "tags": kwargs["tags"],
+ },
+ }
return notify_alarm_resp
# contact: fbravo@whitestack.com or agarcia@whitestack.com
##
+
def find_in_list(the_list, condition_lambda):
for item in the_list:
if condition_lambda(item):
class GrafanaBackend:
def __init__(self, config: Config):
self.conf = config
- self.url = config.get('grafana', 'url')
+ self.url = config.get("grafana", "url")
grafana_user = config.get("grafana", "user")
grafana_password = config.get("grafana", "password")
self.headers = {
- 'content-type': "application/json",
- 'authorization': "Basic %s" % base64.b64encode(
- (grafana_user + ":" + grafana_password).encode("utf-8")).decode()
+ "content-type": "application/json",
+ "authorization": "Basic %s"
+ % base64.b64encode(
+ (grafana_user + ":" + grafana_password).encode("utf-8")
+ ).decode(),
}
def get_all_dashboard_uids(self):
# Gets only dashboards that were automated by OSM (with tag 'osm_automated')
- response = requests.request("GET", self.url + "/api/search?tag=osm_automated", headers=self.headers)
+ response = requests.request(
+ "GET", self.url + "/api/search?tag=osm_automated", headers=self.headers
+ )
dashboards = response.json()
dashboard_uids = []
for dashboard in dashboards:
- dashboard_uids.append(dashboard['uid'])
+ dashboard_uids.append(dashboard["uid"])
log.debug("Searching for all dashboard uids: %s", dashboard_uids)
return dashboard_uids
def get_dashboard_status(self, uid):
- response = requests.request("GET", self.url + "/api/dashboards/uid/" + uid, headers=self.headers)
+ response = requests.request(
+ "GET", self.url + "/api/dashboards/uid/" + uid, headers=self.headers
+ )
log.debug("Searching for dashboard result: %s", response.text)
return response
with open(json_file) as f:
dashboard_data = f.read()
- dashboard_data = dashboard_data.replace('OSM_ID', uid).replace('OSM_NAME', name)
+ dashboard_data = dashboard_data.replace("OSM_ID", uid).replace(
+ "OSM_NAME", name
+ )
dashboard_json_data = json.loads(dashboard_data)
# Get folder id
if project_name:
else:
folder_name = name
response_folder_id = requests.request(
- "GET", self.url + "/api/folders/{}".format(folder_name), headers=self.headers)
+ "GET",
+ self.url + "/api/folders/{}".format(folder_name),
+ headers=self.headers,
+ )
if response_folder_id.status_code == 200:
folder_id = json.loads(response_folder_id.text)["id"]
dashboard_json_data["folderId"] = folder_id
# Admin dashboard will be created if already exists. Rest will remain same.
if json.loads(response.text).get("status") == "name-exists":
# Delete any previous project-admin dashboard if it already exist.
- if name == 'admin':
+ if name == "admin":
self.delete_admin_dashboard()
- response = self.send_request_for_creating_dashboard(dashboard_json_data)
+ response = self.send_request_for_creating_dashboard(
+ dashboard_json_data
+ )
else:
return
if project_name is not None:
name = project_name
response_team = requests.request(
- "GET", self.url + "/api/teams/search?name={}".format(name), headers=self.headers)
+ "GET",
+ self.url + "/api/teams/search?name={}".format(name),
+ headers=self.headers,
+ )
# Remove default permissions of admin user's dashboard so that it is not visible to non-admin users
if len(json.loads(response_team.text)["teams"]) == 0:
# As team information is not available so it is admin user
dahboard_id = json.loads(response.text)["id"]
requests.request(
- "POST", self.url + "/api/dashboards/id/{}/permissions".format(dahboard_id),
- headers=self.headers)
+ "POST",
+ self.url + "/api/dashboards/id/{}/permissions".format(dahboard_id),
+ headers=self.headers,
+ )
log.info("Dashboard %s is created in Grafana", name)
return response
def send_request_for_creating_dashboard(self, dashboard_data):
response = requests.request(
- "POST", self.url + "/api/dashboards/db/", data=json.dumps(dashboard_data), headers=self.headers)
+ "POST",
+ self.url + "/api/dashboards/db/",
+ data=json.dumps(dashboard_data),
+ headers=self.headers,
+ )
return response
def delete_dashboard(self, uid):
- response = requests.request("DELETE", self.url + "/api/dashboards/uid/" + uid, headers=self.headers)
+ response = requests.request(
+ "DELETE", self.url + "/api/dashboards/uid/" + uid, headers=self.headers
+ )
log.debug("Dashboard %s deleted from Grafana", uid)
return response
def delete_admin_dashboard(self):
requests.request(
- "DELETE", self.url + "/api/dashboards/db/osm-project-status-admin", headers=self.headers)
+ "DELETE",
+ self.url + "/api/dashboards/db/osm-project-status-admin",
+ headers=self.headers,
+ )
log.debug("Dashboard osm-project-status-admin deleted from Grafana")
def create_grafana_users(self, user):
"login": user,
"password": user,
}
- response_users = requests.request("POST", self.url + "/api/admin/users/", json=user_payload,
- headers=self.headers)
+ response_users = requests.request(
+ "POST",
+ self.url + "/api/admin/users/",
+ json=user_payload,
+ headers=self.headers,
+ )
json_data = json.loads(response_users.text)
url = "/api/org/users/{}/".format(json_data["id"])
- permission_payload = {"role": "Editor", }
- requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+ permission_payload = {
+ "role": "Editor",
+ }
+ requests.request(
+ "PATCH", self.url + url, json=permission_payload, headers=self.headers
+ )
log.info("New user %s created in Grafana", user)
return response_users
# Create Grafana team with member
- def create_grafana_teams_members(self, project_name, user_name, is_admin, proj_list):
+ def create_grafana_teams_members(
+ self, project_name, user_name, is_admin, proj_list
+ ):
# Check if user exist in Grafana
- user_response = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
- headers=self.headers)
+ user_response = requests.request(
+ "GET",
+ self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+ headers=self.headers,
+ )
user_obj = json.loads(user_response.text)
if user_response.status_code != 200:
user_response = self.create_grafana_users(user_name)
user_id = user_obj["id"]
# Get teams for user
- team_objs = requests.request("GET", self.url + "/api/users/{}/teams".format(user_id), headers=self.headers)
+ team_objs = requests.request(
+ "GET",
+ self.url + "/api/users/{}/teams".format(user_id),
+ headers=self.headers,
+ )
team_obj = json.loads(team_objs.text)
team_list = []
if len(team_obj):
proj_unlink = set(team_list) - set(proj_list)
for prj in proj_unlink:
- response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(prj),
- headers=self.headers)
+ response_team = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(prj),
+ headers=self.headers,
+ )
team_id = json.loads(response_team.text)["teams"][0]["id"]
- requests.request("DELETE", self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
- headers=self.headers)
+ requests.request(
+ "DELETE",
+ self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
+ headers=self.headers,
+ )
if project_name != "admin":
# Add member to team
- response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
- headers=self.headers)
+ response_team = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(project_name),
+ headers=self.headers,
+ )
# Search if team in Grafana corresponding to the project already exists
if not json.loads(response_team.text)["teams"]:
self.create_grafana_teams(project_name)
- response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
- headers=self.headers)
+ response_team = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(project_name),
+ headers=self.headers,
+ )
team_id = json.loads(response_team.text)["teams"][0]["id"]
if project_name not in team_list:
# Create a team in Grafana corresponding to the project as it doesn't exist
- member_payload = {
- "userId": user_id
- }
- requests.request("POST", self.url + "/api/teams/{}/members".format(team_id), json=member_payload,
- headers=self.headers)
+ member_payload = {"userId": user_id}
+ requests.request(
+ "POST",
+ self.url + "/api/teams/{}/members".format(team_id),
+ json=member_payload,
+ headers=self.headers,
+ )
# Check if user role or project name is admin
- if is_admin or project_name == 'admin':
+ if is_admin or project_name == "admin":
# Give admin righsts to user
url = "/api/org/users/{}/".format(user_id)
- permission_payload = {"role": "Admin", }
- requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+ permission_payload = {
+ "role": "Admin",
+ }
+ requests.request(
+ "PATCH", self.url + url, json=permission_payload, headers=self.headers
+ )
log.info("User %s is assigned Admin permission", user_name)
else:
# Give editor rights to user
url = "/api/org/users/{}/".format(user_id)
- permission_payload = {"role": "Editor", }
- requests.request("PATCH", self.url + url, json=permission_payload, headers=self.headers)
+ permission_payload = {
+ "role": "Editor",
+ }
+ requests.request(
+ "PATCH", self.url + url, json=permission_payload, headers=self.headers
+ )
log.info("User %s is assigned Editor permission", user_name)
# Create team in Grafana
def create_grafana_teams(self, team_name):
- team_payload = {"name": team_name, }
- requests.request("POST", self.url + "/api/teams", json=team_payload, headers=self.headers)
+ team_payload = {
+ "name": team_name,
+ }
+ requests.request(
+ "POST", self.url + "/api/teams", json=team_payload, headers=self.headers
+ )
log.info("New team %s created in Grafana", team_name)
# Create folder in Grafana
def create_grafana_folders(self, folder_name):
folder_payload = {"uid": folder_name, "title": folder_name}
- requests.request("POST", self.url + "/api/folders", json=folder_payload, headers=self.headers)
+ requests.request(
+ "POST", self.url + "/api/folders", json=folder_payload, headers=self.headers
+ )
log.info("Dashboard folder %s created", folder_name)
- response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(folder_name),
- headers=self.headers)
+ response_team = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(folder_name),
+ headers=self.headers,
+ )
# Create team if it doesn't already exists
if len(json.loads(response_team.text)["teams"]) == 0:
self.create_grafana_teams(folder_name)
- response_team = requests.request("GET", self.url + "/api/teams/search?name={}".format(folder_name),
- headers=self.headers)
+ response_team = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(folder_name),
+ headers=self.headers,
+ )
# Assign required permission to the team's folder
team_id = json.loads(response_team.text)["teams"][0]["id"]
- permission_data = {"items": [{"teamId": team_id, "permission": 2}, ]}
- requests.request("POST", self.url + "/api/folders/{}/permissions".format(folder_name),
- json=permission_data, headers=self.headers)
+ permission_data = {
+ "items": [
+ {"teamId": team_id, "permission": 2},
+ ]
+ }
+ requests.request(
+ "POST",
+ self.url + "/api/folders/{}/permissions".format(folder_name),
+ json=permission_data,
+ headers=self.headers,
+ )
# delete user from grafana
def delete_grafana_users(self, user_name):
# Get user id
- response_id = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
- headers=self.headers)
+ response_id = requests.request(
+ "GET",
+ self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+ headers=self.headers,
+ )
try:
user_id = json.loads(response_id.text)["id"]
except Exception:
log.exception("Exception processing message: ")
# Delete user
- response = requests.request("DELETE", self.url + "/api/admin/users/{}".format(user_id), headers=self.headers)
+ response = requests.request(
+ "DELETE",
+ self.url + "/api/admin/users/{}".format(user_id),
+ headers=self.headers,
+ )
log.info("User %s deleted in Grafana", user_name)
return response
# delete team from grafana
def delete_grafana_team(self, project_name):
# Delete Grafana folder
- requests.request("DELETE", self.url + "/api/folders/{}".format(project_name),
- headers=self.headers)
+ requests.request(
+ "DELETE",
+ self.url + "/api/folders/{}".format(project_name),
+ headers=self.headers,
+ )
# Delete Grafana team
- team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_name),
- headers=self.headers)
+ team_obj = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(project_name),
+ headers=self.headers,
+ )
team_id = json.loads(team_obj.text)["teams"][0]["id"]
- response = requests.request("DELETE", self.url + "/api/teams/{}".format(team_id), headers=self.headers)
+ response = requests.request(
+ "DELETE", self.url + "/api/teams/{}".format(team_id), headers=self.headers
+ )
log.info("Team %s deleted in Grafana", project_name)
return response
# update grafana team
def update_grafana_teams(self, project_new_name, project_old_name):
- team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project_old_name),
- headers=self.headers)
+ team_obj = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(project_old_name),
+ headers=self.headers,
+ )
team_id = json.loads(team_obj.text)["teams"][0]["id"]
- data = {"name": project_new_name, }
- response = requests.request("PUT", self.url + "/api/teams/{}".format(team_id), json=data, headers=self.headers)
+ data = {
+ "name": project_new_name,
+ }
+ response = requests.request(
+ "PUT",
+ self.url + "/api/teams/{}".format(team_id),
+ json=data,
+ headers=self.headers,
+ )
log.info("Grafana team updated %s", response.text)
return response
# remove member from grafana team
def remove_grafana_team_member(self, user_name, project_data):
# Get user id
- response_id = requests.request("GET", self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
- headers=self.headers)
+ response_id = requests.request(
+ "GET",
+ self.url + "/api/users/lookup?loginOrEmail={}".format(user_name),
+ headers=self.headers,
+ )
user_id = json.loads(response_id.text)["id"]
for project in project_data:
# Get team id
- team_obj = requests.request("GET", self.url + "/api/teams/search?name={}".format(project['project']),
- headers=self.headers)
+ team_obj = requests.request(
+ "GET",
+ self.url + "/api/teams/search?name={}".format(project["project"]),
+ headers=self.headers,
+ )
team_id = json.loads(team_obj.text)["teams"][0]["id"]
- response = requests.request("DELETE", self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
- headers=self.headers)
+ response = requests.request(
+ "DELETE",
+ self.url + "/api/teams/{}/members/{}".format(team_id, user_id),
+ headers=self.headers,
+ )
return response
log.exception("Exception %s", str(e))
async def _user_msg(self, topic, key, values):
- log.debug("Message from kafka bus received: topic: %s and values: %s and key: %s", topic, values, key)
+ log.debug(
+ "Message from kafka bus received: topic: %s and values: %s and key: %s",
+ topic,
+ values,
+ key,
+ )
try:
if topic == "users" and key == "created":
log.debug("Received message from kafka for creating user")
- if values.get('username'):
- user = values['username']
+ if values.get("username"):
+ user = values["username"]
else:
- user = values['changes']['username']
+ user = values["changes"]["username"]
self.service.create_grafana_user(user)
# user-created and mapping is done with osm cli
- if values.get('changes'):
+ if values.get("changes"):
# user-project-role mapping is included in change
- if values['changes'].get('project_role_mappings'):
+ if values["changes"].get("project_role_mappings"):
user_id = values["_id"]
project_data = values["changes"]["project_role_mappings"]
project_list = values["changes"].get("projects")
- self.service.create_grafana_team_member(project_data, user_id, project_list)
- elif values.get('project_role_mappings'):
+ self.service.create_grafana_team_member(
+ project_data, user_id, project_list
+ )
+ elif values.get("project_role_mappings"):
# for fresh project-role-mapping
user_id = values.get("_id")
project_data = values["project_role_mappings"]
self.service.create_grafana_team_member(project_data, user_id)
else:
# for keystone we will get username
- self.service.create_grafana_team_member(project_data, user=values['username'])
+ self.service.create_grafana_team_member(
+ project_data, user=values["username"]
+ )
elif topic == "users" and key == "deleted":
log.debug("Received message from kafka for deleting user")
- user = values['username']
+ user = values["username"]
self.service.delete_grafana_user(user)
elif topic == "users" and key == "edited":
log.debug("Received message from kafka for associating user to team")
user_id = values["_id"]
- if values["changes"].get("remove_project_role_mappings") and not \
- values["changes"].get("add_project_role_mappings"):
+ if values["changes"].get("remove_project_role_mappings") and not values[
+ "changes"
+ ].get("add_project_role_mappings"):
# Removing user-project role mapping
- self.service.remove_grafana_team_members(user_id,
- values["changes"].get("remove_project_role_mappings"))
+ self.service.remove_grafana_team_members(
+ user_id, values["changes"].get("remove_project_role_mappings")
+ )
else:
# Changing user project role mapping
if values["changes"].get("project_role_mappings"):
log.exception("Exception processing message: ")
def dashboard_forever(self):
- log.debug('dashboard_forever')
- grafana_parsed_uri = urlparse(self.conf.get('grafana', 'url'))
+ log.debug("dashboard_forever")
+ grafana_parsed_uri = urlparse(self.conf.get("grafana", "url"))
while True:
try:
socket.gethostbyname(grafana_parsed_uri.hostname)
log.debug("Dashboard backend is running")
except socket.error:
log.debug("Dashboard backend is not available")
- time.sleep(int(self.conf.get('dashboarder', 'interval')))
+ time.sleep(int(self.conf.get("dashboarder", "interval")))
continue
try:
self.create_dashboards()
- time.sleep(int(self.conf.get('dashboarder', 'interval')))
+ time.sleep(int(self.conf.get("dashboarder", "interval")))
except Exception:
log.exception("Error creating dashboards")
def create_dashboards(self):
self.service.create_dashboards()
- log.debug('Dashboarder Service > create_dashboards called!')
+ log.debug("Dashboarder Service > create_dashboards called!")
self.common_db = CommonDbClient(self.conf)
self.grafana = GrafanaBackend(self.conf)
- if bool(self.conf.get('keystone', 'enabled')):
+ if bool(self.conf.get("keystone", "enabled")):
self.keystone = KeystoneConnection(self.conf)
else:
self.keystone = None
if self.keystone:
try:
projects.extend(
- map(lambda project: {'_id': project.id, 'name': project.name}, self.keystone.getProjects())
+ map(
+ lambda project: {"_id": project.id, "name": project.name},
+ self.keystone.getProjects(),
+ )
)
except Exception:
- log.error('Cannot retrieve projects from keystone')
+ log.error("Cannot retrieve projects from keystone")
else:
projects.extend(self.common_db.get_projects())
# Reads existing project list and creates a dashboard for each
for project in projects:
- project_id = project['_id']
+ project_id = project["_id"]
# Collect Project IDs for periodical dashboard clean-up
osm_resource_uids.append(project_id)
- dashboard_path = '{}/dashboarder/templates/project_scoped.json'.format(mon_path[0])
+ dashboard_path = "{}/dashboarder/templates/project_scoped.json".format(
+ mon_path[0]
+ )
if project_id not in dashboard_uids:
- project_name = project['name']
+ project_name = project["name"]
if project_name != "admin":
# Create project folder in Grafana only if user is not admin.
# Admin user's dashboard will be created in default folder
self.grafana.create_grafana_folders(project_name)
- self.grafana.create_dashboard(project_id, project_name,
- dashboard_path)
- log.debug('Created dashboard for Project: %s', project_id)
+ self.grafana.create_dashboard(project_id, project_name, dashboard_path)
+ log.debug("Created dashboard for Project: %s", project_id)
else:
- log.debug('Dashboard already exists')
+ log.debug("Dashboard already exists")
# Reads existing NS list and creates a dashboard for each
# TODO lavado: only create for ACTIVE NSRs
nsrs = self.common_db.get_nsrs()
for nsr in nsrs:
- nsr_id = nsr['_id']
- dashboard_path = '{}/dashboarder/templates/ns_scoped.json'.format(mon_path[0])
+ nsr_id = nsr["_id"]
+ dashboard_path = "{}/dashboarder/templates/ns_scoped.json".format(
+ mon_path[0]
+ )
# Collect NS IDs for periodical dashboard clean-up
osm_resource_uids.append(nsr_id)
# Check if the NSR's VNFDs contain metrics
# Only one DF at the moment, support for this feature is comming in the future
- vnfds_profiles = nsr['nsd']["df"][0]['vnf-profile']
+ vnfds_profiles = nsr["nsd"]["df"][0]["vnf-profile"]
for vnf_profile in vnfds_profiles:
try:
- vnfd = self.common_db.get_vnfd_by_id(vnf_profile['vnfd-id'], create_filter_from_nsr(nsr))
+ vnfd = self.common_db.get_vnfd_by_id(
+ vnf_profile["vnfd-id"], create_filter_from_nsr(nsr)
+ )
# If there are metrics, create dashboard (if exists)
- vdu_found = find_in_list(vnfd["vdu"], lambda a_vdu: "monitoring-parameter" in a_vdu)
+ vdu_found = find_in_list(
+ vnfd["vdu"], lambda a_vdu: "monitoring-parameter" in a_vdu
+ )
if vdu_found:
if nsr_id not in dashboard_uids:
- nsr_name = nsr['name']
+ nsr_name = nsr["name"]
project_id = nsr["_admin"]["projects_read"][0]
try:
# Get project details from commondb
if self.keystone:
# Serach project in keystone
for project in projects:
- if project_id == project['_id']:
+ if project_id == project["_id"]:
project_name = project["name"]
else:
- log.info('Project %s not found', project_id)
- log.debug('Exception %s' % e)
- self.grafana.create_dashboard(nsr_id, nsr_name,
- dashboard_path, project_name)
- log.debug('Created dashboard for NS: %s', nsr_id)
+ log.info("Project %s not found", project_id)
+ log.debug("Exception %s" % e)
+ self.grafana.create_dashboard(
+ nsr_id, nsr_name, dashboard_path, project_name
+ )
+ log.debug("Created dashboard for NS: %s", nsr_id)
else:
- log.debug('Dashboard already exists')
+ log.debug("Dashboard already exists")
break
else:
- log.debug('NS does not has metrics')
+ log.debug("NS does not has metrics")
except Exception:
log.exception("VNFD is not valid or has been renamed")
continue
for dashboard_uid in dashboard_uids:
if dashboard_uid not in osm_resource_uids:
self.grafana.delete_dashboard(dashboard_uid)
- log.debug('Deleted obsolete dashboard: %s', dashboard_uid)
+ log.debug("Deleted obsolete dashboard: %s", dashboard_uid)
else:
- log.debug('All dashboards in use')
+ log.debug("All dashboards in use")
def create_grafana_user(self, user):
self.grafana.create_grafana_users(user)
- def create_grafana_team_member(self, project_data, userid=None, project_list=None, user=None):
+ def create_grafana_team_member(
+ self, project_data, userid=None, project_list=None, user=None
+ ):
if user:
user_name = user
else:
user = self.keystone.getUserById(userid)
user_name = user.name
else:
- log.info('User %s not found', userid)
- log.debug('Exception %s' % e)
+ log.info("User %s not found", userid)
+ log.debug("Exception %s" % e)
if project_list:
# user-project mapping is done by osm cli
for proj in project_data:
project = self.common_db.get_project(proj["project"])
- proj_name = project['name']
+ proj_name = project["name"]
role_obj = self.common_db.get_role_by_id(proj["role"])
is_admin = role_obj["permissions"].get("admin")
- self.grafana.create_grafana_teams_members(proj_name, user_name, is_admin, project_list)
+ self.grafana.create_grafana_teams_members(
+ proj_name, user_name, is_admin, project_list
+ )
else:
# user-project mapping is done by osm ui
proj_list = []
users_proj_list = user.get("project_role_mappings")
for project in users_proj_list:
proj_data = self.common_db.get_project(project["project"])
- proj_list.append(proj_data['name'])
+ proj_list.append(proj_data["name"])
for proj in project_data:
if self.keystone:
# Backend authentication type is keystone
try:
# Getting project and role objects from keystone using ids
role_obj = self.keystone.getRoleById(proj["role"])
- proj_data = self.keystone.getProjectsByProjectId(proj["project"])
- log.info('role object {} {}'.format(role_obj.permissions, proj_data.name))
- is_admin = role_obj.permissions['admin']
+ proj_data = self.keystone.getProjectsByProjectId(
+ proj["project"]
+ )
+ log.info(
+ "role object {} {}".format(
+ role_obj.permissions, proj_data.name
+ )
+ )
+ is_admin = role_obj.permissions["admin"]
except Exception:
# Getting project and role objects from keystone using names
role_obj = self.keystone.getRoleByName(proj["role"])[0]
- proj_data = self.keystone.getProjectsByProjectName(proj["project"])[0]
- is_admin = role_obj.to_dict().get("permissions").get('admin')
- log.info('role object {} {}'.format(role_obj.to_dict(), proj_data.name))
+ proj_data = self.keystone.getProjectsByProjectName(
+ proj["project"]
+ )[0]
+ is_admin = role_obj.to_dict().get("permissions").get("admin")
+ log.info(
+ "role object {} {}".format(
+ role_obj.to_dict(), proj_data.name
+ )
+ )
proj_name = proj_data.name
else:
# Backend authentication type is internal
# Getting project and role object from commondb using ids
role_obj = self.common_db.get_role_by_id(proj["role"])
proj_data = self.common_db.get_project(proj["project"])
- proj_name = proj_data['name']
+ proj_name = proj_data["name"]
is_admin = role_obj["permissions"].get("admin")
- self.grafana.create_grafana_teams_members(proj_name, user_name, is_admin, proj_list)
+ self.grafana.create_grafana_teams_members(
+ proj_name, user_name, is_admin, proj_list
+ )
def create_grafana_team(self, team_name):
self.grafana.create_grafana_teams(team_name)
user = self.keystone.getUserById(user_id)
user_name = user.name
else:
- log.info('User %s not found', user_id)
- log.debug('Exception %s' % e)
+ log.info("User %s not found", user_id)
+ log.debug("Exception %s" % e)
self.grafana.remove_grafana_team_member(user_name, proj_data)
log = logging.getLogger(__name__)
-OSM_METRIC_PREFIX = 'osm_'
+OSM_METRIC_PREFIX = "osm_"
class PrometheusBackend(BaseBackend):
-
def __init__(self, config: Config):
super().__init__(config)
self.conf = config
query = self._build_query(metric_name, tags)
request_url = self._build_url(query)
log.info("Querying Prometheus: %s", request_url)
- r = requests.get(request_url, timeout=int(self.conf.get('global', 'request_timeout')))
+ r = requests.get(
+ request_url, timeout=int(self.conf.get("global", "request_timeout"))
+ )
if r.status_code == 200:
json_response = r.json()
- if json_response['status'] == 'success':
+ if json_response["status"] == "success":
return self._get_metric_value_from_response(json_response)
else:
- log.warning("Prometheus response is not success. Got status %s", json_response['status'])
+ log.warning(
+ "Prometheus response is not success. Got status %s",
+ json_response["status"],
+ )
else:
- log.warning("Error contacting Prometheus. Got status code %s: %s", r.status_code, r.text)
+ log.warning(
+ "Error contacting Prometheus. Got status code %s: %s",
+ r.status_code,
+ r.text,
+ )
return None
def _build_query(self, metric_name: str, tags: dict) -> str:
query_section_tags = []
for k, v in tags.items():
- query_section_tags.append(k + '=\"' + v + '\"')
- query_section = "query={0}{{{1}}}".format(OSM_METRIC_PREFIX + metric_name, ','.join(query_section_tags))
+ query_section_tags.append(k + '="' + v + '"')
+ query_section = "query={0}{{{1}}}".format(
+ OSM_METRIC_PREFIX + metric_name, ",".join(query_section_tags)
+ )
return query_section
def _build_url(self, query: str):
- return self.conf.get('prometheus', 'url') + "/api/v1/query?" + query
+ return self.conf.get("prometheus", "url") + "/api/v1/query?" + query
def _get_metric_value_from_response(self, json_response):
- result = json_response['data']['result']
+ result = json_response["data"]["result"]
if len(result):
- metric_value = float(result[0]['value'][1])
+ metric_value = float(result[0]["value"][1])
log.info("Metric value: %s", metric_value)
return metric_value
else:
class Evaluator:
-
def __init__(self, config: Config, loop=None):
self.conf = config
if not loop:
self.msg_bus = MessageBusClient(config)
def evaluate_forever(self):
- log.debug('evaluate_forever')
+ log.debug("evaluate_forever")
while True:
try:
self.evaluate()
- time.sleep(int(self.conf.get('evaluator', 'interval')))
+ time.sleep(int(self.conf.get("evaluator", "interval")))
except Exception:
log.exception("Error evaluating alarms")
def evaluate(self):
- log.debug('evaluate')
- log.info('Starting alarm evaluation')
+ log.debug("evaluate")
+ log.info("Starting alarm evaluation")
alarms_tuples = self.service.evaluate_alarms()
processes = []
for alarm, status in alarms_tuples:
- p = multiprocessing.Process(target=self.notify_alarm,
- args=(alarm, status))
+ p = multiprocessing.Process(target=self.notify_alarm, args=(alarm, status))
p.start()
processes.append(p)
for process in processes:
process.join(timeout=10)
- log.info('Alarm evaluation is complete')
+ log.info("Alarm evaluation is complete")
def notify_alarm(self, alarm: Alarm, status: AlarmStatus):
log.debug("_notify_alarm")
resp_message = self._build_alarm_response(alarm, status)
log.info("Sent alarm notification: %s", resp_message)
- self.loop.run_until_complete(self.msg_bus.aiowrite('alarm_response', 'notify_alarm', resp_message))
+ self.loop.run_until_complete(
+ self.msg_bus.aiowrite("alarm_response", "notify_alarm", resp_message)
+ )
def _build_alarm_response(self, alarm: Alarm, status: AlarmStatus):
log.debug("_build_alarm_response")
tags[name] = value
now = time.strftime("%d-%m-%Y") + " " + time.strftime("%X")
return response.generate_response(
- 'notify_alarm',
+ "notify_alarm",
alarm_id=alarm.uuid,
metric_name=alarm.metric,
operation=alarm.operation,
sev=alarm.severity,
status=status.value,
date=now,
- tags=tags)
+ tags=tags,
+ )
log = logging.getLogger(__name__)
-BACKENDS = {
- 'prometheus': PrometheusBackend
-}
+BACKENDS = {"prometheus": PrometheusBackend}
class AlarmStatus(Enum):
- ALARM = 'alarm'
- OK = 'ok'
- INSUFFICIENT = 'insufficient-data'
+ ALARM = "alarm"
+ OK = "ok"
+ INSUFFICIENT = "insufficient-data"
class EvaluatorService:
-
def __init__(self, config: Config):
self.conf = config
self.common_db = CommonDbClient(self.conf)
self.queue = multiprocessing.Queue()
- def _get_metric_value(self,
- metric_name: str,
- tags: dict):
- return BACKENDS[self.conf.get('evaluator', 'backend')](self.conf).get_metric_value(metric_name, tags)
+ def _get_metric_value(self, metric_name: str, tags: dict):
+ return BACKENDS[self.conf.get("evaluator", "backend")](
+ self.conf
+ ).get_metric_value(metric_name, tags)
- def _evaluate_metric(self,
- alarm: Alarm):
+ def _evaluate_metric(self, alarm: Alarm):
log.debug("_evaluate_metric")
metric_value = self._get_metric_value(alarm.metric, alarm.tags)
if metric_value is None:
log.warning("No metric result for alarm %s", alarm.uuid)
self.queue.put((alarm, AlarmStatus.INSUFFICIENT))
else:
- if alarm.operation.upper() == 'GT':
+ if alarm.operation.upper() == "GT":
if metric_value > alarm.threshold:
self.queue.put((alarm, AlarmStatus.ALARM))
else:
self.queue.put((alarm, AlarmStatus.OK))
- elif alarm.operation.upper() == 'LT':
+ elif alarm.operation.upper() == "LT":
if metric_value < alarm.threshold:
self.queue.put((alarm, AlarmStatus.ALARM))
else:
self.queue.put((alarm, AlarmStatus.OK))
def evaluate_alarms(self) -> List[Tuple[Alarm, AlarmStatus]]:
- log.debug('evaluate_alarms')
+ log.debug("evaluate_alarms")
processes = []
for alarm in self.common_db.get_alarms():
- p = multiprocessing.Process(target=self._evaluate_metric,
- args=(alarm,))
+ p = multiprocessing.Process(target=self._evaluate_metric, args=(alarm,))
processes.append(p)
p.start()
class Server:
-
def __init__(self, config: Config, loop=None):
self.conf = config
if not loop:
self.loop.run_until_complete(self.start())
async def start(self):
- topics = [
- "alarm_request"
- ]
+ topics = ["alarm_request"]
try:
await self.msg_bus.aioread(topics, self._process_msg)
except Exception as e:
if topic == "alarm_request":
if key == "create_alarm_request":
- alarm_details = values['alarm_create_request']
- cor_id = alarm_details['correlation_id']
+ alarm_details = values["alarm_create_request"]
+ cor_id = alarm_details["correlation_id"]
response_builder = ResponseBuilder()
try:
alarm = self.service.create_alarm(
- alarm_details['alarm_name'],
- alarm_details['threshold_value'],
- alarm_details['operation'].lower(),
- alarm_details['severity'].lower(),
- alarm_details['statistic'].lower(),
- alarm_details['metric_name'],
- alarm_details['tags']
+ alarm_details["alarm_name"],
+ alarm_details["threshold_value"],
+ alarm_details["operation"].lower(),
+ alarm_details["severity"].lower(),
+ alarm_details["statistic"].lower(),
+ alarm_details["metric_name"],
+ alarm_details["tags"],
+ )
+ response = response_builder.generate_response(
+ "create_alarm_response",
+ cor_id=cor_id,
+ status=True,
+ alarm_id=alarm.uuid,
)
- response = response_builder.generate_response('create_alarm_response',
- cor_id=cor_id,
- status=True,
- alarm_id=alarm.uuid)
except Exception:
log.exception("Error creating alarm: ")
- response = response_builder.generate_response('create_alarm_response',
- cor_id=cor_id,
- status=False,
- alarm_id=None)
- await self._publish_response('alarm_response_' + str(cor_id), 'create_alarm_response', response)
+ response = response_builder.generate_response(
+ "create_alarm_response",
+ cor_id=cor_id,
+ status=False,
+ alarm_id=None,
+ )
+ await self._publish_response(
+ "alarm_response_" + str(cor_id),
+ "create_alarm_response",
+ response,
+ )
if key == "delete_alarm_request":
- alarm_details = values['alarm_delete_request']
- alarm_uuid = alarm_details['alarm_uuid']
+ alarm_details = values["alarm_delete_request"]
+ alarm_uuid = alarm_details["alarm_uuid"]
response_builder = ResponseBuilder()
- cor_id = alarm_details['correlation_id']
+ cor_id = alarm_details["correlation_id"]
try:
self.service.delete_alarm(alarm_uuid)
- response = response_builder.generate_response('delete_alarm_response',
- cor_id=cor_id,
- status=True,
- alarm_id=alarm_uuid)
+ response = response_builder.generate_response(
+ "delete_alarm_response",
+ cor_id=cor_id,
+ status=True,
+ alarm_id=alarm_uuid,
+ )
except Exception:
log.exception("Error deleting alarm: ")
- response = response_builder.generate_response('delete_alarm_response',
- cor_id=cor_id,
- status=False,
- alarm_id=alarm_uuid)
- await self._publish_response('alarm_response_' + str(cor_id), 'delete_alarm_response', response)
+ response = response_builder.generate_response(
+ "delete_alarm_response",
+ cor_id=cor_id,
+ status=False,
+ alarm_id=alarm_uuid,
+ )
+ await self._publish_response(
+ "alarm_response_" + str(cor_id),
+ "delete_alarm_response",
+ response,
+ )
except Exception:
log.exception("Exception processing message: ")
async def _publish_response(self, topic: str, key: str, msg: dict):
- log.info("Sending response %s to topic %s with key %s", json.dumps(msg), topic, key)
+ log.info(
+ "Sending response %s to topic %s with key %s", json.dumps(msg), topic, key
+ )
await self.msg_bus.aiowrite(topic, key, msg)
class ServerService:
-
def __init__(self, config: Config):
self.common_db = CommonDbClient(config)
- def create_alarm(self,
- name: str,
- threshold: float,
- operation: str,
- severity: str,
- statistic: str,
- metric_name: str,
- tags: dict) -> Alarm:
+ def create_alarm(
+ self,
+ name: str,
+ threshold: float,
+ operation: str,
+ severity: str,
+ statistic: str,
+ metric_name: str,
+ tags: dict,
+ ) -> Alarm:
log.debug("create_alarm")
- alarm = Alarm(name, severity, threshold, operation, statistic, metric_name, tags)
+ alarm = Alarm(
+ name, severity, threshold, operation, statistic, metric_name, tags
+ )
self.common_db.create_alarm(alarm)
log.info("Alarm %s created", alarm.name)
return alarm
- def delete_alarm(self,
- alarm_uuid: str) -> None:
+ def delete_alarm(self, alarm_uuid: str) -> None:
log.debug("delete_alarm")
self.common_db.delete_alarm(alarm_uuid)
log.info("Alarm %s is deleted", alarm_uuid)
@mock.patch.object(OpenstackCollector, "collect")
@mock.patch.object(CommonDbClient, "get_vim_account")
def test_init_vim_collector_and_collect_openstack(self, _get_vim_account, collect):
- _get_vim_account.return_value = {'vim_type': 'openstack'}
+ _get_vim_account.return_value = {"vim_type": "openstack"}
collector = CollectorService(self.config)
- collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+ collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
collect.assert_called_once_with({})
@mock.patch.object(OpenstackCollector, "collect")
@mock.patch.object(CommonDbClient, "get_vim_account")
- def test_init_vim_collector_and_collect_unknown(self, _get_vim_account, openstack_collect):
- _get_vim_account.return_value = {'vim_type': 'unknown'}
+ def test_init_vim_collector_and_collect_unknown(
+ self, _get_vim_account, openstack_collect
+ ):
+ _get_vim_account.return_value = {"vim_type": "unknown"}
collector = CollectorService(self.config)
- collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+ collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
openstack_collect.assert_not_called()
@mock.patch.object(OpenstackCollector, "__init__", lambda *args, **kwargs: None)
@mock.patch.object(OpenstackCollector, "collect")
@mock.patch.object(CommonDbClient, "get_vim_account")
- def test_init_vim_collector_and_collect_vio_with_openstack_collector(self, _get_vim_account, openstack_collect):
- _get_vim_account.return_value = {'vim_type': 'openstack', 'config': {'vim_type': 'VIO'}}
+ def test_init_vim_collector_and_collect_vio_with_openstack_collector(
+ self, _get_vim_account, openstack_collect
+ ):
+ _get_vim_account.return_value = {
+ "vim_type": "openstack",
+ "config": {"vim_type": "VIO"},
+ }
collector = CollectorService(self.config)
- collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+ collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
openstack_collect.assert_called_once_with({})
@mock.patch.object(VIOCollector, "__init__", lambda *args, **kwargs: None)
@mock.patch.object(VIOCollector, "collect")
@mock.patch.object(CommonDbClient, "get_vim_account")
- def test_init_vim_collector_and_collect_vio_with_vrops_collector(self, _get_vim_account, vio_collect):
- _get_vim_account.return_value = {'vim_type': 'openstack',
- 'config': {'vim_type': 'VIO', 'vrops_site': 'https://vrops'}}
+ def test_init_vim_collector_and_collect_vio_with_vrops_collector(
+ self, _get_vim_account, vio_collect
+ ):
+ _get_vim_account.return_value = {
+ "vim_type": "openstack",
+ "config": {"vim_type": "VIO", "vrops_site": "https://vrops"},
+ }
collector = CollectorService(self.config)
- collector._collect_vim_metrics(self.config, {}, 'test_vim_account_id')
+ collector._collect_vim_metrics(self.config, {}, "test_vim_account_id")
vio_collect.assert_called_once_with({})
@mock.patch("osm_mon.collector.service.VCACollector", autospec=True)
from osm_mon.collector.utils.openstack import OpenstackUtils
-@mock.patch('osm_mon.collector.utils.openstack.session')
+@mock.patch("osm_mon.collector.utils.openstack.session")
class OpenstackUtilsTest(TestCase):
-
def setUp(self):
super().setUp()
def test_session_without_insecure(self, mock_session):
creds = {
- 'config': {
- },
- 'vim_url': 'url',
- 'vim_user': 'user',
- 'vim_password': 'password',
- 'vim_tenant_name': 'tenant_name'
+ "config": {},
+ "vim_url": "url",
+ "vim_user": "user",
+ "vim_password": "password",
+ "vim_tenant_name": "tenant_name",
}
OpenstackUtils.get_session(creds)
mock_session.Session.assert_called_once_with(
- auth=mock.ANY, verify=True, timeout=10)
+ auth=mock.ANY, verify=True, timeout=10
+ )
def test_session_with_insecure(self, mock_session):
creds = {
- 'config': {
- 'insecure': True
- },
- 'vim_url': 'url',
- 'vim_user': 'user',
- 'vim_password': 'password',
- 'vim_tenant_name': 'tenant_name'
+ "config": {"insecure": True},
+ "vim_url": "url",
+ "vim_user": "user",
+ "vim_password": "password",
+ "vim_tenant_name": "tenant_name",
}
OpenstackUtils.get_session(creds)
mock_session.Session.assert_called_once_with(
- auth=mock.ANY, verify=False, timeout=10)
+ auth=mock.ANY, verify=False, timeout=10
+ )
def test_session_with_insecure_false(self, mock_session):
creds = {
- 'config': {
- 'insecure': False
- },
- 'vim_url': 'url',
- 'vim_user': 'user',
- 'vim_password': 'password',
- 'vim_tenant_name': 'tenant_name'
+ "config": {"insecure": False},
+ "vim_url": "url",
+ "vim_user": "user",
+ "vim_password": "password",
+ "vim_tenant_name": "tenant_name",
}
OpenstackUtils.get_session(creds)
mock_session.Session.assert_called_once_with(
- auth=mock.ANY, verify=True, timeout=10)
+ auth=mock.ANY, verify=True, timeout=10
+ )
def tearDown(self):
super().tearDown()
- @mock.patch.object(GnocchiBackend, '_build_neutron_client')
- @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+ @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+ @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
def test_collect_gnocchi_rate_instance(self, build_gnocchi_client, _):
mock_gnocchi_client = mock.Mock()
mock_gnocchi_client.metric = mock.Mock()
- mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 0.0345442539),
- (datetime.datetime(2019, 4, 12, 15, 44,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 600000000)]
+ mock_gnocchi_client.metric.get_measures.return_value = [
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 43,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 0.0345442539,
+ ),
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 44,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 600000000,
+ ),
+ ]
build_gnocchi_client.return_value = mock_gnocchi_client
- backend = GnocchiBackend({'_id': 'test_uuid'})
- value = backend._collect_instance_metric('cpu', 'test_resource_id')
+ backend = GnocchiBackend({"_id": "test_uuid"})
+ value = backend._collect_instance_metric("cpu", "test_resource_id")
self.assertEqual(value, 1.0)
- mock_gnocchi_client.metric.get_measures.assert_called_once_with('cpu',
- aggregation="rate:mean",
- start=mock.ANY,
- resource_id='test_resource_id')
-
- @mock.patch.object(GnocchiBackend, '_build_neutron_client')
- @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+ mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+ "cpu",
+ aggregation="rate:mean",
+ start=mock.ANY,
+ resource_id="test_resource_id",
+ )
+
+ @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+ @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
def test_collect_gnocchi_non_rate_instance(self, build_gnocchi_client, _):
mock_gnocchi_client = mock.Mock()
- mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 0.0345442539),
- (datetime.datetime(2019, 4, 12, 15, 44,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 128)]
+ mock_gnocchi_client.metric.get_measures.return_value = [
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 43,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 0.0345442539,
+ ),
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 44,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 128,
+ ),
+ ]
build_gnocchi_client.return_value = mock_gnocchi_client
- backend = GnocchiBackend({'_id': 'test_uuid'})
- value = backend._collect_instance_metric('memory.usage', 'test_resource_id')
+ backend = GnocchiBackend({"_id": "test_uuid"})
+ value = backend._collect_instance_metric("memory.usage", "test_resource_id")
self.assertEqual(value, 128)
- mock_gnocchi_client.metric.get_measures.assert_called_once_with('memory.usage',
- aggregation=None,
- start=mock.ANY,
- resource_id='test_resource_id')
-
- @mock.patch.object(GnocchiBackend, '_build_neutron_client')
- @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
+ mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+ "memory.usage",
+ aggregation=None,
+ start=mock.ANY,
+ resource_id="test_resource_id",
+ )
+
+ @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+ @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
def test_collect_gnocchi_no_metric(self, build_gnocchi_client, _):
mock_gnocchi_client = mock.Mock()
- mock_gnocchi_client.metric.get_measures.side_effect = gnocchiclient.exceptions.NotFound()
+ mock_gnocchi_client.metric.get_measures.side_effect = (
+ gnocchiclient.exceptions.NotFound()
+ )
build_gnocchi_client.return_value = mock_gnocchi_client
- backend = GnocchiBackend({'_id': 'test_uuid'})
- value = backend._collect_instance_metric('memory.usage', 'test_resource_id')
+ backend = GnocchiBackend({"_id": "test_uuid"})
+ value = backend._collect_instance_metric("memory.usage", "test_resource_id")
self.assertIsNone(value)
- mock_gnocchi_client.metric.get_measures.assert_called_once_with('memory.usage',
- aggregation=None,
- start=mock.ANY,
- resource_id='test_resource_id')
-
- @mock.patch.object(GnocchiBackend, '_build_neutron_client')
- @mock.patch.object(GnocchiBackend, '_build_gnocchi_client')
- def test_collect_interface_all_metric(self, build_gnocchi_client, build_neutron_client):
+ mock_gnocchi_client.metric.get_measures.assert_called_once_with(
+ "memory.usage",
+ aggregation=None,
+ start=mock.ANY,
+ resource_id="test_resource_id",
+ )
+
+ @mock.patch.object(GnocchiBackend, "_build_neutron_client")
+ @mock.patch.object(GnocchiBackend, "_build_gnocchi_client")
+ def test_collect_interface_all_metric(
+ self, build_gnocchi_client, build_neutron_client
+ ):
mock_gnocchi_client = mock.Mock()
- mock_gnocchi_client.resource.search.return_value = [{'id': 'test_id_1'}, {'id': 'test_id_2'}]
- mock_gnocchi_client.metric.get_measures.return_value = [(datetime.datetime(2019, 4, 12, 15, 43,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 0.0345442539),
- (datetime.datetime(2019, 4, 12, 15, 44,
- tzinfo=datetime.timezone(
- datetime.timedelta(0),
- '+00:00')), 60.0, 0.0333070363)]
+ mock_gnocchi_client.resource.search.return_value = [
+ {"id": "test_id_1"},
+ {"id": "test_id_2"},
+ ]
+ mock_gnocchi_client.metric.get_measures.return_value = [
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 43,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 0.0345442539,
+ ),
+ (
+ datetime.datetime(
+ 2019,
+ 4,
+ 12,
+ 15,
+ 44,
+ tzinfo=datetime.timezone(datetime.timedelta(0), "+00:00"),
+ ),
+ 60.0,
+ 0.0333070363,
+ ),
+ ]
build_gnocchi_client.return_value = mock_gnocchi_client
- backend = GnocchiBackend({'_id': 'test_uuid'})
- value = backend._collect_interface_all_metric('packets_received', 'test_resource_id')
+ backend = GnocchiBackend({"_id": "test_uuid"})
+ value = backend._collect_interface_all_metric(
+ "packets_received", "test_resource_id"
+ )
self.assertEqual(value, 0.0666140726)
- mock_gnocchi_client.metric.get_measures.assert_any_call('packets_received', resource_id='test_id_1',
- limit=1)
- mock_gnocchi_client.metric.get_measures.assert_any_call('packets_received', resource_id='test_id_2',
- limit=1)
+ mock_gnocchi_client.metric.get_measures.assert_any_call(
+ "packets_received", resource_id="test_id_1", limit=1
+ )
+ mock_gnocchi_client.metric.get_measures.assert_any_call(
+ "packets_received", resource_id="test_id_2", limit=1
+ )
import re
-def mock_http_response(mocker, method='GET', site='https://vrops', url_pattern='', response_file='OK',
- status_code=200, exception=None):
- '''Helper function to load a canned response from a file.'''
- with open(os.path.join(os.path.dirname(__file__), 'vmware_mocks',
- '%s' % response_file), 'r') as f:
+def mock_http_response(
+ mocker,
+ method="GET",
+ site="https://vrops",
+ url_pattern="",
+ response_file="OK",
+ status_code=200,
+ exception=None,
+):
+ """Helper function to load a canned response from a file."""
+ with open(
+ os.path.join(os.path.dirname(__file__), "vmware_mocks", "%s" % response_file),
+ "r",
+ ) as f:
response = f.read()
matcher = re.compile(site + url_pattern)
from osm_mon.collector.vnf_collectors.vmware import VMwareCollector
from osm_mon.core.config import Config
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+ mock_http_response,
+)
from unittest import TestCase, mock
import json
import os
import requests_mock
-VIM_ACCOUNT = {"vrops_site": "https://vrops",
- "vrops_user": "",
- "vrops_password": "",
- "vim_url": "https://vcd",
- "admin_username": "",
- "admin_password": "",
- "vim_uuid": ""}
+VIM_ACCOUNT = {
+ "vrops_site": "https://vrops",
+ "vrops_user": "",
+ "vrops_password": "",
+ "vim_url": "https://vcd",
+ "admin_username": "",
+ "admin_password": "",
+ "vim_uuid": "",
+}
-@mock.patch.object(VMwareCollector, 'get_vm_moref_id',
- spec_set=True, autospec=True)
+@mock.patch.object(VMwareCollector, "get_vm_moref_id", spec_set=True, autospec=True)
class CollectorTest(TestCase):
-
- @mock.patch.object(VMwareCollector, 'get_vim_account',
- spec_set=True, autospec=True)
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+ @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
def setUp(self, mock_db, mock_get_vim_account):
super().setUp()
mock_get_vim_account.return_value = VIM_ACCOUNT
- self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+ self.collector = VMwareCollector(
+ Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+ )
self.mock_db = mock_db
- with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+ with open(
+ os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+ ) as f:
self.vnfr = json.load(f)
- with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFD.json'), 'r') as f:
+ with open(
+ os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFD.json"), "r"
+ ) as f:
self.vnfd = json.load(f)
def tearDown(self):
def test_collect_cpu_and_memory(self, mock_vm_moref_id):
mock_vm_moref_id.return_value = "VMWARE-OID-VM-1"
- self.vnfd['vdu'][0]['monitoring-parameter'] = [
+ self.vnfd["vdu"][0]["monitoring-parameter"] = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
- ]
+ {
+ "id": "ubuntu_vnf_average_memory_utilization",
+ "performance-metric": "average_memory_utilization",
+ },
+ ]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 2, "Number of metrics returned")
self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
- self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
+ self.assertEqual(
+ metrics[1].name, "average_memory_utilization", "Second metric name"
+ )
self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
def test_collect_no_moref(self, mock_vm_moref_id):
mock_vm_moref_id.return_value = None
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='404.txt', status_code=404)
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="404.txt",
+ status_code=404,
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_no_monitoring_param(self, _):
- self.vnfd['vdu'][0]['monitoring-parameter'] = []
+ self.vnfd["vdu"][0]["monitoring-parameter"] = []
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_empty_monitoring_param(self, _):
- del self.vnfd['vdu'][0]['monitoring-parameter']
+ del self.vnfd["vdu"][0]["monitoring-parameter"]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_no_name(self, _):
- del self.vnfr['vdur'][0]['name']
- del self.vnfr['vdur'][1]['name']
+ del self.vnfr["vdur"][0]["name"]
+ del self.vnfr["vdur"][1]["name"]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
class VApp_Details_Test(TestCase):
-
- @mock.patch.object(VMwareCollector, 'get_vim_account',
- spec_set=True, autospec=True)
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+ @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
def setUp(self, mock_db, mock_get_vim_account):
super().setUp()
self.mock_db = mock_db
mock_get_vim_account.return_value = VIM_ACCOUNT
- self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+ self.collector = VMwareCollector(
+ Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+ )
def tearDown(self):
super().tearDown()
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
def test_get_vapp_details(self, mock_vcd_client):
- mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+ mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests,
- site='https://vcd',
- url_pattern='/api/vApp/.*',
- response_file='vcd_vapp_response.xml')
- response = self.collector.get_vapp_details_rest('')
- self.assertDictContainsSubset({'vm_vcenter_info': {'vm_moref_id': 'vm-4055'}},
- response, 'Managed object reference id incorrect')
+ mock_http_response(
+ mock_requests,
+ site="https://vcd",
+ url_pattern="/api/vApp/.*",
+ response_file="vcd_vapp_response.xml",
+ )
+ response = self.collector.get_vapp_details_rest("")
+ self.assertDictContainsSubset(
+ {"vm_vcenter_info": {"vm_moref_id": "vm-4055"}},
+ response,
+ "Managed object reference id incorrect",
+ )
def test_no_admin_connect(self):
- response = self.collector.get_vapp_details_rest('')
- self.assertDictEqual(response, {}, 'Failed to connect should return empty dictionary')
+ response = self.collector.get_vapp_details_rest("")
+ self.assertDictEqual(
+ response, {}, "Failed to connect should return empty dictionary"
+ )
def test_no_id(self):
response = self.collector.get_vapp_details_rest()
- self.assertDictEqual(response, {}, 'No id supplied should return empty dictionary')
+ self.assertDictEqual(
+ response, {}, "No id supplied should return empty dictionary"
+ )
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
def test_get_vapp_details_404(self, mock_vcd_client):
- mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+ mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests,
- site='https://vcd',
- url_pattern='/api/vApp/.*',
- response_file='404.txt', status_code=404)
- response = self.collector.get_vapp_details_rest('')
- self.assertDictEqual(response, {}, 'HTTP error should return empty dictionary')
-
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.Client')
+ mock_http_response(
+ mock_requests,
+ site="https://vcd",
+ url_pattern="/api/vApp/.*",
+ response_file="404.txt",
+ status_code=404,
+ )
+ response = self.collector.get_vapp_details_rest("")
+ self.assertDictEqual(response, {}, "HTTP error should return empty dictionary")
+
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.Client")
def test_get_vapp_details_xml_parse_error(self, mock_vcd_client):
- mock_vcd_client.return_value._session.headers = {'x-vcloud-authorization': ''}
+ mock_vcd_client.return_value._session.headers = {"x-vcloud-authorization": ""}
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests,
- site='https://vcd',
- url_pattern='/api/vApp/.*',
- response_file='404.txt')
- response = self.collector.get_vapp_details_rest('')
- self.assertDictEqual(response, {}, 'XML parse error should return empty dictionary')
+ mock_http_response(
+ mock_requests,
+ site="https://vcd",
+ url_pattern="/api/vApp/.*",
+ response_file="404.txt",
+ )
+ response = self.collector.get_vapp_details_rest("")
+ self.assertDictEqual(
+ response, {}, "XML parse error should return empty dictionary"
+ )
class Get_VM_Moref_Test(TestCase):
-
- @mock.patch.object(VMwareCollector, 'get_vim_account',
- spec_set=True, autospec=True)
- @mock.patch('osm_mon.collector.vnf_collectors.vmware.CommonDbClient')
+ @mock.patch.object(VMwareCollector, "get_vim_account", spec_set=True, autospec=True)
+ @mock.patch("osm_mon.collector.vnf_collectors.vmware.CommonDbClient")
def setUp(self, mock_db, mock_get_vim_account):
super().setUp()
self.mock_db = mock_db
mock_get_vim_account.return_value = VIM_ACCOUNT
- self.collector = VMwareCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
+ self.collector = VMwareCollector(
+ Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4"
+ )
def tearDown(self):
super().tearDown()
- @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
- spec_set=True, autospec=True)
+ @mock.patch.object(
+ VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+ )
def test_get_vm_moref_id(self, mock_vapp_details):
- mock_vapp_details.return_value = {'vm_vcenter_info': {'vm_moref_id': 'vm-4055'}}
- response = self.collector.get_vm_moref_id('1234')
- self.assertEqual(response, 'vm-4055', 'Did not fetch correct ref id from dictionary')
+ mock_vapp_details.return_value = {"vm_vcenter_info": {"vm_moref_id": "vm-4055"}}
+ response = self.collector.get_vm_moref_id("1234")
+ self.assertEqual(
+ response, "vm-4055", "Did not fetch correct ref id from dictionary"
+ )
- @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
- spec_set=True, autospec=True)
+ @mock.patch.object(
+ VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+ )
def test_get_vm_moref_bad_content(self, mock_vapp_details):
mock_vapp_details.return_value = {}
- response = self.collector.get_vm_moref_id('1234')
- self.assertEqual(response, None, 'Error fetching vapp details should return None')
+ response = self.collector.get_vm_moref_id("1234")
+ self.assertEqual(
+ response, None, "Error fetching vapp details should return None"
+ )
- @mock.patch.object(VMwareCollector, 'get_vapp_details_rest',
- spec_set=True, autospec=True)
+ @mock.patch.object(
+ VMwareCollector, "get_vapp_details_rest", spec_set=True, autospec=True
+ )
def test_get_vm_moref_has_exception(self, mock_vapp_details):
- mock_vapp_details.side_effect = Exception('Testing')
- response = self.collector.get_vm_moref_id('1234')
- self.assertEqual(response, None, 'Exception while fetching vapp details should return None')
+ mock_vapp_details.side_effect = Exception("Testing")
+ response = self.collector.get_vm_moref_id("1234")
+ self.assertEqual(
+ response, None, "Exception while fetching vapp details should return None"
+ )
from osm_mon.collector.vnf_collectors.vio import VIOCollector
from osm_mon.core.config import Config
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+ mock_http_response,
+)
from unittest import TestCase, mock
import json
import os
import requests_mock
-VIM_ACCOUNT = {"vrops_site": "https://vrops",
- "vrops_user": "",
- "vrops_password": "",
- "vim_url": "",
- "admin_username": "",
- "admin_password": "",
- "vim_uuid": ""}
+VIM_ACCOUNT = {
+ "vrops_site": "https://vrops",
+ "vrops_user": "",
+ "vrops_password": "",
+ "vim_url": "",
+ "admin_username": "",
+ "admin_password": "",
+ "vim_uuid": "",
+}
class CollectorTest(TestCase):
-
- @mock.patch.object(VIOCollector, 'get_vim_account',
- spec_set=True, autospec=True)
- @mock.patch('osm_mon.collector.vnf_collectors.vio.CommonDbClient')
+ @mock.patch.object(VIOCollector, "get_vim_account", spec_set=True, autospec=True)
+ @mock.patch("osm_mon.collector.vnf_collectors.vio.CommonDbClient")
def setUp(self, mock_db, mock_get_vim_account):
super().setUp()
self.mock_db = mock_db
mock_get_vim_account.return_value = VIM_ACCOUNT
self.collector = VIOCollector(Config(), "9de6df67-b820-48c3-bcae-ee4838c5c5f4")
- with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+ with open(
+ os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+ ) as f:
self.vnfr = json.load(f)
- with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFD.json'), 'r') as f:
+ with open(
+ os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFD.json"), "r"
+ ) as f:
self.vnfd = json.load(f)
def tearDown(self):
super().tearDown()
def test_collect_cpu_and_memory(self):
- self.vnfd['vdu'][0]['monitoring-parameter'] = [
+ self.vnfd["vdu"][0]["monitoring-parameter"] = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
- ]
+ {
+ "id": "ubuntu_vnf_average_memory_utilization",
+ "performance-metric": "average_memory_utilization",
+ },
+ ]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 4, "Number of metrics returned")
self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
- self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
- self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
+ self.assertEqual(
+ metrics[1].name, "average_memory_utilization", "Second metric name"
+ )
+ self.assertEqual(
+ metrics[1].value, 20.515941619873047, "Memory metric value"
+ )
self.assertEqual(metrics[2].name, "cpu_utilization", "First metric name")
self.assertEqual(metrics[2].value, 0.05400000140070915, "CPU metric value")
- self.assertEqual(metrics[3].name, "average_memory_utilization", "Second metric name")
+ self.assertEqual(
+ metrics[3].name, "average_memory_utilization", "Second metric name"
+ )
self.assertEqual(metrics[3].value, 15.23439884185791, "Memory metric value")
def test_collect_no_monitoring_param(self):
- self.vnfd['vdu'][0]['monitoring-parameter'] = []
+ self.vnfd["vdu"][0]["monitoring-parameter"] = []
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_empty_monitoring_param(self):
- del self.vnfd['vdu'][0]['monitoring-parameter']
+ del self.vnfd["vdu"][0]["monitoring-parameter"]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_no_name(self):
- del self.vnfr['vdur'][0]['name']
- del self.vnfr['vdur'][1]['name']
+ del self.vnfr["vdur"][0]["name"]
+ del self.vnfr["vdur"][1]["name"]
self.mock_db.return_value.get_vnfd.return_value = self.vnfd
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.collector.collect(self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
# #
from osm_mon.collector.vnf_collectors.vrops.vrops_helper import vROPS_Helper
-from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import mock_http_response
+from osm_mon.tests.unit.collector.vnf_collectors.vmware.mock_http import (
+ mock_http_response,
+)
from unittest import TestCase
import json
class vROPS_Helper_Resource_List_Test(TestCase):
-
def setUp(self):
super().setUp()
self.vrops = vROPS_Helper()
def test_get_vm_resource_list_from_vrops(self):
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='vrops_resources.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="vrops_resources.json",
+ )
resource_list = self.vrops.get_vm_resource_list_from_vrops()
- self.assertEqual(len(resource_list), 3, "List of resources from canned vrops_resources.json")
+ self.assertEqual(
+ len(resource_list),
+ 3,
+ "List of resources from canned vrops_resources.json",
+ )
def test_get_vm_resource_list_from_vrops_http_404(self):
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='404.txt', status_code=404)
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="404.txt",
+ status_code=404,
+ )
resource_list = self.vrops.get_vm_resource_list_from_vrops()
self.assertEqual(len(resource_list), 0, "Should return an empty list")
def test_get_vm_resource_list_from_vrops_bad_json(self):
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources\\?resourceKind=VirtualMachine',
- response_file='malformed.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources\\?resourceKind=VirtualMachine",
+ response_file="malformed.json",
+ )
resource_list = self.vrops.get_vm_resource_list_from_vrops()
self.assertEqual(len(resource_list), 0, "Should return an empty list")
class vROPS_Helper_Get_Metrics_Test(TestCase):
-
def setUp(self):
super().setUp()
self.vrops = vROPS_Helper()
- with open(os.path.join(os.path.dirname(__file__), 'osm_mocks', 'VNFR.json'), 'r') as f:
+ with open(
+ os.path.join(os.path.dirname(__file__), "osm_mocks", "VNFR.json"), "r"
+ ) as f:
self.vnfr = json.load(f)
def tearDown(self):
super().tearDown()
def test_collect_one_metric_only(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- ]
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 1, "Number of metrics returned")
self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
def test_collect_cpu_and_memory(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- {"id": "ubuntu_vnf_average_memory_utilization", "performance-metric": "average_memory_utilization"}
- ]
+ {
+ "id": "ubuntu_vnf_average_memory_utilization",
+ "performance-metric": "average_memory_utilization",
+ },
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 2, "Number of metrics returned")
self.assertEqual(metrics[0].name, "cpu_utilization", "First metric name")
self.assertEqual(metrics[0].value, 100.0, "CPU metric value")
- self.assertEqual(metrics[1].name, "average_memory_utilization", "Second metric name")
+ self.assertEqual(
+ metrics[1].name, "average_memory_utilization", "Second metric name"
+ )
self.assertEqual(metrics[1].value, 20.515941619873047, "Memory metric value")
def test_collect_adjusted_metric(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
- {'id': 'ubuntu_vnf_cpu_util', 'performance-metric': 'disk_read_bytes'}
- ]
+ {"id": "ubuntu_vnf_cpu_util", "performance-metric": "disk_read_bytes"}
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(metrics[0].value, 10240.0, "Disk read bytes (not KB/s)")
def test_collect_not_provided_metric(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
- {'id': 'cirros_vnf_packets_sent', 'performance-metric': 'packets_in_dropped'},
- ]
+ {
+ "id": "cirros_vnf_packets_sent",
+ "performance-metric": "packets_in_dropped",
+ },
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_unkown_metric(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
- {'id': 'cirros_vnf-Unknown_Metric', 'performance-metric': 'unknown'},
- ]
+ {"id": "cirros_vnf-Unknown_Metric", "performance-metric": "unknown"},
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_vrops_no_data(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- ]
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='OK.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="OK.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_vrops_unknown_vim_id(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {"name": "vmware-scaling-1-ubuntu_vnfd-VM-2"}
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- ]
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_vrops_http_error(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- ]
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='404.txt', status_code=404)
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="404.txt",
+ status_code=404,
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_vrops_json_parse_error(self):
- vdu_mappings = {'VMWARE-OID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2',
- 'vrops_id': 'VROPS-UUID-1'}}
+ vdu_mappings = {
+ "VMWARE-OID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-1",
+ }
+ }
monitoring_params = [
{"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
- ]
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='404.txt')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="404.txt",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
self.assertEqual(len(metrics), 0, "Number of metrics returned")
def test_collect_multi_vdu(self):
- vdu_mappings = {'VMWARE-UUID-VM-1':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-1', 'vrops_id': 'VROPS-UUID-1'},
- 'VMWARE-UUID-VM-2':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2', 'vrops_id': 'VROPS-UUID-2'},
- 'VMWARE-UUID-VM-3':
- {'name': 'vmware-scaling-1-ubuntu_vnfd-VM-2', 'vrops_id': 'VROPS-UUID-3'}
- }
+ vdu_mappings = {
+ "VMWARE-UUID-VM-1": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-1",
+ "vrops_id": "VROPS-UUID-1",
+ },
+ "VMWARE-UUID-VM-2": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-2",
+ },
+ "VMWARE-UUID-VM-3": {
+ "name": "vmware-scaling-1-ubuntu_vnfd-VM-2",
+ "vrops_id": "VROPS-UUID-3",
+ },
+ }
monitoring_params = [
- {'id': 'ubuntu_vnf_cpu_util', 'performance-metric': 'cpu_utilization'},
- {'id': 'ubuntu_vnf_average_memory_utilization', 'performance-metric': 'average_memory_utilization'},
- {'id': 'ubuntu_vnf_disk_read_ops', 'performance-metric': 'disk_read_ops'},
- {'id': 'ubuntu_vnf_disk_write_ops', 'performance-metric': 'disk_write_ops'},
- {'id': 'ubuntu_vnf_disk_read_bytes', 'performance-metric': 'disk_read_bytes'},
- {'id': 'ubuntu_vnf_disk_write_bytes', 'performance-metric': 'disk_write_bytes'},
- {'id': 'ubuntu_vnf_packets_out_dropped', 'performance-metric': 'packets_out_dropped'},
- {'id': 'ubuntu_vnf_packets_received', 'performance-metric': 'packets_received'},
- {'id': 'ubuntu_vnf_packets_sent', 'performance-metric': 'packets_sent'}
- ]
+ {"id": "ubuntu_vnf_cpu_util", "performance-metric": "cpu_utilization"},
+ {
+ "id": "ubuntu_vnf_average_memory_utilization",
+ "performance-metric": "average_memory_utilization",
+ },
+ {"id": "ubuntu_vnf_disk_read_ops", "performance-metric": "disk_read_ops"},
+ {"id": "ubuntu_vnf_disk_write_ops", "performance-metric": "disk_write_ops"},
+ {
+ "id": "ubuntu_vnf_disk_read_bytes",
+ "performance-metric": "disk_read_bytes",
+ },
+ {
+ "id": "ubuntu_vnf_disk_write_bytes",
+ "performance-metric": "disk_write_bytes",
+ },
+ {
+ "id": "ubuntu_vnf_packets_out_dropped",
+ "performance-metric": "packets_out_dropped",
+ },
+ {
+ "id": "ubuntu_vnf_packets_received",
+ "performance-metric": "packets_received",
+ },
+ {"id": "ubuntu_vnf_packets_sent", "performance-metric": "packets_sent"},
+ ]
with requests_mock.Mocker() as mock_requests:
- mock_http_response(mock_requests, method='POST',
- url_pattern='/suite-api/api/auth/token/acquire',
- response_file='vrops_token.json')
- mock_http_response(mock_requests,
- url_pattern='/suite-api/api/resources/stats.*',
- response_file='vrops_multi.json')
+ mock_http_response(
+ mock_requests,
+ method="POST",
+ url_pattern="/suite-api/api/auth/token/acquire",
+ response_file="vrops_token.json",
+ )
+ mock_http_response(
+ mock_requests,
+ url_pattern="/suite-api/api/resources/stats.*",
+ response_file="vrops_multi.json",
+ )
metrics = self.vrops.get_metrics(vdu_mappings, monitoring_params, self.vnfr)
- self.assertEqual(len(metrics), len(monitoring_params) * len(vdu_mappings), "Number of metrics returned")
+ self.assertEqual(
+ len(metrics),
+ len(monitoring_params) * len(vdu_mappings),
+ "Number of metrics returned",
+ )
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(CommonDbClient, "get_vnfr")
def test_get_vim_id(self, get_vnfr):
- get_vnfr.return_value = {'_id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
- '_admin': {
- 'projects_read': ['admin'], 'created': 1526044312.102287,
- 'modified': 1526044312.102287, 'projects_write': ['admin']
- },
- 'vim-account-id': 'c1740601-7287-48c8-a2c9-bce8fee459eb',
- 'nsr-id-ref': '5ec3f571-d540-4cb0-9992-971d1b08312e',
- 'vdur': [
- {
- 'internal-connection-point': [],
- 'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
- 'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
- 'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
- 'name': 'ubuntuvnf_vnfd-VM'
- }
- ],
- 'vnfd-ref': 'ubuntuvnf_vnfd',
- 'member-vnf-index-ref': '1',
- 'created-time': 1526044312.0999322,
- 'vnfd-id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
- 'id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01'}
+ get_vnfr.return_value = {
+ "_id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ "_admin": {
+ "projects_read": ["admin"],
+ "created": 1526044312.102287,
+ "modified": 1526044312.102287,
+ "projects_write": ["admin"],
+ },
+ "vim-account-id": "c1740601-7287-48c8-a2c9-bce8fee459eb",
+ "nsr-id-ref": "5ec3f571-d540-4cb0-9992-971d1b08312e",
+ "vdur": [
+ {
+ "internal-connection-point": [],
+ "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+ "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+ "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+ "name": "ubuntuvnf_vnfd-VM",
+ }
+ ],
+ "vnfd-ref": "ubuntuvnf_vnfd",
+ "member-vnf-index-ref": "1",
+ "created-time": 1526044312.0999322,
+ "vnfd-id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ "id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ }
common_db_client = CommonDbClient(self.config)
- vim_account_id = common_db_client.get_vim_account_id('5ec3f571-d540-4cb0-9992-971d1b08312e', 1)
- self.assertEqual(vim_account_id, 'c1740601-7287-48c8-a2c9-bce8fee459eb')
+ vim_account_id = common_db_client.get_vim_account_id(
+ "5ec3f571-d540-4cb0-9992-971d1b08312e", 1
+ )
+ self.assertEqual(vim_account_id, "c1740601-7287-48c8-a2c9-bce8fee459eb")
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(dbmongo.DbMongo, "get_one")
def test_get_vdur(self, get_one):
- get_one.return_value = {'_id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
- '_admin': {
- 'projects_read': ['admin'], 'created': 1526044312.102287,
- 'modified': 1526044312.102287, 'projects_write': ['admin']
- },
- 'vim-account-id': 'c1740601-7287-48c8-a2c9-bce8fee459eb',
- 'nsr-id-ref': '5ec3f571-d540-4cb0-9992-971d1b08312e',
- 'vdur': [
- {
- 'internal-connection-point': [],
- 'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
- 'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
- 'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
- 'name': 'ubuntuvnf_vnfd-VM'
- }
- ],
- 'vnfd-ref': 'ubuntuvnf_vnfd',
- 'member-vnf-index-ref': '1',
- 'created-time': 1526044312.0999322,
- 'vnfd-id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01',
- 'id': 'a314c865-aee7-4d9b-9c9d-079d7f857f01'}
+ get_one.return_value = {
+ "_id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ "_admin": {
+ "projects_read": ["admin"],
+ "created": 1526044312.102287,
+ "modified": 1526044312.102287,
+ "projects_write": ["admin"],
+ },
+ "vim-account-id": "c1740601-7287-48c8-a2c9-bce8fee459eb",
+ "nsr-id-ref": "5ec3f571-d540-4cb0-9992-971d1b08312e",
+ "vdur": [
+ {
+ "internal-connection-point": [],
+ "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+ "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+ "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+ "name": "ubuntuvnf_vnfd-VM",
+ }
+ ],
+ "vnfd-ref": "ubuntuvnf_vnfd",
+ "member-vnf-index-ref": "1",
+ "created-time": 1526044312.0999322,
+ "vnfd-id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ "id": "a314c865-aee7-4d9b-9c9d-079d7f857f01",
+ }
common_db_client = CommonDbClient(self.config)
- vdur = common_db_client.get_vdur('5ec3f571-d540-4cb0-9992-971d1b08312e', '1', 'ubuntuvnf_vnfd-VM')
+ vdur = common_db_client.get_vdur(
+ "5ec3f571-d540-4cb0-9992-971d1b08312e", "1", "ubuntuvnf_vnfd-VM"
+ )
expected_vdur = {
- 'internal-connection-point': [],
- 'vdu-id-ref': 'ubuntuvnf_vnfd-VM',
- 'id': 'ffd73f33-c8bb-4541-a977-44dcc3cbe28d',
- 'vim-id': '27042672-5190-4209-b844-95bbaeea7ea7',
- 'name': 'ubuntuvnf_vnfd-VM'
+ "internal-connection-point": [],
+ "vdu-id-ref": "ubuntuvnf_vnfd-VM",
+ "id": "ffd73f33-c8bb-4541-a977-44dcc3cbe28d",
+ "vim-id": "27042672-5190-4209-b844-95bbaeea7ea7",
+ "name": "ubuntuvnf_vnfd-VM",
}
self.assertDictEqual(vdur, expected_vdur)
@mock.patch.object(dbmongo.DbMongo, "get_one")
@mock.patch.object(CommonDbClient, "decrypt_vim_password")
def test_get_vim_account_default_schema(self, decrypt_vim_password, get_one):
- schema_version = '10.0'
- vim_id = '1'
+ schema_version = "10.0"
+ vim_id = "1"
get_one.return_value = {
- '_id': vim_id,
- 'vim_password': 'vim_password',
- 'schema_version': schema_version,
- 'config': {
- 'admin_password': 'admin_password',
- 'vrops_password': 'vrops_password',
- 'nsx_password': 'nsx_password',
- 'vcenter_password': 'vcenter_password'
- }
+ "_id": vim_id,
+ "vim_password": "vim_password",
+ "schema_version": schema_version,
+ "config": {
+ "admin_password": "admin_password",
+ "vrops_password": "vrops_password",
+ "nsx_password": "nsx_password",
+ "vcenter_password": "vcenter_password",
+ },
}
common_db_client = CommonDbClient(self.config)
- common_db_client.get_vim_account('1')
+ common_db_client.get_vim_account("1")
- decrypt_vim_password.assert_any_call('vim_password', schema_version, vim_id)
- decrypt_vim_password.assert_any_call('vrops_password', schema_version, vim_id)
- decrypt_vim_password.assert_any_call('admin_password', schema_version, vim_id)
- decrypt_vim_password.assert_any_call('nsx_password', schema_version, vim_id)
- decrypt_vim_password.assert_any_call('vcenter_password', schema_version, vim_id)
+ decrypt_vim_password.assert_any_call("vim_password", schema_version, vim_id)
+ decrypt_vim_password.assert_any_call("vrops_password", schema_version, vim_id)
+ decrypt_vim_password.assert_any_call("admin_password", schema_version, vim_id)
+ decrypt_vim_password.assert_any_call("nsx_password", schema_version, vim_id)
+ decrypt_vim_password.assert_any_call("vcenter_password", schema_version, vim_id)
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(dbmongo.DbMongo, "get_one")
@mock.patch.object(CommonDbClient, "decrypt_vim_password")
def test_get_vim_account_1_1_schema(self, decrypt_vim_password, get_one):
- schema_version = '1.1'
- vim_id = '1'
+ schema_version = "1.1"
+ vim_id = "1"
get_one.return_value = {
- '_id': vim_id,
- 'vim_password': 'vim_password',
- 'schema_version': schema_version,
- 'config': {
- 'vrops_password': 'vrops_password'
- }
+ "_id": vim_id,
+ "vim_password": "vim_password",
+ "schema_version": schema_version,
+ "config": {"vrops_password": "vrops_password"},
}
common_db_client = CommonDbClient(self.config)
- common_db_client.get_vim_account('1')
+ common_db_client.get_vim_account("1")
- decrypt_vim_password.assert_any_call('vim_password', schema_version, vim_id)
- self.assertRaises(AssertionError, decrypt_vim_password.assert_any_call, 'vrops_password', schema_version,
- vim_id)
+ decrypt_vim_password.assert_any_call("vim_password", schema_version, vim_id)
+ self.assertRaises(
+ AssertionError,
+ decrypt_vim_password.assert_any_call,
+ "vrops_password",
+ schema_version,
+ vim_id,
+ )
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(dbmongo.DbMongo, "get_list")
def test_get_alarms(self, get_list):
- get_list.return_value = [{
- 'uuid': '1',
- 'name': 'name',
- 'severity': 'severity',
- 'threshold': 50,
- 'operation': 'operation',
- 'statistic': 'statistic',
- 'tags': {},
- }]
+ get_list.return_value = [
+ {
+ "uuid": "1",
+ "name": "name",
+ "severity": "severity",
+ "threshold": 50,
+ "operation": "operation",
+ "statistic": "statistic",
+ "tags": {},
+ }
+ ]
common_db_client = CommonDbClient(self.config)
alarms = common_db_client.get_alarms()
- self.assertEqual('1', alarms[0].uuid)
+ self.assertEqual("1", alarms[0].uuid)
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(dbmongo.DbMongo, "create")
def test_create_alarm(self, create):
- alarm = Alarm('name', 'severity', 50.0, 'operation', 'statistic', 'metric', {})
- alarm.uuid = '1'
+ alarm = Alarm("name", "severity", 50.0, "operation", "statistic", "metric", {})
+ alarm.uuid = "1"
common_db_client = CommonDbClient(self.config)
common_db_client.create_alarm(alarm)
- create.assert_called_with('alarms', {'tags': {}, 'threshold': 50.0, 'metric': 'metric',
- 'severity': 'severity', 'statistic': 'statistic',
- 'name': 'name', 'operation': 'operation', 'uuid': '1'})
+ create.assert_called_with(
+ "alarms",
+ {
+ "tags": {},
+ "threshold": 50.0,
+ "metric": "metric",
+ "severity": "severity",
+ "statistic": "statistic",
+ "name": "name",
+ "operation": "operation",
+ "uuid": "1",
+ },
+ )
@mock.patch.object(dbmongo.DbMongo, "db_connect", mock.Mock())
@mock.patch.object(dbmongo.DbMongo, "del_one")
def test_delete_alarm(self, delete):
common_db_client = CommonDbClient(self.config)
- common_db_client.delete_alarm('1')
- delete.assert_called_with('alarms', {'uuid': '1'})
+ common_db_client.delete_alarm("1")
+ delete.assert_called_with("alarms", {"uuid": "1"})
class TestMessageBusClient(TestCase):
-
def setUp(self):
self.config = Config()
- self.config.set('message', 'driver', 'kafka')
+ self.config.set("message", "driver", "kafka")
self.loop = asyncio.new_event_loop()
- @mock.patch.object(MsgKafka, 'aioread')
+ @mock.patch.object(MsgKafka, "aioread")
def test_aioread(self, aioread):
async def mock_callback():
pass
future = asyncio.Future(loop=self.loop)
- future.set_result('mock')
+ future.set_result("mock")
aioread.return_value = future
msg_bus = MessageBusClient(self.config, loop=self.loop)
- topic = 'test_topic'
+ topic = "test_topic"
self.loop.run_until_complete(msg_bus.aioread([topic], mock_callback))
- aioread.assert_called_with(['test_topic'], self.loop, aiocallback=mock_callback)
+ aioread.assert_called_with(["test_topic"], self.loop, aiocallback=mock_callback)
- @mock.patch.object(MsgKafka, 'aiowrite')
+ @mock.patch.object(MsgKafka, "aiowrite")
def test_aiowrite(self, aiowrite):
future = asyncio.Future(loop=self.loop)
- future.set_result('mock')
+ future.set_result("mock")
aiowrite.return_value = future
msg_bus = MessageBusClient(self.config, loop=self.loop)
- topic = 'test_topic'
- key = 'test_key'
- msg = {'test': 'test_msg'}
+ topic = "test_topic"
+ key = "test_key"
+ msg = {"test": "test_msg"}
self.loop.run_until_complete(msg_bus.aiowrite(topic, key, msg))
aiowrite.assert_called_with(topic, key, msg, self.loop)
- @mock.patch.object(MsgKafka, 'aioread')
+ @mock.patch.object(MsgKafka, "aioread")
def test_aioread_once(self, aioread):
future = asyncio.Future(loop=self.loop)
- future.set_result('mock')
+ future.set_result("mock")
aioread.return_value = future
msg_bus = MessageBusClient(self.config, loop=self.loop)
- topic = 'test_topic'
+ topic = "test_topic"
self.loop.run_until_complete(msg_bus.aioread_once(topic))
- aioread.assert_called_with('test_topic', self.loop)
+ aioread.assert_called_with("test_topic", self.loop)
super().setUp()
self.config = Config()
- @mock.patch('multiprocessing.Process')
+ @mock.patch("multiprocessing.Process")
@mock.patch.object(Evaluator, "notify_alarm")
@mock.patch.object(EvaluatorService, "evaluate_alarms")
def test_evaluate(self, evaluate_alarms, notify_alarm, process):
mock_alarm = mock.Mock()
- mock_alarm.operation = 'gt'
+ mock_alarm.operation = "gt"
mock_alarm.threshold = 50.0
evaluate_alarms.return_value = [(mock_alarm, AlarmStatus.ALARM)]
evaluator = Evaluator(self.config)
evaluator.evaluate()
- process.assert_called_with(target=notify_alarm, args=(mock_alarm, AlarmStatus.ALARM))
+ process.assert_called_with(
+ target=notify_alarm, args=(mock_alarm, AlarmStatus.ALARM)
+ )
{
"mac-address": "fa:16:3e:71:fd:b8",
"name": "eth0",
- "ip-address": "192.168.160.2"
+ "ip-address": "192.168.160.2",
}
],
"status": "ACTIVE",
"name": "cirros_ns-1-cirros_vnfd-VM-1",
"status-detailed": None,
"ip-address": "192.168.160.2",
- "vdu-id-ref": "cirros_vnfd-VM"
+ "vdu-id-ref": "cirros_vnfd-VM",
}
],
"id": "0d9d06ad-3fc2-418c-9934-465e815fafe2",
"vnfd-id": "63f44c41-45ee-456b-b10d-5f08fb1796e0",
"_admin": {
"created": 1535392482.0067868,
- "projects_read": [
- "admin"
- ],
+ "projects_read": ["admin"],
"modified": 1535392482.0067868,
- "projects_write": [
- "admin"
- ]
+ "projects_write": ["admin"],
},
"nsr-id-ref": "87776f33-b67c-417a-8119-cb08e4098951",
"member-vnf-index-ref": "1",
- "connection-point": [
- {
- "name": "eth0",
- "id": None,
- "connection-point-id": None
- }
- ]
+ "connection-point": [{"name": "eth0", "id": None, "connection-point-id": None}],
}
vnfd_record_mock = {
"name": "cirros_vnfd-VM",
"int-cpd": [
{
- "virtual-network-interface-requirement": [
- {
- "name": "vdu-eth0"
- }
- ],
- "id": "vdu-eth0-int"
+ "virtual-network-interface-requirement": [{"name": "vdu-eth0"}],
+ "id": "vdu-eth0-int",
}
],
"virtual-compute-desc": "cirros_vnfd-VM-compute",
- "virtual-storage-desc": [
- "cirros_vnfd-VM-storage"
- ],
+ "virtual-storage-desc": ["cirros_vnfd-VM-storage"],
"sw-image-desc": "cirros034",
"monitoring-parameter": [
{
"id": "cirros_vnfd-VM_memory_util",
"name": "cirros_vnfd-VM_memory_util",
- "performance-metric": "average_memory_utilization"
+ "performance-metric": "average_memory_utilization",
}
- ]
+ ],
}
],
"virtual-compute-desc": [
{
"id": "cirros_vnfd-VM-compute",
- "virtual-cpu": {
- "num-virtual-cpu": 1
- },
- "virtual-memory": {
- "size": 1
- }
- }
- ],
- "virtual-storage-desc": [
- {
- "id": "cirros_vnfd-VM-storage",
- "size-of-storage": 2
- }
- ],
- "sw-image-desc": [
- {
- "id": "cirros034",
- "name": "cirros034",
- "image": "cirros034"
+ "virtual-cpu": {"num-virtual-cpu": 1},
+ "virtual-memory": {"size": 1},
}
],
+ "virtual-storage-desc": [{"id": "cirros_vnfd-VM-storage", "size-of-storage": 2}],
+ "sw-image-desc": [{"id": "cirros034", "name": "cirros034", "image": "cirros034"}],
"ext-cpd": [
{
- "int-cpd": {
- "vdu-id": "cirros_vnfd-VM",
- "cpd": "vdu-eth0-int"
- },
- "id": "vnf-cp0-ext"
+ "int-cpd": {"vdu-id": "cirros_vnfd-VM", "cpd": "vdu-eth0-int"},
+ "id": "vnf-cp0-ext",
}
],
"df": [
{
"id": "default-df",
- "vdu-profile": [
- {
- "id": "cirros_vnfd-VM"
- }
- ],
+ "vdu-profile": [{"id": "cirros_vnfd-VM"}],
"instantiation-level": [
{
"id": "default-instantiation-level",
- "vdu-level": [
- {
- "vdu-id": "cirros_vnfd-VM"
- }
- ]
+ "vdu-level": [{"vdu-id": "cirros_vnfd-VM"}],
}
- ]
+ ],
}
],
"description": "Simple VNF example with a cirros and a scaling group descriptor",
- "mgmt-cp": "vnf-cp0-ext"
+ "mgmt-cp": "vnf-cp0-ext",
}
@mock.patch.object(EvaluatorService, "_get_metric_value")
def test_evaluate_metric(self, get_metric_value):
mock_alarm = mock.Mock()
- mock_alarm.operation = 'gt'
+ mock_alarm.operation = "gt"
mock_alarm.threshold = 50.0
- mock_alarm.metric = 'metric_name'
+ mock_alarm.metric = "metric_name"
get_metric_value.return_value = 100.0
service = EvaluatorService(self.config)
service.queue.put.assert_called_with((mock_alarm, AlarmStatus.ALARM))
service.queue.reset_mock()
- mock_alarm.operation = 'lt'
+ mock_alarm.operation = "lt"
service._evaluate_metric(mock_alarm)
service.queue.put.assert_called_with((mock_alarm, AlarmStatus.OK))
service.queue.reset_mock()
service._evaluate_metric(mock_alarm)
service.queue.put.assert_called_with((mock_alarm, AlarmStatus.INSUFFICIENT))
- @mock.patch('multiprocessing.Process')
+ @mock.patch("multiprocessing.Process")
@mock.patch.object(EvaluatorService, "_evaluate_metric")
@mock.patch.object(CommonDbClient, "get_vnfd")
@mock.patch.object(CommonDbClient, "get_vnfr")
@mock.patch.object(CommonDbClient, "get_alarms")
- def test_evaluate_alarms(self, alarm_list, get_vnfr, get_vnfd, evaluate_metric, process):
+ def test_evaluate_alarms(
+ self, alarm_list, get_vnfr, get_vnfd, evaluate_metric, process
+ ):
mock_alarm = mock.Mock()
- mock_alarm.vdur_name = 'cirros_ns-1-cirros_vnfd-VM-1'
- mock_alarm.monitoring_param = 'cirros_vnf_memory_util'
- mock_alarm.tags = {'name': 'value'}
+ mock_alarm.vdur_name = "cirros_ns-1-cirros_vnfd-VM-1"
+ mock_alarm.monitoring_param = "cirros_vnf_memory_util"
+ mock_alarm.tags = {"name": "value"}
alarm_list.return_value = [mock_alarm]
get_vnfr.return_value = vnfr_record_mock
get_vnfd.return_value = vnfd_record_mock
@mock.patch.object(PrometheusBackend, "get_metric_value")
def test_get_metric_value_prometheus(self, get_metric_value):
- self.config.set('evaluator', 'backend', 'prometheus')
+ self.config.set("evaluator", "backend", "prometheus")
evaluator = EvaluatorService(self.config)
- evaluator._get_metric_value('test', {})
+ evaluator._get_metric_value("test", {})
- get_metric_value.assert_called_with('test', {})
+ get_metric_value.assert_called_with("test", {})
def test_build_query(self):
prometheus = PrometheusBackend(self.config)
alarm_tags = collections.OrderedDict()
- alarm_tags['tag_1'] = 'value_1'
- alarm_tags['tag_2'] = 'value_2'
- query = prometheus._build_query('metric_name', alarm_tags)
- self.assertEqual(query, 'query=osm_metric_name{tag_1="value_1",tag_2="value_2"}')
+ alarm_tags["tag_1"] = "value_1"
+ alarm_tags["tag_2"] = "value_2"
+ query = prometheus._build_query("metric_name", alarm_tags)
+ self.assertEqual(
+ query, 'query=osm_metric_name{tag_1="value_1",tag_2="value_2"}'
+ )
from setuptools import setup
-_name = 'osm_mon'
-_version_command = ('git describe --match v* --tags --long --dirty', 'pep440-git-full')
-_description = 'OSM Monitoring Module'
+_name = "osm_mon"
+_version_command = ("git describe --match v* --tags --long --dirty", "pep440-git-full")
+_description = "OSM Monitoring Module"
_author = "OSM Support"
-_author_email = 'osmsupport@etsi.org'
-_maintainer = 'OSM Support'
-_maintainer_email = 'osmsupport@etsi.org'
-_license = 'Apache 2.0'
-_url = 'https://osm.etsi.org/gitweb/?p=osm/MON.git;a=tree'
+_author_email = "osmsupport@etsi.org"
+_maintainer = "OSM Support"
+_maintainer_email = "osmsupport@etsi.org"
+_license = "Apache 2.0"
+_url = "https://osm.etsi.org/gitweb/?p=osm/MON.git;a=tree"
setup(
name=_name,
version_command=_version_command,
description=_description,
- long_description=open('README.rst', encoding='utf-8').read(),
+ long_description=open("README.rst", encoding="utf-8").read(),
author=_author,
author_email=_author_email,
maintainer=_maintainer,
"osm-mon-healthcheck = osm_mon.cmd.mon_healthcheck:main",
]
},
- setup_requires=['setuptools-version-command']
+ setup_requires=["setuptools-version-command"],
)
skip_install = true
commands =
- black --check --diff osm_mon/
+ - black --check --diff setup.py
#######################################################################################