X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FLCM.git;a=blobdiff_plain;f=osm_lcm%2Fns.py;h=41926253dd454ded658de18045cfabf5b696bf7b;hp=b13d4662f0ada383d9bb09f1be3321c71a33cf7a;hb=c41fe83a651559f7e85e959beaeff5ea0a1ceaaa;hpb=486707fb941546f366f89ed30e35d280395e6226

diff --git a/osm_lcm/ns.py b/osm_lcm/ns.py
index b13d466..4192625 100644
--- a/osm_lcm/ns.py
+++ b/osm_lcm/ns.py
@@ -17,7 +17,7 @@
 ##
 
 import asyncio
-from typing import Any, Dict
+from typing import Any, Dict, List
 import yaml
 import logging
 import logging.handlers
@@ -96,6 +96,7 @@ from n2vc.n2vc_juju_conn import N2VCJujuConnector
 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
 
 from osm_lcm.lcm_helm_conn import LCMHelmConn
+from osm_lcm.prometheus import parse_job
 
 from copy import copy, deepcopy
 from time import time
@@ -123,7 +124,7 @@ class NsLcm(LcmBase):
     SUBOPERATION_STATUS_SKIP = -3
     task_name_deploy_vca = "Deploying VCA"
 
-    def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
+    def __init__(self, msg, lcm_tasks, config, loop):
         """
         Init, Connect to database, filesystem storage, and messaging
         :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
@@ -200,8 +201,6 @@ class NsLcm(LcmBase):
             "helm-v3": self.conn_helm_ee,
         }
 
-        self.prometheus = prometheus
-
         # create RO client
         self.RO = NgRoClient(self.loop, **self.ro_config)
 
@@ -1987,7 +1986,7 @@ class NsLcm(LcmBase):
 
             # STEP 7 Configure metrics
             if vca_type == "helm" or vca_type == "helm-v3":
-                prometheus_jobs = await self.add_prometheus_metrics(
+                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                     ee_id=ee_id,
                     artifact_path=artifact_path,
                     ee_config_descriptor=ee_config_descriptor,
@@ -2002,6 +2001,17 @@ class NsLcm(LcmBase):
                         {db_update_entry + "prometheus_jobs": prometheus_jobs},
                     )
 
+                    for job in prometheus_jobs:
+                        self.db.set_one(
+                            "prometheus_jobs",
+                            {
+                                "job_name": job["job_name"]
+                            },
+                            job,
+                            upsert=True,
+                            fail_on_empty=False
+                        )
+
             step = "instantiated at VCA"
             self.logger.debug(logging_text + step)
 
@@ -2276,8 +2286,10 @@ class NsLcm(LcmBase):
 
             # read from db: ns
             stage[1] = "Getting nsr={} from db.".format(nsr_id)
+            self.logger.debug(logging_text + stage[1])
             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
             stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
+            self.logger.debug(logging_text + stage[1])
             nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
             self.fs.sync(db_nsr["nsd-id"])
             db_nsr["nsd"] = nsd
@@ -2735,15 +2747,37 @@ class NsLcm(LcmBase):
         nsd: Dict[str, Any],
         vca: DeployedVCA,
         cached_vnfds: Dict[str, Any],
-    ):
+    ) -> List[Relation]:
         relations = []
         db_ns_relations = get_ns_configuration_relation_list(nsd)
         for r in db_ns_relations:
+            provider_dict = None
+            requirer_dict = None
+            if all(key in r for key in ("provider", "requirer")):
+                provider_dict = r["provider"]
+                requirer_dict = r["requirer"]
+            elif "entities" in r:
+                provider_id = r["entities"][0]["id"]
+                provider_dict = {
+                    "nsr-id": nsr_id,
+                    "endpoint": r["entities"][0]["endpoint"],
+                }
+                if provider_id != nsd["id"]:
+                    provider_dict["vnf-profile-id"] = provider_id
+                requirer_id = r["entities"][1]["id"]
+                requirer_dict = {
+                    "nsr-id": nsr_id,
+                    "endpoint": r["entities"][1]["endpoint"],
+                }
+                if requirer_id != nsd["id"]:
+                    requirer_dict["vnf-profile-id"] = requirer_id
+            else:
+                raise Exception("provider/requirer or entities must be included in the relation.")
             relation_provider = self._update_ee_relation_data_with_implicit_data(
-                nsr_id, nsd, r["provider"], cached_vnfds
+                nsr_id, nsd, provider_dict, cached_vnfds
             )
             relation_requirer = self._update_ee_relation_data_with_implicit_data(
-                nsr_id, nsd, r["requirer"], cached_vnfds
+                nsr_id, nsd, requirer_dict, cached_vnfds
             )
             provider = EERelation(relation_provider)
             requirer = EERelation(relation_requirer)
@@ -2759,7 +2793,7 @@ class NsLcm(LcmBase):
         nsd: Dict[str, Any],
         vca: DeployedVCA,
         cached_vnfds: Dict[str, Any],
-    ):
+    ) -> List[Relation]:
         relations = []
         vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
         vnf_profile_id = vnf_profile["id"]
@@ -2767,11 +2801,35 @@ class NsLcm(LcmBase):
         db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
         db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
         for r in db_vnf_relations:
+            provider_dict = None
+            requirer_dict = None
+            if all(key in r for key in ("provider", "requirer")):
+                provider_dict = r["provider"]
+                requirer_dict = r["requirer"]
+            elif "entities" in r:
+                provider_id = r["entities"][0]["id"]
+                provider_dict = {
+                    "nsr-id": nsr_id,
+                    "vnf-profile-id": vnf_profile_id,
+                    "endpoint": r["entities"][0]["endpoint"],
+                }
+                if provider_id != vnfd_id:
+                    provider_dict["vdu-profile-id"] = provider_id
+                requirer_id = r["entities"][1]["id"]
+                requirer_dict = {
+                    "nsr-id": nsr_id,
+                    "vnf-profile-id": vnf_profile_id,
+                    "endpoint": r["entities"][1]["endpoint"],
+                }
+                if requirer_id != vnfd_id:
+                    requirer_dict["vdu-profile-id"] = requirer_id
+            else:
+                raise Exception("provider/requirer or entities must be included in the relation.")
             relation_provider = self._update_ee_relation_data_with_implicit_data(
-                nsr_id, nsd, r["provider"], cached_vnfds, vnf_profile_id=vnf_profile_id
+                nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
             )
             relation_requirer = self._update_ee_relation_data_with_implicit_data(
-                nsr_id, nsd, r["requirer"], cached_vnfds, vnf_profile_id=vnf_profile_id
+                nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
            )
             provider = EERelation(relation_provider)
             requirer = EERelation(relation_requirer)
@@ -3900,8 +3958,9 @@ class NsLcm(LcmBase):
                 "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
             )
 
-        if vca_deployed.get("prometheus_jobs") and self.prometheus:
-            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
+        # Delete Prometheus Jobs if any
+        # This uses NSR_ID, so it will destroy any jobs under this index
+        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})
 
         if destroy_ee:
             await self.vca_map[vca_type].delete_execution_environment(
@@ -4787,6 +4846,9 @@ class NsLcm(LcmBase):
                 )
                 step = "Getting vnfd from database"
                 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
+
+                # Sync filesystem before running a primitive
+                self.fs.sync(db_vnfr["vnfd-id"])
             else:
                 step = "Getting nsd from database"
                 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
@@ -5271,7 +5333,7 @@ class NsLcm(LcmBase):
                 for kdu_delta in delta.get("kdu-resource-delta", {}):
                     kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
                     kdu_name = kdu_profile["kdu-name"]
-                    resource_name = kdu_profile["resource-name"]
+                    resource_name = kdu_profile.get("resource-name", "")
 
                     # Might have different kdus in the same delta
                     # Should have list for each kdu
@@ -5287,7 +5349,6 @@ class NsLcm(LcmBase):
                             and kdur.get("helm-version") == "v2"
                         ):
                             k8s_cluster_type = "helm-chart"
-                            raise NotImplementedError
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -5317,7 +5378,13 @@ class NsLcm(LcmBase):
                         kdu_instance = deployed_kdu.get("kdu-instance")
                         instance_num = await self.k8scluster_map[
                             k8s_cluster_type
-                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
+                        ].get_scale_count(
+                            resource_name,
+                            kdu_instance,
+                            vca_id=vca_id,
+                            cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
+                            kdu_model=deployed_kdu.get("kdu-model"),
+                        )
                         kdu_replica_count = instance_num + kdu_delta.get(
                             "number-of-instances", 1
                         )
@@ -5401,7 +5468,7 @@ class NsLcm(LcmBase):
                 for kdu_delta in delta.get("kdu-resource-delta", {}):
                     kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
                     kdu_name = kdu_profile["kdu-name"]
-                    resource_name = kdu_profile["resource-name"]
+                    resource_name = kdu_profile.get("resource-name", "")
 
                     if not scaling_info["kdu-delete"].get(kdu_name, None):
                         scaling_info["kdu-delete"][kdu_name] = []
@@ -5415,7 +5482,6 @@ class NsLcm(LcmBase):
                             and kdur.get("helm-version") == "v2"
                         ):
                             k8s_cluster_type = "helm-chart"
-                            raise NotImplementedError
                         elif kdur.get("juju-bundle"):
                             k8s_cluster_type = "juju-bundle"
                         else:
@@ -5443,7 +5509,13 @@ class NsLcm(LcmBase):
                         kdu_instance = deployed_kdu.get("kdu-instance")
                         instance_num = await self.k8scluster_map[
                             k8s_cluster_type
-                        ].get_scale_count(resource_name, kdu_instance, vca_id=vca_id)
+                        ].get_scale_count(
+                            resource_name,
+                            kdu_instance,
+                            vca_id=vca_id,
+                            cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
+                            kdu_model=deployed_kdu.get("kdu-model"),
+                        )
                         kdu_replica_count = instance_num - kdu_delta.get(
                             "number-of-instances", 1
                         )
@@ -6149,6 +6221,7 @@ class NsLcm(LcmBase):
                 )
                 cluster_uuid = deployed_kdu["k8scluster-uuid"]
                 kdu_instance = deployed_kdu["kdu-instance"]
+                kdu_model = deployed_kdu.get("kdu-model")
                 scale = int(kdu_scaling_info["scale"])
                 k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]
 
@@ -6203,6 +6276,10 @@ class NsLcm(LcmBase):
                             scale,
                             kdu_scaling_info["resource-name"],
                             vca_id=vca_id,
+                            cluster_uuid=cluster_uuid,
+                            kdu_model=kdu_model,
+                            atomic=True,
+                            db_dict=db_dict,
                         ),
                         timeout=self.timeout_vca_on_error,
                     )
@@ -6286,11 +6363,15 @@ class NsLcm(LcmBase):
             db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
         )
 
-    async def add_prometheus_metrics(
-        self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
+    async def extract_prometheus_scrape_jobs(
+        self,
+        ee_id,
+        artifact_path,
+        ee_config_descriptor,
+        vnfr_id,
+        nsr_id,
+        target_ip
     ):
-        if not self.prometheus:
-            return
         # look if exist a file called 'prometheus*.j2' and
        artifact_content = self.fs.dir_ls(artifact_path)
         job_file = next(
@@ -6317,7 +6398,7 @@ class NsLcm(LcmBase):
             "EXPORTER_POD_IP": host_name,
             "EXPORTER_POD_PORT": host_port,
         }
-        job_list = self.prometheus.parse_job(job_data, variables)
+        job_list = parse_job(job_data, variables)
         # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
         for job in job_list:
             if (
@@ -6326,9 +6407,8 @@ class NsLcm(LcmBase):
             ):
                 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
             job["nsr_id"] = nsr_id
-        job_dict = {jl["job_name"]: jl for jl in job_list}
-        if await self.prometheus.update(job_dict):
-            return list(job_dict.keys())
+            job["vnfr_id"] = vnfr_id
+        return job_list
 
     def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
         """
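For reference, a minimal standalone sketch of the job bookkeeping this patch introduces: scrape jobs returned by extract_prometheus_scrape_jobs() are upserted into the "prometheus_jobs" collection keyed by "job_name" and tagged with "nsr_id"/"vnfr_id", and they are deleted by "nsr_id" when the VCA is destroyed. The in-memory dictionary below stands in for the osm_common database client, and the extra job fields ("scrape_interval", "static_configs") are assumed examples; the patch itself only guarantees job_name, nsr_id and vnfr_id.

# Illustrative sketch only (not part of the patch): an in-memory stand-in for
# the common-DB calls used above, i.e. db.set_one("prometheus_jobs", ..., upsert=True)
# after instantiation and db.del_list("prometheus_jobs", {"nsr_id": ...}) at teardown.
from typing import Any, Dict, List

prometheus_jobs: Dict[str, Dict[str, Any]] = {}  # collection keyed by job_name


def upsert_jobs(job_list: List[Dict[str, Any]]) -> None:
    # One document per scrape job, upserted on its unique job_name.
    for job in job_list:
        prometheus_jobs[job["job_name"]] = job


def delete_jobs_of_ns(nsr_id: str) -> None:
    # Deleting by nsr_id removes every job created for that NS instance.
    for name in [n for n, j in prometheus_jobs.items() if j.get("nsr_id") == nsr_id]:
        del prometheus_jobs[name]


if __name__ == "__main__":
    # "scrape_interval" and "static_configs" are assumed example fields.
    upsert_jobs(
        [
            {
                "job_name": "vnfr-1234_42",
                "nsr_id": "nsr-abcd",
                "vnfr_id": "vnfr-1234",
                "scrape_interval": "30s",
                "static_configs": [{"targets": ["10.0.0.10:9100"]}],
            }
        ]
    )
    assert "vnfr-1234_42" in prometheus_jobs
    delete_jobs_of_ns("nsr-abcd")
    assert not prometheus_jobs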