# -*- coding: utf-8 -*-

# Copyright 2018 Telefonica S.A.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
21 from typing
import Any
, Dict
, List
24 import logging
.handlers
36 from osm_lcm
import ROclient
37 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
38 from osm_lcm
.data_utils
.nsr
import (
41 get_deployed_vca_list
,
44 from osm_lcm
.data_utils
.vca
import (
53 from osm_lcm
.ng_ro
import NgRoClient
, NgRoException
54 from osm_lcm
.lcm_utils
import (
61 check_juju_bundle_existence
,
62 get_charm_artifact_path
,
66 from osm_lcm
.data_utils
.nsd
import (
67 get_ns_configuration_relation_list
,
71 from osm_lcm
.data_utils
.vnfd
import (
77 get_ee_sorted_initial_config_primitive_list
,
78 get_ee_sorted_terminate_config_primitive_list
,
80 get_virtual_link_profiles
,
85 get_number_of_instances
,
87 get_kdu_resource_profile
,
88 find_software_version
,
91 from osm_lcm
.data_utils
.list_utils
import find_in_list
92 from osm_lcm
.data_utils
.vnfr
import (
96 get_volumes_from_instantiation_params
,
98 from osm_lcm
.data_utils
.dict_utils
import parse_yaml_strings
99 from osm_lcm
.data_utils
.database
.vim_account
import VimAccountDB
100 from n2vc
.definitions
import RelationEndpoint
101 from n2vc
.k8s_helm_conn
import K8sHelmConnector
102 from n2vc
.k8s_helm3_conn
import K8sHelm3Connector
103 from n2vc
.k8s_juju_conn
import K8sJujuConnector
105 from osm_common
.dbbase
import DbException
106 from osm_common
.fsbase
import FsException
108 from osm_lcm
.data_utils
.database
.database
import Database
109 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
110 from osm_lcm
.data_utils
.wim
import (
112 get_target_wim_attrs
,
113 select_feasible_wim_account
,
116 from n2vc
.n2vc_juju_conn
import N2VCJujuConnector
117 from n2vc
.exceptions
import N2VCException
, N2VCNotFound
, K8sException
119 from osm_lcm
.lcm_helm_conn
import LCMHelmConn
120 from osm_lcm
.osm_config
import OsmConfigBuilder
121 from osm_lcm
.prometheus
import parse_job
123 from copy
import copy
, deepcopy
124 from time
import time
125 from uuid
import uuid4
127 from random
import SystemRandom
129 __author__
= "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
132 class NsLcm(LcmBase
):
133 SUBOPERATION_STATUS_NOT_FOUND
= -1
134 SUBOPERATION_STATUS_NEW
= -2
135 SUBOPERATION_STATUS_SKIP
= -3
136 EE_TLS_NAME
= "ee-tls"
137 task_name_deploy_vca
= "Deploying VCA"
138 rel_operation_types
= {
147 def __init__(self
, msg
, lcm_tasks
, config
: LcmCfg
):
149 Init, Connect to database, filesystem storage, and messaging
150 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
153 super().__init
__(msg
=msg
, logger
=logging
.getLogger("lcm.ns"))
155 self
.db
= Database().instance
.db
156 self
.fs
= Filesystem().instance
.fs
157 self
.lcm_tasks
= lcm_tasks
158 self
.timeout
= config
.timeout
159 self
.ro_config
= config
.RO
160 self
.vca_config
= config
.VCA
162 # create N2VC connector
163 self
.n2vc
= N2VCJujuConnector(
165 on_update_db
=self
._on
_update
_n
2vc
_db
,
170 self
.conn_helm_ee
= LCMHelmConn(
172 vca_config
=self
.vca_config
,
173 on_update_db
=self
._on
_update
_n
2vc
_db
,
176 self
.k8sclusterhelm2
= K8sHelmConnector(
177 kubectl_command
=self
.vca_config
.kubectlpath
,
178 helm_command
=self
.vca_config
.helmpath
,
185 self
.k8sclusterhelm3
= K8sHelm3Connector(
186 kubectl_command
=self
.vca_config
.kubectlpath
,
187 helm_command
=self
.vca_config
.helm3path
,
194 self
.k8sclusterjuju
= K8sJujuConnector(
195 kubectl_command
=self
.vca_config
.kubectlpath
,
196 juju_command
=self
.vca_config
.jujupath
,
198 on_update_db
=self
._on
_update
_k
8s
_db
,
203 self
.k8scluster_map
= {
204 "helm-chart": self
.k8sclusterhelm2
,
205 "helm-chart-v3": self
.k8sclusterhelm3
,
206 "chart": self
.k8sclusterhelm3
,
207 "juju-bundle": self
.k8sclusterjuju
,
208 "juju": self
.k8sclusterjuju
,
212 "lxc_proxy_charm": self
.n2vc
,
213 "native_charm": self
.n2vc
,
214 "k8s_proxy_charm": self
.n2vc
,
215 "helm": self
.conn_helm_ee
,
216 "helm-v3": self
.conn_helm_ee
,
220 self
.RO
= NgRoClient(**self
.ro_config
.to_dict())
222 self
.op_status_map
= {
223 "instantiation": self
.RO
.status
,
224 "termination": self
.RO
.status
,
225 "migrate": self
.RO
.status
,
226 "healing": self
.RO
.recreate_status
,
227 "verticalscale": self
.RO
.status
,
228 "start_stop_rebuild": self
.RO
.status
,
232 def increment_ip_mac(ip_mac
, vm_index
=1):
233 if not isinstance(ip_mac
, str):
236 # try with ipv4 look for last dot
237 i
= ip_mac
.rfind(".")
240 return "{}{}".format(ip_mac
[:i
], int(ip_mac
[i
:]) + vm_index
)
241 # try with ipv6 or mac look for last colon. Operate in hex
242 i
= ip_mac
.rfind(":")
245 # format in hex, len can be 2 for mac or 4 for ipv6
246 return ("{}{:0" + str(len(ip_mac
) - i
) + "x}").format(
247 ip_mac
[:i
], int(ip_mac
[i
:], 16) + vm_index
253 def _on_update_ro_db(self
, nsrs_id
, ro_descriptor
):
254 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
257 # TODO filter RO descriptor fields...
261 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
262 db_dict
["deploymentStatus"] = ro_descriptor
263 self
.update_db_2("nsrs", nsrs_id
, db_dict
)
265 except Exception as e
:
267 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id
, e
)
270 async def _on_update_n2vc_db(self
, table
, filter, path
, updated_data
, vca_id
=None):
271 # remove last dot from path (if exists)
272 if path
.endswith("."):
275 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
276 # .format(table, filter, path, updated_data))
278 nsr_id
= filter.get("_id")
280 # read ns record from database
281 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
282 current_ns_status
= nsr
.get("nsState")
284 # get vca status for NS
285 status_dict
= await self
.n2vc
.get_status(
286 namespace
="." + nsr_id
, yaml_format
=False, vca_id
=vca_id
291 db_dict
["vcaStatus"] = status_dict
293 # update configurationStatus for this VCA
295 vca_index
= int(path
[path
.rfind(".") + 1 :])
298 target_dict
=nsr
, key_list
=("_admin", "deployed", "VCA")
300 vca_status
= vca_list
[vca_index
].get("status")
302 configuration_status_list
= nsr
.get("configurationStatus")
303 config_status
= configuration_status_list
[vca_index
].get("status")
305 if config_status
== "BROKEN" and vca_status
!= "failed":
306 db_dict
["configurationStatus"][vca_index
] = "READY"
307 elif config_status
!= "BROKEN" and vca_status
== "failed":
308 db_dict
["configurationStatus"][vca_index
] = "BROKEN"
309 except Exception as e
:
310 # not update configurationStatus
311 self
.logger
.debug("Error updating vca_index (ignore): {}".format(e
))
313 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
314 # if nsState = 'DEGRADED' check if all is OK
316 if current_ns_status
in ("READY", "DEGRADED"):
317 error_description
= ""
319 if status_dict
.get("machines"):
320 for machine_id
in status_dict
.get("machines"):
321 machine
= status_dict
.get("machines").get(machine_id
)
322 # check machine agent-status
323 if machine
.get("agent-status"):
324 s
= machine
.get("agent-status").get("status")
327 error_description
+= (
328 "machine {} agent-status={} ; ".format(
332 # check machine instance status
333 if machine
.get("instance-status"):
334 s
= machine
.get("instance-status").get("status")
337 error_description
+= (
338 "machine {} instance-status={} ; ".format(
343 if status_dict
.get("applications"):
344 for app_id
in status_dict
.get("applications"):
345 app
= status_dict
.get("applications").get(app_id
)
346 # check application status
347 if app
.get("status"):
348 s
= app
.get("status").get("status")
351 error_description
+= (
352 "application {} status={} ; ".format(app_id
, s
)
355 if error_description
:
356 db_dict
["errorDescription"] = error_description
357 if current_ns_status
== "READY" and is_degraded
:
358 db_dict
["nsState"] = "DEGRADED"
359 if current_ns_status
== "DEGRADED" and not is_degraded
:
360 db_dict
["nsState"] = "READY"
363 self
.update_db_2("nsrs", nsr_id
, db_dict
)
365 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
367 except Exception as e
:
368 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
370 async def _on_update_k8s_db(
371 self
, cluster_uuid
, kdu_instance
, filter=None, vca_id
=None, cluster_type
="juju"
374 Updating vca status in NSR record
375 :param cluster_uuid: UUID of a k8s cluster
376 :param kdu_instance: The unique name of the KDU instance
377 :param filter: To get nsr_id
378 :cluster_type: The cluster type (juju, k8s)
382 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
383 # .format(cluster_uuid, kdu_instance, filter))
385 nsr_id
= filter.get("_id")
387 vca_status
= await self
.k8scluster_map
[cluster_type
].status_kdu(
388 cluster_uuid
=cluster_uuid
,
389 kdu_instance
=kdu_instance
,
391 complete_status
=True,
397 db_dict
["vcaStatus"] = {nsr_id
: vca_status
}
400 f
"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
404 self
.update_db_2("nsrs", nsr_id
, db_dict
)
405 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
407 except Exception as e
:
408 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
411 def _parse_cloud_init(cloud_init_text
, additional_params
, vnfd_id
, vdu_id
):
414 undefined
=StrictUndefined
,
415 autoescape
=select_autoescape(default_for_string
=True, default
=True),
417 template
= env
.from_string(cloud_init_text
)
418 return template
.render(additional_params
or {})
419 except UndefinedError
as e
:
421 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
422 "file, must be provided in the instantiation parameters inside the "
423 "'additionalParamsForVnf/Vdu' block".format(e
, vnfd_id
, vdu_id
)
425 except (TemplateError
, TemplateNotFound
) as e
:
427 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
432 def _get_vdu_cloud_init_content(self
, vdu
, vnfd
):
433 cloud_init_content
= cloud_init_file
= None
435 if vdu
.get("cloud-init-file"):
436 base_folder
= vnfd
["_admin"]["storage"]
437 if base_folder
["pkg-dir"]:
438 cloud_init_file
= "{}/{}/cloud_init/{}".format(
439 base_folder
["folder"],
440 base_folder
["pkg-dir"],
441 vdu
["cloud-init-file"],
444 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
445 base_folder
["folder"],
446 vdu
["cloud-init-file"],
448 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
449 cloud_init_content
= ci_file
.read()
450 elif vdu
.get("cloud-init"):
451 cloud_init_content
= vdu
["cloud-init"]
453 return cloud_init_content
454 except FsException
as e
:
456 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
457 vnfd
["id"], vdu
["id"], cloud_init_file
, e
461 def _get_vdu_additional_params(self
, db_vnfr
, vdu_id
):
463 (vdur
for vdur
in db_vnfr
.get("vdur") if vdu_id
== vdur
["vdu-id-ref"]), {}
465 additional_params
= vdur
.get("additionalParams")
466 return parse_yaml_strings(additional_params
)
468 def vnfd2RO(self
, vnfd
, new_id
=None, additionalParams
=None, nsrId
=None):
470 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
471 :param vnfd: input vnfd
472 :param new_id: overrides vnf id if provided
473 :param additionalParams: Instantiation params for VNFs provided
474 :param nsrId: Id of the NSR
475 :return: copy of vnfd
477 vnfd_RO
= deepcopy(vnfd
)
478 # remove unused by RO configuration, monitoring, scaling and internal keys
479 vnfd_RO
.pop("_id", None)
480 vnfd_RO
.pop("_admin", None)
481 vnfd_RO
.pop("monitoring-param", None)
482 vnfd_RO
.pop("scaling-group-descriptor", None)
483 vnfd_RO
.pop("kdu", None)
484 vnfd_RO
.pop("k8s-cluster", None)
486 vnfd_RO
["id"] = new_id
488 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
489 for vdu
in get_iterable(vnfd_RO
, "vdu"):
490 vdu
.pop("cloud-init-file", None)
491 vdu
.pop("cloud-init", None)
495 def ip_profile_2_RO(ip_profile
):
496 RO_ip_profile
= deepcopy(ip_profile
)
497 if "dns-server" in RO_ip_profile
:
498 if isinstance(RO_ip_profile
["dns-server"], list):
499 RO_ip_profile
["dns-address"] = []
500 for ds
in RO_ip_profile
.pop("dns-server"):
501 RO_ip_profile
["dns-address"].append(ds
["address"])
503 RO_ip_profile
["dns-address"] = RO_ip_profile
.pop("dns-server")
504 if RO_ip_profile
.get("ip-version") == "ipv4":
505 RO_ip_profile
["ip-version"] = "IPv4"
506 if RO_ip_profile
.get("ip-version") == "ipv6":
507 RO_ip_profile
["ip-version"] = "IPv6"
508 if "dhcp-params" in RO_ip_profile
:
509 RO_ip_profile
["dhcp"] = RO_ip_profile
.pop("dhcp-params")
512 def _get_ro_vim_id_for_vim_account(self
, vim_account
):
513 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account
})
514 if db_vim
["_admin"]["operationalState"] != "ENABLED":
516 "VIM={} is not available. operationalState={}".format(
517 vim_account
, db_vim
["_admin"]["operationalState"]
520 RO_vim_id
= db_vim
["_admin"]["deployed"]["RO"]
523 def get_ro_wim_id_for_wim_account(self
, wim_account
):
524 if isinstance(wim_account
, str):
525 db_wim
= self
.db
.get_one("wim_accounts", {"_id": wim_account
})
526 if db_wim
["_admin"]["operationalState"] != "ENABLED":
528 "WIM={} is not available. operationalState={}".format(
529 wim_account
, db_wim
["_admin"]["operationalState"]
532 RO_wim_id
= db_wim
["_admin"]["deployed"]["RO-account"]
537 def scale_vnfr(self
, db_vnfr
, vdu_create
=None, vdu_delete
=None, mark_delete
=False):
538 db_vdu_push_list
= []
540 db_update
= {"_admin.modified": time()}
542 for vdu_id
, vdu_count
in vdu_create
.items():
546 for vdur
in reversed(db_vnfr
["vdur"])
547 if vdur
["vdu-id-ref"] == vdu_id
552 # Read the template saved in the db:
554 "No vdur in the database. Using the vdur-template to scale"
556 vdur_template
= db_vnfr
.get("vdur-template")
557 if not vdur_template
:
559 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
563 vdur
= vdur_template
[0]
564 # Delete a template from the database after using it
567 {"_id": db_vnfr
["_id"]},
569 pull
={"vdur-template": {"_id": vdur
["_id"]}},
571 for count
in range(vdu_count
):
572 vdur_copy
= deepcopy(vdur
)
573 vdur_copy
["status"] = "BUILD"
574 vdur_copy
["status-detailed"] = None
575 vdur_copy
["ip-address"] = None
576 vdur_copy
["_id"] = str(uuid4())
577 vdur_copy
["count-index"] += count
+ 1
578 vdur_copy
["id"] = "{}-{}".format(
579 vdur_copy
["vdu-id-ref"], vdur_copy
["count-index"]
581 vdur_copy
.pop("vim_info", None)
582 for iface
in vdur_copy
["interfaces"]:
583 if iface
.get("fixed-ip"):
584 iface
["ip-address"] = self
.increment_ip_mac(
585 iface
["ip-address"], count
+ 1
588 iface
.pop("ip-address", None)
589 if iface
.get("fixed-mac"):
590 iface
["mac-address"] = self
.increment_ip_mac(
591 iface
["mac-address"], count
+ 1
594 iface
.pop("mac-address", None)
598 ) # only first vdu can be managment of vnf
599 db_vdu_push_list
.append(vdur_copy
)
600 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
602 if len(db_vnfr
["vdur"]) == 1:
603 # The scale will move to 0 instances
605 "Scaling to 0 !, creating the template with the last vdur"
607 template_vdur
= [db_vnfr
["vdur"][0]]
608 for vdu_id
, vdu_count
in vdu_delete
.items():
610 indexes_to_delete
= [
612 for iv
in enumerate(db_vnfr
["vdur"])
613 if iv
[1]["vdu-id-ref"] == vdu_id
617 "vdur.{}.status".format(i
): "DELETING"
618 for i
in indexes_to_delete
[-vdu_count
:]
622 # it must be deleted one by one because common.db does not allow otherwise
625 for v
in reversed(db_vnfr
["vdur"])
626 if v
["vdu-id-ref"] == vdu_id
628 for vdu
in vdus_to_delete
[:vdu_count
]:
631 {"_id": db_vnfr
["_id"]},
633 pull
={"vdur": {"_id": vdu
["_id"]}},
637 db_push
["vdur"] = db_vdu_push_list
639 db_push
["vdur-template"] = template_vdur
642 db_vnfr
["vdur-template"] = template_vdur
643 self
.db
.set_one("vnfrs", {"_id": db_vnfr
["_id"]}, db_update
, push_list
=db_push
)
644 # modify passed dictionary db_vnfr
645 db_vnfr_
= self
.db
.get_one("vnfrs", {"_id": db_vnfr
["_id"]})
646 db_vnfr
["vdur"] = db_vnfr_
["vdur"]
648 def ns_update_nsr(self
, ns_update_nsr
, db_nsr
, nsr_desc_RO
):
650 Updates database nsr with the RO info for the created vld
651 :param ns_update_nsr: dictionary to be filled with the updated info
652 :param db_nsr: content of db_nsr. This is also modified
653 :param nsr_desc_RO: nsr descriptor from RO
654 :return: Nothing, LcmException is raised on errors
657 for vld_index
, vld
in enumerate(get_iterable(db_nsr
, "vld")):
658 for net_RO
in get_iterable(nsr_desc_RO
, "nets"):
659 if vld
["id"] != net_RO
.get("ns_net_osm_id"):
661 vld
["vim-id"] = net_RO
.get("vim_net_id")
662 vld
["name"] = net_RO
.get("vim_name")
663 vld
["status"] = net_RO
.get("status")
664 vld
["status-detailed"] = net_RO
.get("error_msg")
665 ns_update_nsr
["vld.{}".format(vld_index
)] = vld
669 "ns_update_nsr: Not found vld={} at RO info".format(vld
["id"])
672 def set_vnfr_at_error(self
, db_vnfrs
, error_text
):
674 for db_vnfr
in db_vnfrs
.values():
675 vnfr_update
= {"status": "ERROR"}
676 for vdu_index
, vdur
in enumerate(get_iterable(db_vnfr
, "vdur")):
677 if "status" not in vdur
:
678 vdur
["status"] = "ERROR"
679 vnfr_update
["vdur.{}.status".format(vdu_index
)] = "ERROR"
681 vdur
["status-detailed"] = str(error_text
)
683 "vdur.{}.status-detailed".format(vdu_index
)
685 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
686 except DbException
as e
:
687 self
.logger
.error("Cannot update vnf. {}".format(e
))
689 def ns_update_vnfr(self
, db_vnfrs
, nsr_desc_RO
):
691 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
692 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
693 :param nsr_desc_RO: nsr descriptor from RO
694 :return: Nothing, LcmException is raised on errors
696 for vnf_index
, db_vnfr
in db_vnfrs
.items():
697 for vnf_RO
in nsr_desc_RO
["vnfs"]:
698 if vnf_RO
["member_vnf_index"] != vnf_index
:
701 if vnf_RO
.get("ip_address"):
702 db_vnfr
["ip-address"] = vnfr_update
["ip-address"] = vnf_RO
[
705 elif not db_vnfr
.get("ip-address"):
706 if db_vnfr
.get("vdur"): # if not VDUs, there is not ip_address
707 raise LcmExceptionNoMgmtIP(
708 "ns member_vnf_index '{}' has no IP address".format(
713 for vdu_index
, vdur
in enumerate(get_iterable(db_vnfr
, "vdur")):
714 vdur_RO_count_index
= 0
715 if vdur
.get("pdu-type"):
717 for vdur_RO
in get_iterable(vnf_RO
, "vms"):
718 if vdur
["vdu-id-ref"] != vdur_RO
["vdu_osm_id"]:
720 if vdur
["count-index"] != vdur_RO_count_index
:
721 vdur_RO_count_index
+= 1
723 vdur
["vim-id"] = vdur_RO
.get("vim_vm_id")
724 if vdur_RO
.get("ip_address"):
725 vdur
["ip-address"] = vdur_RO
["ip_address"].split(";")[0]
727 vdur
["ip-address"] = None
728 vdur
["vdu-id-ref"] = vdur_RO
.get("vdu_osm_id")
729 vdur
["name"] = vdur_RO
.get("vim_name")
730 vdur
["status"] = vdur_RO
.get("status")
731 vdur
["status-detailed"] = vdur_RO
.get("error_msg")
732 for ifacer
in get_iterable(vdur
, "interfaces"):
733 for interface_RO
in get_iterable(vdur_RO
, "interfaces"):
734 if ifacer
["name"] == interface_RO
.get("internal_name"):
735 ifacer
["ip-address"] = interface_RO
.get(
738 ifacer
["mac-address"] = interface_RO
.get(
744 "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
745 "from VIM info".format(
746 vnf_index
, vdur
["vdu-id-ref"], ifacer
["name"]
749 vnfr_update
["vdur.{}".format(vdu_index
)] = vdur
753 "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
755 vnf_index
, vdur
["vdu-id-ref"], vdur
["count-index"]
759 for vld_index
, vld
in enumerate(get_iterable(db_vnfr
, "vld")):
760 for net_RO
in get_iterable(nsr_desc_RO
, "nets"):
761 if vld
["id"] != net_RO
.get("vnf_net_osm_id"):
763 vld
["vim-id"] = net_RO
.get("vim_net_id")
764 vld
["name"] = net_RO
.get("vim_name")
765 vld
["status"] = net_RO
.get("status")
766 vld
["status-detailed"] = net_RO
.get("error_msg")
767 vnfr_update
["vld.{}".format(vld_index
)] = vld
771 "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
776 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
781 "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
786 def _get_ns_config_info(self
, nsr_id
):
788 Generates a mapping between vnf,vdu elements and the N2VC id
789 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
790 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
791 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
792 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
794 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
795 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
797 ns_config_info
= {"osm-config-mapping": mapping
}
798 for vca
in vca_deployed_list
:
799 if not vca
["member-vnf-index"]:
801 if not vca
["vdu_id"]:
802 mapping
[vca
["member-vnf-index"]] = vca
["application"]
806 vca
["member-vnf-index"], vca
["vdu_id"], vca
["vdu_count_index"]
808 ] = vca
["application"]
809 return ns_config_info
811 async def _instantiate_ng_ro(
827 def get_vim_account(vim_account_id
):
829 if vim_account_id
in db_vims
:
830 return db_vims
[vim_account_id
]
831 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
832 db_vims
[vim_account_id
] = db_vim
835 # modify target_vld info with instantiation parameters
836 def parse_vld_instantiation_params(
837 target_vim
, target_vld
, vld_params
, target_sdn
839 if vld_params
.get("ip-profile"):
840 target_vld
["vim_info"][target_vim
]["ip_profile"] = vld_to_ro_ip_profile(
841 vld_params
["ip-profile"]
843 if vld_params
.get("provider-network"):
844 target_vld
["vim_info"][target_vim
]["provider_network"] = vld_params
[
847 if "sdn-ports" in vld_params
["provider-network"] and target_sdn
:
848 target_vld
["vim_info"][target_sdn
]["sdn-ports"] = vld_params
[
852 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
853 # if wim_account_id is specified in vld_params, validate if it is feasible.
854 wim_account_id
, db_wim
= select_feasible_wim_account(
855 db_nsr
, db_vnfrs
, target_vld
, vld_params
, self
.logger
859 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
860 self
.logger
.info("WIM selected: {:s}".format(str(wim_account_id
)))
861 # update vld_params with correct WIM account Id
862 vld_params
["wimAccountId"] = wim_account_id
864 target_wim
= "wim:{}".format(wim_account_id
)
865 target_wim_attrs
= get_target_wim_attrs(nsr_id
, target_vld
, vld_params
)
866 sdn_ports
= get_sdn_ports(vld_params
, db_wim
)
867 if len(sdn_ports
) > 0:
868 target_vld
["vim_info"][target_wim
] = target_wim_attrs
869 target_vld
["vim_info"][target_wim
]["sdn-ports"] = sdn_ports
872 "Target VLD with WIM data: {:s}".format(str(target_vld
))
875 for param
in ("vim-network-name", "vim-network-id"):
876 if vld_params
.get(param
):
877 if isinstance(vld_params
[param
], dict):
878 for vim
, vim_net
in vld_params
[param
].items():
879 other_target_vim
= "vim:" + vim
881 target_vld
["vim_info"],
882 (other_target_vim
, param
.replace("-", "_")),
885 else: # isinstance str
886 target_vld
["vim_info"][target_vim
][
887 param
.replace("-", "_")
888 ] = vld_params
[param
]
889 if vld_params
.get("common_id"):
890 target_vld
["common_id"] = vld_params
.get("common_id")
892 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
893 def update_ns_vld_target(target
, ns_params
):
894 for vnf_params
in ns_params
.get("vnf", ()):
895 if vnf_params
.get("vimAccountId"):
899 for vnfr
in db_vnfrs
.values()
900 if vnf_params
["member-vnf-index"]
901 == vnfr
["member-vnf-index-ref"]
905 vdur
= next((vdur
for vdur
in target_vnf
.get("vdur", ())), None)
908 for a_index
, a_vld
in enumerate(target
["ns"]["vld"]):
909 target_vld
= find_in_list(
910 get_iterable(vdur
, "interfaces"),
911 lambda iface
: iface
.get("ns-vld-id") == a_vld
["name"],
914 vld_params
= find_in_list(
915 get_iterable(ns_params
, "vld"),
916 lambda v_vld
: v_vld
["name"] in (a_vld
["name"], a_vld
["id"]),
919 if vnf_params
.get("vimAccountId") not in a_vld
.get(
922 target_vim_network_list
= [
923 v
for _
, v
in a_vld
.get("vim_info").items()
925 target_vim_network_name
= next(
927 item
.get("vim_network_name", "")
928 for item
in target_vim_network_list
933 target
["ns"]["vld"][a_index
].get("vim_info").update(
935 "vim:{}".format(vnf_params
["vimAccountId"]): {
936 "vim_network_name": target_vim_network_name
,
942 for param
in ("vim-network-name", "vim-network-id"):
943 if vld_params
.get(param
) and isinstance(
944 vld_params
[param
], dict
946 for vim
, vim_net
in vld_params
[
949 other_target_vim
= "vim:" + vim
951 target
["ns"]["vld"][a_index
].get(
956 param
.replace("-", "_"),
961 nslcmop_id
= db_nslcmop
["_id"]
963 "name": db_nsr
["name"],
966 "image": deepcopy(db_nsr
["image"]),
967 "flavor": deepcopy(db_nsr
["flavor"]),
968 "action_id": nslcmop_id
,
969 "cloud_init_content": {},
971 for image
in target
["image"]:
972 image
["vim_info"] = {}
973 for flavor
in target
["flavor"]:
974 flavor
["vim_info"] = {}
975 if db_nsr
.get("shared-volumes"):
976 target
["shared-volumes"] = deepcopy(db_nsr
["shared-volumes"])
977 for shared_volumes
in target
["shared-volumes"]:
978 shared_volumes
["vim_info"] = {}
979 if db_nsr
.get("affinity-or-anti-affinity-group"):
980 target
["affinity-or-anti-affinity-group"] = deepcopy(
981 db_nsr
["affinity-or-anti-affinity-group"]
983 for affinity_or_anti_affinity_group
in target
[
984 "affinity-or-anti-affinity-group"
986 affinity_or_anti_affinity_group
["vim_info"] = {}
988 if db_nslcmop
.get("lcmOperationType") != "instantiate":
989 # get parameters of instantiation:
990 db_nslcmop_instantiate
= self
.db
.get_list(
993 "nsInstanceId": db_nslcmop
["nsInstanceId"],
994 "lcmOperationType": "instantiate",
997 ns_params
= db_nslcmop_instantiate
.get("operationParams")
999 ns_params
= db_nslcmop
.get("operationParams")
1000 ssh_keys_instantiation
= ns_params
.get("ssh_keys") or []
1001 ssh_keys_all
= ssh_keys_instantiation
+ (n2vc_key_list
or [])
1004 for vld_index
, vld
in enumerate(db_nsr
.get("vld")):
1005 target_vim
= "vim:{}".format(ns_params
["vimAccountId"])
1008 "name": vld
["name"],
1009 "mgmt-network": vld
.get("mgmt-network", False),
1010 "type": vld
.get("type"),
1013 "vim_network_name": vld
.get("vim-network-name"),
1014 "vim_account_id": ns_params
["vimAccountId"],
1018 # check if this network needs SDN assist
1019 if vld
.get("pci-interfaces"):
1020 db_vim
= get_vim_account(ns_params
["vimAccountId"])
1021 if vim_config
:= db_vim
.get("config"):
1022 if sdnc_id
:= vim_config
.get("sdn-controller"):
1023 sdn_vld
= "nsrs:{}:vld.{}".format(nsr_id
, vld
["id"])
1024 target_sdn
= "sdn:{}".format(sdnc_id
)
1025 target_vld
["vim_info"][target_sdn
] = {
1027 "target_vim": target_vim
,
1029 "type": vld
.get("type"),
1032 nsd_vnf_profiles
= get_vnf_profiles(nsd
)
1033 for nsd_vnf_profile
in nsd_vnf_profiles
:
1034 for cp
in nsd_vnf_profile
["virtual-link-connectivity"]:
1035 if cp
["virtual-link-profile-id"] == vld
["id"]:
1037 "member_vnf:{}.{}".format(
1038 cp
["constituent-cpd-id"][0][
1039 "constituent-base-element-id"
1041 cp
["constituent-cpd-id"][0]["constituent-cpd-id"],
1043 ] = "nsrs:{}:vld.{}".format(nsr_id
, vld_index
)
1045 # check at nsd descriptor, if there is an ip-profile
1047 nsd_vlp
= find_in_list(
1048 get_virtual_link_profiles(nsd
),
1049 lambda a_link_profile
: a_link_profile
["virtual-link-desc-id"]
1054 and nsd_vlp
.get("virtual-link-protocol-data")
1055 and nsd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
1057 vld_params
["ip-profile"] = nsd_vlp
["virtual-link-protocol-data"][
1061 # update vld_params with instantiation params
1062 vld_instantiation_params
= find_in_list(
1063 get_iterable(ns_params
, "vld"),
1064 lambda a_vld
: a_vld
["name"] in (vld
["name"], vld
["id"]),
1066 if vld_instantiation_params
:
1067 vld_params
.update(vld_instantiation_params
)
1068 parse_vld_instantiation_params(target_vim
, target_vld
, vld_params
, None)
1069 target
["ns"]["vld"].append(target_vld
)
1070 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1071 update_ns_vld_target(target
, ns_params
)
1073 for vnfr
in db_vnfrs
.values():
1074 vnfd
= find_in_list(
1075 db_vnfds
, lambda db_vnf
: db_vnf
["id"] == vnfr
["vnfd-ref"]
1077 vnf_params
= find_in_list(
1078 get_iterable(ns_params
, "vnf"),
1079 lambda a_vnf
: a_vnf
["member-vnf-index"] == vnfr
["member-vnf-index-ref"],
1081 target_vnf
= deepcopy(vnfr
)
1082 target_vim
= "vim:{}".format(vnfr
["vim-account-id"])
1083 for vld
in target_vnf
.get("vld", ()):
1084 # check if connected to a ns.vld, to fill target'
1085 vnf_cp
= find_in_list(
1086 vnfd
.get("int-virtual-link-desc", ()),
1087 lambda cpd
: cpd
.get("id") == vld
["id"],
1090 ns_cp
= "member_vnf:{}.{}".format(
1091 vnfr
["member-vnf-index-ref"], vnf_cp
["id"]
1093 if cp2target
.get(ns_cp
):
1094 vld
["target"] = cp2target
[ns_cp
]
1097 target_vim
: {"vim_network_name": vld
.get("vim-network-name")}
1099 # check if this network needs SDN assist
1101 if vld
.get("pci-interfaces"):
1102 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1103 sdnc_id
= db_vim
["config"].get("sdn-controller")
1105 sdn_vld
= "vnfrs:{}:vld.{}".format(target_vnf
["_id"], vld
["id"])
1106 target_sdn
= "sdn:{}".format(sdnc_id
)
1107 vld
["vim_info"][target_sdn
] = {
1109 "target_vim": target_vim
,
1111 "type": vld
.get("type"),
1114 # check at vnfd descriptor, if there is an ip-profile
1116 vnfd_vlp
= find_in_list(
1117 get_virtual_link_profiles(vnfd
),
1118 lambda a_link_profile
: a_link_profile
["id"] == vld
["id"],
1122 and vnfd_vlp
.get("virtual-link-protocol-data")
1123 and vnfd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
1125 vld_params
["ip-profile"] = vnfd_vlp
["virtual-link-protocol-data"][
1128 # update vld_params with instantiation params
1130 vld_instantiation_params
= find_in_list(
1131 get_iterable(vnf_params
, "internal-vld"),
1132 lambda i_vld
: i_vld
["name"] == vld
["id"],
1134 if vld_instantiation_params
:
1135 vld_params
.update(vld_instantiation_params
)
1136 parse_vld_instantiation_params(target_vim
, vld
, vld_params
, target_sdn
)
1139 for vdur
in target_vnf
.get("vdur", ()):
1140 if vdur
.get("status") == "DELETING" or vdur
.get("pdu-type"):
1141 continue # This vdu must not be created
1142 vdur
["vim_info"] = {"vim_account_id": vnfr
["vim-account-id"]}
1144 self
.logger
.debug("NS > ssh_keys > {}".format(ssh_keys_all
))
1147 vdu_configuration
= get_configuration(vnfd
, vdur
["vdu-id-ref"])
1148 vnf_configuration
= get_configuration(vnfd
, vnfd
["id"])
1151 and vdu_configuration
.get("config-access")
1152 and vdu_configuration
.get("config-access").get("ssh-access")
1154 vdur
["ssh-keys"] = ssh_keys_all
1155 vdur
["ssh-access-required"] = vdu_configuration
[
1157 ]["ssh-access"]["required"]
1160 and vnf_configuration
.get("config-access")
1161 and vnf_configuration
.get("config-access").get("ssh-access")
1162 and any(iface
.get("mgmt-vnf") for iface
in vdur
["interfaces"])
1164 vdur
["ssh-keys"] = ssh_keys_all
1165 vdur
["ssh-access-required"] = vnf_configuration
[
1167 ]["ssh-access"]["required"]
1168 elif ssh_keys_instantiation
and find_in_list(
1169 vdur
["interfaces"], lambda iface
: iface
.get("mgmt-vnf")
1171 vdur
["ssh-keys"] = ssh_keys_instantiation
1173 self
.logger
.debug("NS > vdur > {}".format(vdur
))
1175 vdud
= get_vdu(vnfd
, vdur
["vdu-id-ref"])
1177 if vdud
.get("cloud-init-file"):
1178 vdur
["cloud-init"] = "{}:file:{}".format(
1179 vnfd
["_id"], vdud
.get("cloud-init-file")
1181 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1182 if vdur
["cloud-init"] not in target
["cloud_init_content"]:
1183 base_folder
= vnfd
["_admin"]["storage"]
1184 if base_folder
["pkg-dir"]:
1185 cloud_init_file
= "{}/{}/cloud_init/{}".format(
1186 base_folder
["folder"],
1187 base_folder
["pkg-dir"],
1188 vdud
.get("cloud-init-file"),
1191 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
1192 base_folder
["folder"],
1193 vdud
.get("cloud-init-file"),
1195 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
1196 target
["cloud_init_content"][
1199 elif vdud
.get("cloud-init"):
1200 vdur
["cloud-init"] = "{}:vdu:{}".format(
1201 vnfd
["_id"], get_vdu_index(vnfd
, vdur
["vdu-id-ref"])
1203 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1204 target
["cloud_init_content"][vdur
["cloud-init"]] = vdud
[
1207 vdur
["additionalParams"] = vdur
.get("additionalParams") or {}
1208 deploy_params_vdu
= self
._format
_additional
_params
(
1209 vdur
.get("additionalParams") or {}
1211 deploy_params_vdu
["OSM"] = get_osm_params(
1212 vnfr
, vdur
["vdu-id-ref"], vdur
["count-index"]
1214 vdur
["additionalParams"] = deploy_params_vdu
1217 ns_flavor
= target
["flavor"][int(vdur
["ns-flavor-id"])]
1218 if target_vim
not in ns_flavor
["vim_info"]:
1219 ns_flavor
["vim_info"][target_vim
] = {}
1222 # in case alternative images are provided we must check if they should be applied
1223 # for the vim_type, modify the vim_type taking into account
1224 ns_image_id
= int(vdur
["ns-image-id"])
1225 if vdur
.get("alt-image-ids"):
1226 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1227 vim_type
= db_vim
["vim_type"]
1228 for alt_image_id
in vdur
.get("alt-image-ids"):
1229 ns_alt_image
= target
["image"][int(alt_image_id
)]
1230 if vim_type
== ns_alt_image
.get("vim-type"):
1231 # must use alternative image
1233 "use alternative image id: {}".format(alt_image_id
)
1235 ns_image_id
= alt_image_id
1236 vdur
["ns-image-id"] = ns_image_id
1238 ns_image
= target
["image"][int(ns_image_id
)]
1239 if target_vim
not in ns_image
["vim_info"]:
1240 ns_image
["vim_info"][target_vim
] = {}
1243 if vdur
.get("affinity-or-anti-affinity-group-id"):
1244 for ags_id
in vdur
["affinity-or-anti-affinity-group-id"]:
1245 ns_ags
= target
["affinity-or-anti-affinity-group"][int(ags_id
)]
1246 if target_vim
not in ns_ags
["vim_info"]:
1247 ns_ags
["vim_info"][target_vim
] = {}
1250 if vdur
.get("shared-volumes-id"):
1251 for sv_id
in vdur
["shared-volumes-id"]:
1252 ns_sv
= find_in_list(
1253 target
["shared-volumes"], lambda sv
: sv_id
in sv
["id"]
1256 ns_sv
["vim_info"][target_vim
] = {}
1258 vdur
["vim_info"] = {target_vim
: {}}
1259 # instantiation parameters
1261 vdu_instantiation_params
= find_in_list(
1262 get_iterable(vnf_params
, "vdu"),
1263 lambda i_vdu
: i_vdu
["id"] == vdud
["id"],
1265 if vdu_instantiation_params
:
1266 # Parse the vdu_volumes from the instantiation params
1267 vdu_volumes
= get_volumes_from_instantiation_params(
1268 vdu_instantiation_params
, vdud
1270 vdur
["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1271 vdur
["additionalParams"]["OSM"][
1273 ] = vdu_instantiation_params
.get("vim-flavor-id")
1274 vdur_list
.append(vdur
)
1275 target_vnf
["vdur"] = vdur_list
1276 target
["vnf"].append(target_vnf
)
1278 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
1279 desc
= await self
.RO
.deploy(nsr_id
, target
)
1280 self
.logger
.debug("RO return > {}".format(desc
))
1281 action_id
= desc
["action_id"]
1282 await self
._wait
_ng
_ro
(
1289 operation
="instantiation",
1294 "_admin.deployed.RO.operational-status": "running",
1295 "detailed-status": " ".join(stage
),
1297 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1298 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1299 self
._write
_op
_status
(nslcmop_id
, stage
)
1301 logging_text
+ "ns deployed at RO. RO_id={}".format(action_id
)
async def _wait_ng_ro(
    self,
    nsr_id,
    action_id,
    nslcmop_id=None,
    start_time=None,
    timeout=600,
    stage=None,
    operation=None,
):
    """Poll NG-RO until the given action finishes, fails or the timeout expires.

    :param nsr_id: NS record id the action belongs to
    :param action_id: RO action id to poll (status getter chosen by `operation`)
    :param nslcmop_id: if provided, progress is mirrored to the nslcmop record
    :param start_time: epoch seconds the wait started; defaults to now
    :param timeout: maximum seconds to wait
    :param stage: 3-item stage list; stage[2] is overwritten with VIM progress
    :param operation: key into self.op_status_map selecting the status coroutine
    :raises NgRoException: on RO-reported failure or on timeout
    """
    detailed_status_old = None
    db_nsr_update = {}
    start_time = start_time or time()
    while time() <= start_time + timeout:
        desc_status = await self.op_status_map[operation](nsr_id, action_id)
        self.logger.debug("Wait NG RO > {}".format(desc_status))
        if desc_status["status"] == "FAILED":
            raise NgRoException(desc_status["details"])
        elif desc_status["status"] == "BUILD":
            if stage:
                stage[2] = "VIM: ({})".format(desc_status["details"])
        elif desc_status["status"] == "DONE":
            if stage:
                stage[2] = "Deployed at VIM"
            break
        else:
            assert False, "ROclient.check_ns_status returns unknown {}".format(
                desc_status["status"]
            )
        # persist progress only when it actually changed, to avoid db churn
        if stage and nslcmop_id and stage[2] != detailed_status_old:
            detailed_status_old = stage[2]
            db_nsr_update["detailed-status"] = " ".join(stage)
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
        await asyncio.sleep(15)
    else:  # timeout_ns_deploy
        raise NgRoException("Timeout waiting ns to deploy")
async def _terminate_ng_ro(
    self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
):
    """Terminate NS resources at NG-RO.

    Sends an empty deployment target to RO (which removes every deployed
    item), waits for the action to finish, then deletes the RO nsr record.
    Updates stage[2] and the nsrs/nslcmops database records.

    :raises LcmException: if any deletion step failed (collected details)
    """
    db_nsr_update = {}
    failed_detail = []
    action_id = None
    start_deploy = time()
    try:
        # An empty target makes NG-RO delete everything deployed for this NS.
        # NOTE(review): empty-target keys reconstructed from fragments — confirm
        target = {
            "ns": {"vld": []},
            "vnf": [],
            "image": [],
            "flavor": [],
            "action_id": nslcmop_id,
        }
        desc = await self.RO.deploy(nsr_id, target)
        action_id = desc["action_id"]
        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
        self.logger.debug(
            logging_text
            + "ns terminate action at RO. action_id={}".format(action_id)
        )

        # wait until done
        delete_timeout = 20 * 60  # 20 minutes
        await self._wait_ng_ro(
            nsr_id,
            action_id,
            nslcmop_id,
            start_deploy,
            delete_timeout,
            stage,
            operation="termination",
        )
        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
        # delete the whole nsr record at RO
        await self.RO.delete(nsr_id)
    except NgRoException as e:
        if e.http_code == 404:  # not found
            db_nsr_update["_admin.deployed.RO.nsr_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            self.logger.debug(
                logging_text + "RO_action_id={} already deleted".format(action_id)
            )
        elif e.http_code == 409:  # conflict
            failed_detail.append("delete conflict: {}".format(e))
            self.logger.debug(
                logging_text
                + "RO_action_id={} delete conflict: {}".format(action_id, e)
            )
        else:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text
                + "RO_action_id={} delete error: {}".format(action_id, e)
            )
    except Exception as e:
        failed_detail.append("delete error: {}".format(e))
        self.logger.error(
            logging_text
            + "RO_action_id={} delete error: {}".format(action_id, e)
        )

    if failed_detail:
        stage[2] = "Error deleting from VIM"
    else:
        stage[2] = "Deleted from VIM"
    db_nsr_update["detailed-status"] = " ".join(stage)
    self.update_db_2("nsrs", nsr_id, db_nsr_update)
    self._write_op_status(nslcmop_id, stage)

    if failed_detail:
        raise LcmException("; ".join(failed_detail))
async def instantiate_RO(
    self,
    logging_text,
    nsr_id,
    nsd,
    db_nsr,
    db_nslcmop,
    db_vnfrs,
    db_vnfds,
    n2vc_key_list,
    stage,
):
    """
    Instantiate at RO
    :param logging_text: preffix text to use at logging
    :param nsr_id: nsr identity
    :param nsd: database content of ns descriptor
    :param db_nsr: database content of ns record
    :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
    :param db_vnfrs: database content of vnf records, indexed by member-vnf-index
    :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
    :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
    :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
    :return: None or exception
    """
    try:
        start_deploy = time()
        ns_params = db_nslcmop.get("operationParams")
        if ns_params and ns_params.get("timeout_ns_deploy"):
            timeout_ns_deploy = ns_params["timeout_ns_deploy"]
        else:
            timeout_ns_deploy = self.timeout.ns_deploy

        # Check for and optionally request placement optimization. Database will be updated if placement activated
        stage[2] = "Waiting for Placement."
        if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
            # in case of placement change, fix ns_params["vimAccountId"] if it
            # is no longer present at any vnfr
            for vnfr in db_vnfrs.values():
                if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
                    break
            else:
                # BUGFIX: this line was a no-op comparison ("==") in the
                # original; the intent is to assign the placement-selected
                # vim account so subsequent steps use a valid one
                ns_params["vimAccountId"] = vnfr["vim-account-id"]

        return await self._instantiate_ng_ro(
            logging_text,
            nsr_id,
            nsd,
            db_nsr,
            db_nslcmop,
            db_vnfrs,
            db_vnfds,
            n2vc_key_list,
            stage,
            start_deploy,
            timeout_ns_deploy,
        )
    except Exception as e:
        stage[2] = "ERROR deploying at VIM"
        self.set_vnfr_at_error(db_vnfrs, str(e))
        self.logger.error(
            "Error deploying at VIM {}".format(e),
            exc_info=not isinstance(
                e,
                (
                    ROclient.ROClientException,
                    LcmException,
                    DbException,
                    NgRoException,
                ),
            ),
        )
        raise
async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
    """
    Wait for kdu to be up, get ip address
    :param logging_text: prefix use for logging
    :param nsr_id: NS record id (unused here, kept for interface symmetry)
    :param vnfr_id: VNF record id holding the kdur entry
    :param kdu_name: name of the KDU to wait for
    :return: IP address, K8s services
    :raises LcmException: if the kdur is not found, is in error state,
        or does not become READY/ENABLED within the retry budget
    """
    # self.logger.debug(logging_text + "Starting wait_kdu_up")
    nb_tries = 0

    while nb_tries < 360:
        # re-read the vnfr on each iteration to observe status changes
        db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
        kdur = next(
            (
                x
                for x in get_iterable(db_vnfr, "kdur")
                if x.get("kdu-name") == kdu_name
            ),
            None,
        )
        if not kdur:
            raise LcmException(
                "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
            )
        if kdur.get("status"):
            if kdur["status"] in ("READY", "ENABLED"):
                return kdur.get("ip-address"), kdur.get("services")
            else:
                raise LcmException(
                    "target KDU={} is in error state".format(kdu_name)
                )

        await asyncio.sleep(10)
        nb_tries += 1
    raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
async def wait_vm_up_insert_key_ro(
    self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
):
    """
    Wait for ip addres at RO, and optionally, insert public key in virtual machine
    :param logging_text: prefix use for logging
    :param nsr_id: NS record id
    :param vnfr_id: VNF record id to inspect
    :param vdu_id: target vdu id; None means the VNF mgmt address is used
    :param vdu_index: count-index of the vdu when vdu_id is given
    :param pub_key: public ssh key to inject, None to skip
    :param user: user to apply the public ssh key
    :return: IP address of the target VM/VNF
    :raises LcmException: on retry exhaustion or target in error state
    """
    self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
    ip_address = None
    target_vdu_id = None
    ro_retries = 0

    while True:
        ro_retries += 1
        if ro_retries >= 360:  # 1 hour
            raise LcmException(
                "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
            )

        await asyncio.sleep(10)

        # wait until NS is deployed at RO
        if not target_vdu_id:
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

            if not vdu_id:  # for the VNF case
                if db_vnfr.get("status") == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VNF is in error state"
                    )
                ip_address = db_vnfr.get("ip-address")
                if not ip_address:
                    continue
                # locate the vdur that owns the VNF mgmt ip
                vdur = next(
                    (
                        x
                        for x in get_iterable(db_vnfr, "vdur")
                        if x.get("ip-address") == ip_address
                    ),
                    None,
                )
            else:  # VDU case
                vdur = next(
                    (
                        x
                        for x in get_iterable(db_vnfr, "vdur")
                        if x.get("vdu-id-ref") == vdu_id
                        and x.get("count-index") == vdu_index
                    ),
                    None,
                )

            if (
                not vdur and len(db_vnfr.get("vdur", ())) == 1
            ):  # If only one, this should be the target vdu
                vdur = db_vnfr["vdur"][0]
            if not vdur:
                raise LcmException(
                    "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                        vnfr_id, vdu_id, vdu_index
                    )
                )
            # New generation RO stores information at "vim_info"
            ng_ro_status = None
            target_vim = None
            if vdur.get("vim_info"):
                target_vim = next(
                    t for t in vdur["vim_info"]
                )  # there should be only one key
                ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
            if (
                vdur.get("pdu-type")
                or vdur.get("status") == "ACTIVE"
                or ng_ro_status == "ACTIVE"
            ):
                ip_address = vdur.get("ip-address")
                if not ip_address:
                    continue
                target_vdu_id = vdur["vdu-id-ref"]
            elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                raise LcmException(
                    "Cannot inject ssh-key because target VM is in error state"
                )

        if not target_vdu_id:
            continue

        # inject public key into machine
        if pub_key and user:
            self.logger.debug(logging_text + "Inserting RO key")
            self.logger.debug("SSH > PubKey > {}".format(pub_key))
            if vdur.get("pdu-type"):
                self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                return ip_address
            try:
                # request RO to run the key injection as an action
                target = {
                    "action": {
                        "action": "inject_ssh_key",
                        "key": pub_key,
                        "user": user,
                    },
                    "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                }
                desc = await self.RO.deploy(nsr_id, target)
                action_id = desc["action_id"]
                await self._wait_ng_ro(
                    nsr_id, action_id, timeout=600, operation="instantiation"
                )
                break
            except NgRoException as e:
                raise LcmException(
                    "Reaching max tries injecting key. Error: {}".format(e)
                )
        else:
            break

    return ip_address
async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
    """
    Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
    :param nsr_id: NS record id to re-read configurationStatus from
    :param vca_deployed_list: list of deployed VCAs; vca_index selects ours
    :param vca_index: index of the VCA whose dependencies are awaited
    :raises LcmException: if a dependency is BROKEN or the wait times out
    """
    my_vca = vca_deployed_list[vca_index]
    if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
        # vdu or kdu: no dependencies
        return
    timeout = 300
    while timeout >= 0:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        configuration_status_list = db_nsr["configurationStatus"]
        for index, vca_deployed in enumerate(configuration_status_list):
            if index == vca_index:
                # myself
                continue
            # only statuses of the same member-vnf-index (or any, for a NS
            # level charm) are considered dependencies
            if not my_vca.get("member-vnf-index") or (
                vca_deployed.get("member-vnf-index")
                == my_vca.get("member-vnf-index")
            ):
                internal_status = configuration_status_list[index].get("status")
                if internal_status == "READY":
                    continue
                elif internal_status == "BROKEN":
                    raise LcmException(
                        "Configuration aborted because dependent charm/s has failed"
                    )
                else:
                    break
        else:
            # no dependencies, return
            return
        await asyncio.sleep(10)
        timeout -= 1

    raise LcmException("Configuration aborted because dependent charm/s timeout")
def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
    """Return the VCA id configured for a VNF record or, failing that, a NS record.

    :param db_vnfr: VNF record; its "vca-id" field wins when present
    :param db_nsr: NS record; used to resolve the vim account's "vca" field
    :return: the VCA id, or None when neither source provides one
    """
    vca_id = None
    if db_vnfr:
        vca_id = deep_get(db_vnfr, ("vca-id",))
    elif db_nsr:
        vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
        vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
    return vca_id
1702 async def instantiate_N2VC(
1720 ee_config_descriptor
,
1722 nsr_id
= db_nsr
["_id"]
1723 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
1724 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1725 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
1726 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
1728 "collection": "nsrs",
1729 "filter": {"_id": nsr_id
},
1730 "path": db_update_entry
,
1735 element_under_configuration
= nsr_id
1739 vnfr_id
= db_vnfr
["_id"]
1740 osm_config
["osm"]["vnf_id"] = vnfr_id
1742 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
1744 if vca_type
== "native_charm":
1747 index_number
= vdu_index
or 0
1750 element_type
= "VNF"
1751 element_under_configuration
= vnfr_id
1752 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
1754 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
1755 element_type
= "VDU"
1756 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
1757 osm_config
["osm"]["vdu_id"] = vdu_id
1759 namespace
+= ".{}".format(kdu_name
)
1760 element_type
= "KDU"
1761 element_under_configuration
= kdu_name
1762 osm_config
["osm"]["kdu_name"] = kdu_name
1765 if base_folder
["pkg-dir"]:
1766 artifact_path
= "{}/{}/{}/{}".format(
1767 base_folder
["folder"],
1768 base_folder
["pkg-dir"],
1771 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1776 artifact_path
= "{}/Scripts/{}/{}/".format(
1777 base_folder
["folder"],
1780 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1785 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
1787 # get initial_config_primitive_list that applies to this element
1788 initial_config_primitive_list
= config_descriptor
.get(
1789 "initial-config-primitive"
1793 "Initial config primitive list > {}".format(
1794 initial_config_primitive_list
1798 # add config if not present for NS charm
1799 ee_descriptor_id
= ee_config_descriptor
.get("id")
1800 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
1801 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
1802 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
1806 "Initial config primitive list #2 > {}".format(
1807 initial_config_primitive_list
1810 # n2vc_redesign STEP 3.1
1811 # find old ee_id if exists
1812 ee_id
= vca_deployed
.get("ee_id")
1814 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
1815 # create or register execution environment in VCA
1816 if vca_type
in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1817 self
._write
_configuration
_status
(
1819 vca_index
=vca_index
,
1821 element_under_configuration
=element_under_configuration
,
1822 element_type
=element_type
,
1825 step
= "create execution environment"
1826 self
.logger
.debug(logging_text
+ step
)
1830 if vca_type
== "k8s_proxy_charm":
1831 ee_id
= await self
.vca_map
[vca_type
].install_k8s_proxy_charm(
1832 charm_name
=artifact_path
[artifact_path
.rfind("/") + 1 :],
1833 namespace
=namespace
,
1834 artifact_path
=artifact_path
,
1838 elif vca_type
== "helm" or vca_type
== "helm-v3":
1839 ee_id
, credentials
= await self
.vca_map
[
1841 ].create_execution_environment(
1846 artifact_path
=artifact_path
,
1847 chart_model
=vca_name
,
1851 ee_id
, credentials
= await self
.vca_map
[
1853 ].create_execution_environment(
1854 namespace
=namespace
,
1860 elif vca_type
== "native_charm":
1861 step
= "Waiting to VM being up and getting IP address"
1862 self
.logger
.debug(logging_text
+ step
)
1863 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1872 credentials
= {"hostname": rw_mgmt_ip
}
1874 username
= deep_get(
1875 config_descriptor
, ("config-access", "ssh-access", "default-user")
1877 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1878 # merged. Meanwhile let's get username from initial-config-primitive
1879 if not username
and initial_config_primitive_list
:
1880 for config_primitive
in initial_config_primitive_list
:
1881 for param
in config_primitive
.get("parameter", ()):
1882 if param
["name"] == "ssh-username":
1883 username
= param
["value"]
1887 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1888 "'config-access.ssh-access.default-user'"
1890 credentials
["username"] = username
1891 # n2vc_redesign STEP 3.2
1893 self
._write
_configuration
_status
(
1895 vca_index
=vca_index
,
1896 status
="REGISTERING",
1897 element_under_configuration
=element_under_configuration
,
1898 element_type
=element_type
,
1901 step
= "register execution environment {}".format(credentials
)
1902 self
.logger
.debug(logging_text
+ step
)
1903 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
1904 credentials
=credentials
,
1905 namespace
=namespace
,
1910 # for compatibility with MON/POL modules, the need model and application name at database
1911 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1912 ee_id_parts
= ee_id
.split(".")
1913 db_nsr_update
= {db_update_entry
+ "ee_id": ee_id
}
1914 if len(ee_id_parts
) >= 2:
1915 model_name
= ee_id_parts
[0]
1916 application_name
= ee_id_parts
[1]
1917 db_nsr_update
[db_update_entry
+ "model"] = model_name
1918 db_nsr_update
[db_update_entry
+ "application"] = application_name
1920 # n2vc_redesign STEP 3.3
1921 step
= "Install configuration Software"
1923 self
._write
_configuration
_status
(
1925 vca_index
=vca_index
,
1926 status
="INSTALLING SW",
1927 element_under_configuration
=element_under_configuration
,
1928 element_type
=element_type
,
1929 other_update
=db_nsr_update
,
1932 # TODO check if already done
1933 self
.logger
.debug(logging_text
+ step
)
1935 if vca_type
== "native_charm":
1936 config_primitive
= next(
1937 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
1940 if config_primitive
:
1941 config
= self
._map
_primitive
_params
(
1942 config_primitive
, {}, deploy_params
1945 if vca_type
== "lxc_proxy_charm":
1946 if element_type
== "NS":
1947 num_units
= db_nsr
.get("config-units") or 1
1948 elif element_type
== "VNF":
1949 num_units
= db_vnfr
.get("config-units") or 1
1950 elif element_type
== "VDU":
1951 for v
in db_vnfr
["vdur"]:
1952 if vdu_id
== v
["vdu-id-ref"]:
1953 num_units
= v
.get("config-units") or 1
1955 if vca_type
!= "k8s_proxy_charm":
1956 await self
.vca_map
[vca_type
].install_configuration_sw(
1958 artifact_path
=artifact_path
,
1961 num_units
=num_units
,
1966 # write in db flag of configuration_sw already installed
1968 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
1971 # add relations for this VCA (wait for other peers related with this VCA)
1972 is_relation_added
= await self
._add
_vca
_relations
(
1973 logging_text
=logging_text
,
1976 vca_index
=vca_index
,
1979 if not is_relation_added
:
1980 raise LcmException("Relations could not be added to VCA.")
1982 # if SSH access is required, then get execution environment SSH public
1983 # if native charm we have waited already to VM be UP
1984 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1987 # self.logger.debug("get ssh key block")
1989 config_descriptor
, ("config-access", "ssh-access", "required")
1991 # self.logger.debug("ssh key needed")
1992 # Needed to inject a ssh key
1995 ("config-access", "ssh-access", "default-user"),
1997 step
= "Install configuration Software, getting public ssh key"
1998 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
1999 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
2002 step
= "Insert public key into VM user={} ssh_key={}".format(
2006 # self.logger.debug("no need to get ssh key")
2007 step
= "Waiting to VM being up and getting IP address"
2008 self
.logger
.debug(logging_text
+ step
)
2010 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2013 # n2vc_redesign STEP 5.1
2014 # wait for RO (ip-address) Insert pub_key into VM
2017 rw_mgmt_ip
, services
= await self
.wait_kdu_up(
2018 logging_text
, nsr_id
, vnfr_id
, kdu_name
2020 vnfd
= self
.db
.get_one(
2022 {"_id": f
'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2024 kdu
= get_kdu(vnfd
, kdu_name
)
2026 service
["name"] for service
in get_kdu_services(kdu
)
2028 exposed_services
= []
2029 for service
in services
:
2030 if any(s
in service
["name"] for s
in kdu_services
):
2031 exposed_services
.append(service
)
2032 await self
.vca_map
[vca_type
].exec_primitive(
2034 primitive_name
="config",
2036 "osm-config": json
.dumps(
2038 k8s
={"services": exposed_services
}
2045 # This verification is needed in order to avoid trying to add a public key
2046 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2047 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2048 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2050 elif db_vnfr
.get("vdur"):
2051 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
2061 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
2063 # store rw_mgmt_ip in deploy params for later replacement
2064 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
2066 # n2vc_redesign STEP 6 Execute initial config primitive
2067 step
= "execute initial config primitive"
2069 # wait for dependent primitives execution (NS -> VNF -> VDU)
2070 if initial_config_primitive_list
:
2071 await self
._wait
_dependent
_n
2vc
(nsr_id
, vca_deployed_list
, vca_index
)
2073 # stage, in function of element type: vdu, kdu, vnf or ns
2074 my_vca
= vca_deployed_list
[vca_index
]
2075 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
2077 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
2078 elif my_vca
.get("member-vnf-index"):
2080 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
2083 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
2085 self
._write
_configuration
_status
(
2086 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
2089 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2091 check_if_terminated_needed
= True
2092 for initial_config_primitive
in initial_config_primitive_list
:
2093 # adding information on the vca_deployed if it is a NS execution environment
2094 if not vca_deployed
["member-vnf-index"]:
2095 deploy_params
["ns_config_info"] = json
.dumps(
2096 self
._get
_ns
_config
_info
(nsr_id
)
2098 # TODO check if already done
2099 primitive_params_
= self
._map
_primitive
_params
(
2100 initial_config_primitive
, {}, deploy_params
2103 step
= "execute primitive '{}' params '{}'".format(
2104 initial_config_primitive
["name"], primitive_params_
2106 self
.logger
.debug(logging_text
+ step
)
2107 await self
.vca_map
[vca_type
].exec_primitive(
2109 primitive_name
=initial_config_primitive
["name"],
2110 params_dict
=primitive_params_
,
2115 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2116 if check_if_terminated_needed
:
2117 if config_descriptor
.get("terminate-config-primitive"):
2119 "nsrs", nsr_id
, {db_update_entry
+ "needed_terminate": True}
2121 check_if_terminated_needed
= False
2123 # TODO register in database that primitive is done
2125 # STEP 7 Configure metrics
2126 if vca_type
== "helm" or vca_type
== "helm-v3":
2127 # TODO: review for those cases where the helm chart is a reference and
2128 # is not part of the NF package
2129 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
2131 artifact_path
=artifact_path
,
2132 ee_config_descriptor
=ee_config_descriptor
,
2135 target_ip
=rw_mgmt_ip
,
2136 element_type
=element_type
,
2137 vnf_member_index
=db_vnfr
.get("member-vnf-index-ref", ""),
2139 vdu_index
=vdu_index
,
2141 kdu_index
=kdu_index
,
2147 {db_update_entry
+ "prometheus_jobs": prometheus_jobs
},
2150 for job
in prometheus_jobs
:
2153 {"job_name": job
["job_name"]},
2156 fail_on_empty
=False,
2159 step
= "instantiated at VCA"
2160 self
.logger
.debug(logging_text
+ step
)
2162 self
._write
_configuration
_status
(
2163 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
2166 except Exception as e
: # TODO not use Exception but N2VC exception
2167 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2169 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
2172 "Exception while {} : {}".format(step
, e
), exc_info
=True
2174 self
._write
_configuration
_status
(
2175 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
2177 raise LcmException("{}. {}".format(step
, e
)) from e
def _write_ns_status(
    self,
    nsr_id: str,
    ns_state: str,
    current_operation: str,
    current_operation_id: str,
    error_description: str = None,
    error_detail: str = None,
    other_update: dict = None,
):
    """
    Update db_nsr fields.
    :param nsr_id: NS record id to update
    :param ns_state: new value for nsState; skipped when falsy
    :param current_operation:
    :param current_operation_id:
    :param error_description:
    :param error_detail:
    :param other_update: Other required changes at database if provided, will be cleared
    :return: None; db errors are logged, never raised
    """
    try:
        db_dict = other_update or {}
        db_dict[
            "_admin.nslcmop"
        ] = current_operation_id  # for backward compatibility
        db_dict["_admin.current-operation"] = current_operation_id
        # "IDLE" is stored as no operation type at all
        db_dict["_admin.operation-type"] = (
            current_operation if current_operation != "IDLE" else None
        )
        db_dict["currentOperation"] = current_operation
        db_dict["currentOperationID"] = current_operation_id
        db_dict["errorDescription"] = error_description
        db_dict["errorDetail"] = error_detail
        if ns_state:
            db_dict["nsState"] = ns_state
        self.update_db_2("nsrs", nsr_id, db_dict)
    except DbException as e:
        self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
def _write_op_status(
    self,
    op_id: str,
    stage: list = None,
    error_message: str = None,
    queuePosition: int = 0,
    operation_state: str = None,
    other_update: dict = None,
):
    """Update the nslcmops record for `op_id` with progress information.

    :param op_id: nslcmop record id
    :param stage: either a 3-item stage list or any stringifiable object
    :param error_message: stored as errorMessage when not None
    :param queuePosition: always written
    :param operation_state: when set, also refreshes statusEnteredTime
    :param other_update: extra fields to merge into the same write
    :return: None; db errors are logged, never raised
    """
    try:
        db_dict = other_update or {}
        db_dict["queuePosition"] = queuePosition
        if isinstance(stage, list):
            db_dict["stage"] = stage[0]
            db_dict["detailed-status"] = " ".join(stage)
        elif stage is not None:
            db_dict["stage"] = str(stage)

        if error_message is not None:
            db_dict["errorMessage"] = error_message
        if operation_state is not None:
            db_dict["operationState"] = operation_state
            db_dict["statusEnteredTime"] = time()
        self.update_db_2("nslcmops", op_id, db_dict)
    except DbException as e:
        self.logger.warn(
            "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
        )
def _write_all_config_status(self, db_nsr: dict, status: str):
    """Set every entry of the NS configurationStatus list to `status`.

    :param db_nsr: NS record already read from database
    :param status: status string to write on each configuration entry
    :return: None; db errors are logged, never raised
    """
    try:
        nsr_id = db_nsr["_id"]
        # configurationStatus
        config_status = db_nsr.get("configurationStatus")
        if config_status:
            db_nsr_update = {
                "configurationStatus.{}.status".format(index): status
                for index, v in enumerate(config_status)
                if v
            }
            # update status
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
    except DbException as e:
        self.logger.warn(
            "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
        )
def _write_configuration_status(
    self,
    nsr_id: str,
    vca_index: int,
    status: str = None,
    element_under_configuration: str = None,
    element_type: str = None,
    other_update: dict = None,
):
    """Update one entry of the NS configurationStatus list.

    :param nsr_id: NS record id
    :param vca_index: index of the configurationStatus entry to modify
    :param status: new status value, written only when truthy
    :param element_under_configuration: element id being configured
    :param element_type: NS/VNF/VDU/KDU
    :param other_update: extra fields to merge into the same write
    :return: None; db errors are logged, never raised
    """
    # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
    #                   .format(vca_index, status))
    try:
        db_path = "configurationStatus.{}.".format(vca_index)
        db_dict = other_update or {}
        if status:
            db_dict[db_path + "status"] = status
        if element_under_configuration:
            db_dict[
                db_path + "elementUnderConfiguration"
            ] = element_under_configuration
        if element_type:
            db_dict[db_path + "elementType"] = element_type
        self.update_db_2("nsrs", nsr_id, db_dict)
    except DbException as e:
        self.logger.warn(
            "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
                status, nsr_id, vca_index, e
            )
        )
async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
    """
    Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
    sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
    Database is used because the result can be obtained from a different LCM worker in case of HA.
    :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
    :param db_nslcmop: database content of nslcmop
    :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
    :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
        computed 'vim-account-id'
    """
    modified = False
    nslcmop_id = db_nslcmop["_id"]
    placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
    if placement_engine == "PLA":
        self.logger.debug(
            logging_text + "Invoke and wait for placement optimization"
        )
        await self.msg.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id})
        db_poll_interval = 5
        wait = db_poll_interval * 10
        pla_result = None
        # poll the database because the PLA answer may be written by a
        # different LCM worker (HA deployments)
        while not pla_result and wait >= 0:
            await asyncio.sleep(db_poll_interval)
            wait -= db_poll_interval
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            pla_result = deep_get(db_nslcmop, ("_admin", "pla"))

        if not pla_result:
            raise LcmException(
                "Placement timeout for nslcmopId={}".format(nslcmop_id)
            )

        for pla_vnf in pla_result["vnf"]:
            vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
            if not pla_vnf.get("vimAccountId") or not vnfr:
                continue
            modified = True
            self.db.set_one(
                "vnfrs",
                {"_id": vnfr["_id"]},
                {"vim-account-id": pla_vnf["vimAccountId"]},
            )
            # Modifies db_vnfrs in place so the caller sees the new account
            vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
    return modified
2346 def _gather_vnfr_healing_alerts(self
, vnfr
, vnfd
):
2348 nsr_id
= vnfr
["nsr-id-ref"]
2349 df
= vnfd
.get("df", [{}])[0]
2350 # Checking for auto-healing configuration
2351 if "healing-aspect" in df
:
2352 healing_aspects
= df
["healing-aspect"]
2353 for healing
in healing_aspects
:
2354 for healing_policy
in healing
.get("healing-policy", ()):
2355 vdu_id
= healing_policy
["vdu-id"]
2357 (vdur
for vdur
in vnfr
["vdur"] if vdu_id
== vdur
["vdu-id-ref"]),
2362 metric_name
= "vm_status"
2363 vdu_name
= vdur
.get("name")
2364 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2366 name
= f
"healing_{uuid}"
2367 action
= healing_policy
2368 # action_on_recovery = healing.get("action-on-recovery")
2369 # cooldown_time = healing.get("cooldown-time")
2370 # day1 = healing.get("day1")
2374 "metric": metric_name
,
2377 "vnf_member_index": vnf_member_index
,
2378 "vdu_name": vdu_name
,
2380 "alarm_status": "ok",
2381 "action_type": "healing",
2384 alerts
.append(alert
)
2387 def _gather_vnfr_scaling_alerts(self
, vnfr
, vnfd
):
2389 nsr_id
= vnfr
["nsr-id-ref"]
2390 df
= vnfd
.get("df", [{}])[0]
2391 # Checking for auto-scaling configuration
2392 if "scaling-aspect" in df
:
2393 scaling_aspects
= df
["scaling-aspect"]
2394 all_vnfd_monitoring_params
= {}
2395 for ivld
in vnfd
.get("int-virtual-link-desc", ()):
2396 for mp
in ivld
.get("monitoring-parameters", ()):
2397 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2398 for vdu
in vnfd
.get("vdu", ()):
2399 for mp
in vdu
.get("monitoring-parameter", ()):
2400 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2401 for df
in vnfd
.get("df", ()):
2402 for mp
in df
.get("monitoring-parameter", ()):
2403 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2404 for scaling_aspect
in scaling_aspects
:
2405 scaling_group_name
= scaling_aspect
.get("name", "")
2406 # Get monitored VDUs
2407 all_monitored_vdus
= set()
2408 for delta
in scaling_aspect
.get("aspect-delta-details", {}).get(
2411 for vdu_delta
in delta
.get("vdu-delta", ()):
2412 all_monitored_vdus
.add(vdu_delta
.get("id"))
2413 monitored_vdurs
= list(
2415 lambda vdur
: vdur
["vdu-id-ref"] in all_monitored_vdus
,
2419 if not monitored_vdurs
:
2421 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2424 for scaling_policy
in scaling_aspect
.get("scaling-policy", ()):
2425 if scaling_policy
["scaling-type"] != "automatic":
2427 threshold_time
= scaling_policy
.get("threshold-time", "1")
2428 cooldown_time
= scaling_policy
.get("cooldown-time", "0")
2429 for scaling_criteria
in scaling_policy
["scaling-criteria"]:
2430 monitoring_param_ref
= scaling_criteria
.get(
2431 "vnf-monitoring-param-ref"
2433 vnf_monitoring_param
= all_vnfd_monitoring_params
[
2434 monitoring_param_ref
2436 for vdur
in monitored_vdurs
:
2437 vdu_id
= vdur
["vdu-id-ref"]
2438 metric_name
= vnf_monitoring_param
.get("performance-metric")
2439 metric_name
= f
"osm_{metric_name}"
2440 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2441 scalein_threshold
= scaling_criteria
.get(
2442 "scale-in-threshold"
2444 scaleout_threshold
= scaling_criteria
.get(
2445 "scale-out-threshold"
2447 # Looking for min/max-number-of-instances
2448 instances_min_number
= 1
2449 instances_max_number
= 1
2450 vdu_profile
= df
["vdu-profile"]
2453 item
for item
in vdu_profile
if item
["id"] == vdu_id
2455 instances_min_number
= profile
.get(
2456 "min-number-of-instances", 1
2458 instances_max_number
= profile
.get(
2459 "max-number-of-instances", 1
2462 if scalein_threshold
:
2464 name
= f
"scalein_{uuid}"
2465 operation
= scaling_criteria
[
2466 "scale-in-relational-operation"
2468 rel_operator
= self
.rel_operation_types
.get(
2471 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2472 expression
= f
"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2475 "vnf_member_index": vnf_member_index
,
2481 "for": str(threshold_time
) + "m",
2484 action
= scaling_policy
2486 "scaling-group": scaling_group_name
,
2487 "cooldown-time": cooldown_time
,
2492 "metric": metric_name
,
2495 "vnf_member_index": vnf_member_index
,
2498 "alarm_status": "ok",
2499 "action_type": "scale_in",
2501 "prometheus_config": prom_cfg
,
2503 alerts
.append(alert
)
2505 if scaleout_threshold
:
2507 name
= f
"scaleout_{uuid}"
2508 operation
= scaling_criteria
[
2509 "scale-out-relational-operation"
2511 rel_operator
= self
.rel_operation_types
.get(
2514 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2515 expression
= f
"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2518 "vnf_member_index": vnf_member_index
,
2524 "for": str(threshold_time
) + "m",
2527 action
= scaling_policy
2529 "scaling-group": scaling_group_name
,
2530 "cooldown-time": cooldown_time
,
2535 "metric": metric_name
,
2538 "vnf_member_index": vnf_member_index
,
2541 "alarm_status": "ok",
2542 "action_type": "scale_out",
2544 "prometheus_config": prom_cfg
,
2546 alerts
.append(alert
)
2549 def _gather_vnfr_alarm_alerts(self
, vnfr
, vnfd
):
2551 nsr_id
= vnfr
["nsr-id-ref"]
2552 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2554 # Checking for VNF alarm configuration
2555 for vdur
in vnfr
["vdur"]:
2556 vdu_id
= vdur
["vdu-id-ref"]
2557 vdu
= next(filter(lambda vdu
: vdu
["id"] == vdu_id
, vnfd
["vdu"]))
2559 # Get VDU monitoring params, since alerts are based on them
2560 vdu_monitoring_params
= {}
2561 for mp
in vdu
.get("monitoring-parameter", []):
2562 vdu_monitoring_params
[mp
.get("id")] = mp
2563 if not vdu_monitoring_params
:
2565 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2568 # Get alarms in the VDU
2569 alarm_descriptors
= vdu
["alarm"]
2570 # Create VDU alarms for each alarm in the VDU
2571 for alarm_descriptor
in alarm_descriptors
:
2572 # Check that the VDU alarm refers to a proper monitoring param
2573 alarm_monitoring_param
= alarm_descriptor
.get(
2574 "vnf-monitoring-param-ref", ""
2576 vdu_specific_monitoring_param
= vdu_monitoring_params
.get(
2577 alarm_monitoring_param
, {}
2579 if not vdu_specific_monitoring_param
:
2581 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2584 metric_name
= vdu_specific_monitoring_param
.get(
2585 "performance-metric"
2589 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2592 # Set params of the alarm to be created in Prometheus
2593 metric_name
= f
"osm_{metric_name}"
2594 metric_threshold
= alarm_descriptor
.get("value")
2596 alert_name
= f
"vdu_alarm_{uuid}"
2597 operation
= alarm_descriptor
["operation"]
2598 rel_operator
= self
.rel_operation_types
.get(operation
, "<=")
2599 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2600 expression
= f
"{metric_selector} {rel_operator} {metric_threshold}"
2603 "vnf_member_index": vnf_member_index
,
2605 "vdu_name": "{{ $labels.vdu_name }}",
2608 "alert": alert_name
,
2610 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2613 alarm_action
= dict()
2614 for action_type
in ["ok", "insufficient-data", "alarm"]:
2616 "actions" in alarm_descriptor
2617 and action_type
in alarm_descriptor
["actions"]
2619 alarm_action
[action_type
] = alarm_descriptor
["actions"][
2625 "metric": metric_name
,
2628 "vnf_member_index": vnf_member_index
,
2631 "alarm_status": "ok",
2632 "action_type": "vdu_alarm",
2633 "action": alarm_action
,
2634 "prometheus_config": prom_cfg
,
2636 alerts
.append(alert
)
2639 def update_nsrs_with_pla_result(self
, params
):
2641 nslcmop_id
= deep_get(params
, ("placement", "nslcmopId"))
2643 "nslcmops", nslcmop_id
, {"_admin.pla": params
.get("placement")}
2645 except Exception as e
:
2646 self
.logger
.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id
, e
))
2648 async def instantiate(self
, nsr_id
, nslcmop_id
):
2651 :param nsr_id: ns instance to deploy
2652 :param nslcmop_id: operation to run
2656 # Try to lock HA task here
2657 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
2658 if not task_is_locked_by_me
:
2660 "instantiate() task is not locked by me, ns={}".format(nsr_id
)
2664 logging_text
= "Task ns={} instantiate={} ".format(nsr_id
, nslcmop_id
)
2665 self
.logger
.debug(logging_text
+ "Enter")
2667 # get all needed from database
2669 # database nsrs record
2672 # database nslcmops record
2675 # update operation on nsrs
2677 # update operation on nslcmops
2678 db_nslcmop_update
= {}
2680 timeout_ns_deploy
= self
.timeout
.ns_deploy
2682 nslcmop_operation_state
= None
2683 db_vnfrs
= {} # vnf's info indexed by member-index
2685 tasks_dict_info
= {} # from task to info text
2689 "Stage 1/5: preparation of the environment.",
2690 "Waiting for previous operations to terminate.",
2693 # ^ stage, step, VIM progress
2695 # wait for any previous tasks in process
2696 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
2698 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2699 stage
[1] = "Reading from database."
2700 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2701 db_nsr_update
["detailed-status"] = "creating"
2702 db_nsr_update
["operational-status"] = "init"
2703 self
._write
_ns
_status
(
2705 ns_state
="BUILDING",
2706 current_operation
="INSTANTIATING",
2707 current_operation_id
=nslcmop_id
,
2708 other_update
=db_nsr_update
,
2710 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
2712 # read from db: operation
2713 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
2714 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2715 if db_nslcmop
["operationParams"].get("additionalParamsForVnf"):
2716 db_nslcmop
["operationParams"]["additionalParamsForVnf"] = json
.loads(
2717 db_nslcmop
["operationParams"]["additionalParamsForVnf"]
2719 ns_params
= db_nslcmop
.get("operationParams")
2720 if ns_params
and ns_params
.get("timeout_ns_deploy"):
2721 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
2724 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
2725 self
.logger
.debug(logging_text
+ stage
[1])
2726 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
2727 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
2728 self
.logger
.debug(logging_text
+ stage
[1])
2729 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
2730 self
.fs
.sync(db_nsr
["nsd-id"])
2732 # nsr_name = db_nsr["name"] # TODO short-name??
2734 # read from db: vnf's of this ns
2735 stage
[1] = "Getting vnfrs from db."
2736 self
.logger
.debug(logging_text
+ stage
[1])
2737 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
2739 # read from db: vnfd's for every vnf
2740 db_vnfds
= [] # every vnfd data
2742 # for each vnf in ns, read vnfd
2743 for vnfr
in db_vnfrs_list
:
2744 if vnfr
.get("kdur"):
2746 for kdur
in vnfr
["kdur"]:
2747 if kdur
.get("additionalParams"):
2748 kdur
["additionalParams"] = json
.loads(
2749 kdur
["additionalParams"]
2751 kdur_list
.append(kdur
)
2752 vnfr
["kdur"] = kdur_list
2754 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
2755 vnfd_id
= vnfr
["vnfd-id"]
2756 vnfd_ref
= vnfr
["vnfd-ref"]
2757 self
.fs
.sync(vnfd_id
)
2759 # if we haven't this vnfd, read it from db
2760 if vnfd_id
not in db_vnfds
:
2762 stage
[1] = "Getting vnfd={} id='{}' from db.".format(
2765 self
.logger
.debug(logging_text
+ stage
[1])
2766 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
2769 db_vnfds
.append(vnfd
)
2771 # Get or generates the _admin.deployed.VCA list
2772 vca_deployed_list
= None
2773 if db_nsr
["_admin"].get("deployed"):
2774 vca_deployed_list
= db_nsr
["_admin"]["deployed"].get("VCA")
2775 if vca_deployed_list
is None:
2776 vca_deployed_list
= []
2777 configuration_status_list
= []
2778 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2779 db_nsr_update
["configurationStatus"] = configuration_status_list
2780 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2781 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2782 elif isinstance(vca_deployed_list
, dict):
2783 # maintain backward compatibility. Change a dict to list at database
2784 vca_deployed_list
= list(vca_deployed_list
.values())
2785 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2786 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2789 deep_get(db_nsr
, ("_admin", "deployed", "RO", "vnfd")), list
2791 populate_dict(db_nsr
, ("_admin", "deployed", "RO", "vnfd"), [])
2792 db_nsr_update
["_admin.deployed.RO.vnfd"] = []
2794 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2795 db_nsr_update
["_admin.nsState"] = "INSTANTIATED"
2796 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2798 "vnfrs", {"nsr-id-ref": nsr_id
}, {"_admin.nsState": "INSTANTIATED"}
2801 # n2vc_redesign STEP 2 Deploy Network Scenario
2802 stage
[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2803 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2805 stage
[1] = "Deploying KDUs."
2806 # self.logger.debug(logging_text + "Before deploy_kdus")
2807 # Call to deploy_kdus in case exists the "vdu:kdu" param
2808 await self
.deploy_kdus(
2809 logging_text
=logging_text
,
2811 nslcmop_id
=nslcmop_id
,
2814 task_instantiation_info
=tasks_dict_info
,
2817 stage
[1] = "Getting VCA public key."
2818 # n2vc_redesign STEP 1 Get VCA public ssh-key
2819 # feature 1429. Add n2vc public key to needed VMs
2820 n2vc_key
= self
.n2vc
.get_public_key()
2821 n2vc_key_list
= [n2vc_key
]
2822 if self
.vca_config
.public_key
:
2823 n2vc_key_list
.append(self
.vca_config
.public_key
)
2825 stage
[1] = "Deploying NS at VIM."
2826 task_ro
= asyncio
.ensure_future(
2827 self
.instantiate_RO(
2828 logging_text
=logging_text
,
2832 db_nslcmop
=db_nslcmop
,
2835 n2vc_key_list
=n2vc_key_list
,
2839 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "instantiate_RO", task_ro
)
2840 tasks_dict_info
[task_ro
] = "Deploying at VIM"
2842 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2843 stage
[1] = "Deploying Execution Environments."
2844 self
.logger
.debug(logging_text
+ stage
[1])
2846 # create namespace and certificate if any helm based EE is present in the NS
2847 if check_helm_ee_in_ns(db_vnfds
):
2848 await self
.vca_map
["helm-v3"].setup_ns_namespace(
2851 # create TLS certificates
2852 await self
.vca_map
["helm-v3"].create_tls_certificate(
2853 secret_name
=self
.EE_TLS_NAME
,
2856 usage
="server auth",
2860 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
2861 for vnf_profile
in get_vnf_profiles(nsd
):
2862 vnfd_id
= vnf_profile
["vnfd-id"]
2863 vnfd
= find_in_list(db_vnfds
, lambda a_vnf
: a_vnf
["id"] == vnfd_id
)
2864 member_vnf_index
= str(vnf_profile
["id"])
2865 db_vnfr
= db_vnfrs
[member_vnf_index
]
2866 base_folder
= vnfd
["_admin"]["storage"]
2873 # Get additional parameters
2874 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
2875 if db_vnfr
.get("additionalParamsForVnf"):
2876 deploy_params
.update(
2877 parse_yaml_strings(db_vnfr
["additionalParamsForVnf"].copy())
2880 descriptor_config
= get_configuration(vnfd
, vnfd
["id"])
2881 if descriptor_config
:
2883 logging_text
=logging_text
2884 + "member_vnf_index={} ".format(member_vnf_index
),
2887 nslcmop_id
=nslcmop_id
,
2893 member_vnf_index
=member_vnf_index
,
2894 vdu_index
=vdu_index
,
2895 kdu_index
=kdu_index
,
2897 deploy_params
=deploy_params
,
2898 descriptor_config
=descriptor_config
,
2899 base_folder
=base_folder
,
2900 task_instantiation_info
=tasks_dict_info
,
2904 # Deploy charms for each VDU that supports one.
2905 for vdud
in get_vdu_list(vnfd
):
2907 descriptor_config
= get_configuration(vnfd
, vdu_id
)
2908 vdur
= find_in_list(
2909 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
2912 if vdur
.get("additionalParams"):
2913 deploy_params_vdu
= parse_yaml_strings(vdur
["additionalParams"])
2915 deploy_params_vdu
= deploy_params
2916 deploy_params_vdu
["OSM"] = get_osm_params(
2917 db_vnfr
, vdu_id
, vdu_count_index
=0
2919 vdud_count
= get_number_of_instances(vnfd
, vdu_id
)
2921 self
.logger
.debug("VDUD > {}".format(vdud
))
2923 "Descriptor config > {}".format(descriptor_config
)
2925 if descriptor_config
:
2929 for vdu_index
in range(vdud_count
):
2930 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2932 logging_text
=logging_text
2933 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2934 member_vnf_index
, vdu_id
, vdu_index
2938 nslcmop_id
=nslcmop_id
,
2944 kdu_index
=kdu_index
,
2945 member_vnf_index
=member_vnf_index
,
2946 vdu_index
=vdu_index
,
2948 deploy_params
=deploy_params_vdu
,
2949 descriptor_config
=descriptor_config
,
2950 base_folder
=base_folder
,
2951 task_instantiation_info
=tasks_dict_info
,
2954 for kdud
in get_kdu_list(vnfd
):
2955 kdu_name
= kdud
["name"]
2956 descriptor_config
= get_configuration(vnfd
, kdu_name
)
2957 if descriptor_config
:
2961 kdu_index
, kdur
= next(
2963 for x
in enumerate(db_vnfr
["kdur"])
2964 if x
[1]["kdu-name"] == kdu_name
2966 deploy_params_kdu
= {"OSM": get_osm_params(db_vnfr
)}
2967 if kdur
.get("additionalParams"):
2968 deploy_params_kdu
.update(
2969 parse_yaml_strings(kdur
["additionalParams"].copy())
2973 logging_text
=logging_text
,
2976 nslcmop_id
=nslcmop_id
,
2982 member_vnf_index
=member_vnf_index
,
2983 vdu_index
=vdu_index
,
2984 kdu_index
=kdu_index
,
2986 deploy_params
=deploy_params_kdu
,
2987 descriptor_config
=descriptor_config
,
2988 base_folder
=base_folder
,
2989 task_instantiation_info
=tasks_dict_info
,
2993 # Check if each vnf has exporter for metric collection if so update prometheus job records
2994 if "exporters-endpoints" in vnfd
.get("df")[0]:
2995 exporter_config
= vnfd
.get("df")[0].get("exporters-endpoints")
2996 self
.logger
.debug("exporter config :{}".format(exporter_config
))
2997 artifact_path
= "{}/{}/{}".format(
2998 base_folder
["folder"],
2999 base_folder
["pkg-dir"],
3000 "exporter-endpoint",
3003 ee_config_descriptor
= exporter_config
3004 vnfr_id
= db_vnfr
["id"]
3005 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
3014 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
3015 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
3016 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
3017 vdu_id_for_prom
= None
3018 vdu_index_for_prom
= None
3019 for x
in get_iterable(db_vnfr
, "vdur"):
3020 vdu_id_for_prom
= x
.get("vdu-id-ref")
3021 vdu_index_for_prom
= x
.get("count-index")
3022 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
3024 artifact_path
=artifact_path
,
3025 ee_config_descriptor
=ee_config_descriptor
,
3028 target_ip
=rw_mgmt_ip
,
3030 vdu_id
=vdu_id_for_prom
,
3031 vdu_index
=vdu_index_for_prom
,
3034 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
3036 db_nsr_update
["_admin.deployed.prometheus_jobs"] = prometheus_jobs
3043 for job
in prometheus_jobs
:
3046 {"job_name": job
["job_name"]},
3049 fail_on_empty
=False,
3052 # Check if this NS has a charm configuration
3053 descriptor_config
= nsd
.get("ns-configuration")
3054 if descriptor_config
and descriptor_config
.get("juju"):
3057 member_vnf_index
= None
3064 # Get additional parameters
3065 deploy_params
= {"OSM": {"vim_account_id": ns_params
["vimAccountId"]}}
3066 if db_nsr
.get("additionalParamsForNs"):
3067 deploy_params
.update(
3068 parse_yaml_strings(db_nsr
["additionalParamsForNs"].copy())
3070 base_folder
= nsd
["_admin"]["storage"]
3072 logging_text
=logging_text
,
3075 nslcmop_id
=nslcmop_id
,
3081 member_vnf_index
=member_vnf_index
,
3082 vdu_index
=vdu_index
,
3083 kdu_index
=kdu_index
,
3085 deploy_params
=deploy_params
,
3086 descriptor_config
=descriptor_config
,
3087 base_folder
=base_folder
,
3088 task_instantiation_info
=tasks_dict_info
,
3092 # rest of staff will be done at finally
3095 ROclient
.ROClientException
,
3101 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
)
3104 except asyncio
.CancelledError
:
3106 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
3108 exc
= "Operation was cancelled"
3109 except Exception as e
:
3110 exc
= traceback
.format_exc()
3111 self
.logger
.critical(
3112 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
3117 error_list
.append(str(exc
))
3119 # wait for pending tasks
3121 stage
[1] = "Waiting for instantiate pending tasks."
3122 self
.logger
.debug(logging_text
+ stage
[1])
3123 error_list
+= await self
._wait
_for
_tasks
(
3131 stage
[1] = stage
[2] = ""
3132 except asyncio
.CancelledError
:
3133 error_list
.append("Cancelled")
3134 # TODO cancel all tasks
3135 except Exception as exc
:
3136 error_list
.append(str(exc
))
3138 # update operation-status
3139 db_nsr_update
["operational-status"] = "running"
3140 # let's begin with VCA 'configured' status (later we can change it)
3141 db_nsr_update
["config-status"] = "configured"
3142 for task
, task_name
in tasks_dict_info
.items():
3143 if not task
.done() or task
.cancelled() or task
.exception():
3144 if task_name
.startswith(self
.task_name_deploy_vca
):
3145 # A N2VC task is pending
3146 db_nsr_update
["config-status"] = "failed"
3148 # RO or KDU task is pending
3149 db_nsr_update
["operational-status"] = "failed"
3151 # update status at database
3153 error_detail
= ". ".join(error_list
)
3154 self
.logger
.error(logging_text
+ error_detail
)
3155 error_description_nslcmop
= "{} Detail: {}".format(
3156 stage
[0], error_detail
3158 error_description_nsr
= "Operation: INSTANTIATING.{}, {}".format(
3159 nslcmop_id
, stage
[0]
3162 db_nsr_update
["detailed-status"] = (
3163 error_description_nsr
+ " Detail: " + error_detail
3165 db_nslcmop_update
["detailed-status"] = error_detail
3166 nslcmop_operation_state
= "FAILED"
3170 error_description_nsr
= error_description_nslcmop
= None
3172 db_nsr_update
["detailed-status"] = "Done"
3173 db_nslcmop_update
["detailed-status"] = "Done"
3174 nslcmop_operation_state
= "COMPLETED"
3175 # Gather auto-healing and auto-scaling alerts for each vnfr
3178 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
3180 (sub
for sub
in db_vnfds
if sub
["_id"] == vnfr
["vnfd-id"]), None
3182 healing_alerts
= self
._gather
_vnfr
_healing
_alerts
(vnfr
, vnfd
)
3183 for alert
in healing_alerts
:
3184 self
.logger
.info(f
"Storing healing alert in MongoDB: {alert}")
3185 self
.db
.create("alerts", alert
)
3187 scaling_alerts
= self
._gather
_vnfr
_scaling
_alerts
(vnfr
, vnfd
)
3188 for alert
in scaling_alerts
:
3189 self
.logger
.info(f
"Storing scaling alert in MongoDB: {alert}")
3190 self
.db
.create("alerts", alert
)
3192 alarm_alerts
= self
._gather
_vnfr
_alarm
_alerts
(vnfr
, vnfd
)
3193 for alert
in alarm_alerts
:
3194 self
.logger
.info(f
"Storing VNF alarm alert in MongoDB: {alert}")
3195 self
.db
.create("alerts", alert
)
3197 self
._write
_ns
_status
(
3200 current_operation
="IDLE",
3201 current_operation_id
=None,
3202 error_description
=error_description_nsr
,
3203 error_detail
=error_detail
,
3204 other_update
=db_nsr_update
,
3206 self
._write
_op
_status
(
3209 error_message
=error_description_nslcmop
,
3210 operation_state
=nslcmop_operation_state
,
3211 other_update
=db_nslcmop_update
,
3214 if nslcmop_operation_state
:
3216 await self
.msg
.aiowrite(
3221 "nslcmop_id": nslcmop_id
,
3222 "operationState": nslcmop_operation_state
,
3225 except Exception as e
:
3227 logging_text
+ "kafka_write notification Exception {}".format(e
)
3230 self
.logger
.debug(logging_text
+ "Exit")
3231 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_instantiate")
3233 def _get_vnfd(self
, vnfd_id
: str, projects_read
: str, cached_vnfds
: Dict
[str, Any
]):
3234 if vnfd_id
not in cached_vnfds
:
3235 cached_vnfds
[vnfd_id
] = self
.db
.get_one(
3236 "vnfds", {"id": vnfd_id
, "_admin.projects_read": projects_read
}
3238 return cached_vnfds
[vnfd_id
]
3240 def _get_vnfr(self
, nsr_id
: str, vnf_profile_id
: str, cached_vnfrs
: Dict
[str, Any
]):
3241 if vnf_profile_id
not in cached_vnfrs
:
3242 cached_vnfrs
[vnf_profile_id
] = self
.db
.get_one(
3245 "member-vnf-index-ref": vnf_profile_id
,
3246 "nsr-id-ref": nsr_id
,
3249 return cached_vnfrs
[vnf_profile_id
]
3251 def _is_deployed_vca_in_relation(
3252 self
, vca
: DeployedVCA
, relation
: Relation
3255 for endpoint
in (relation
.provider
, relation
.requirer
):
3256 if endpoint
["kdu-resource-profile-id"]:
3259 vca
.vnf_profile_id
== endpoint
.vnf_profile_id
3260 and vca
.vdu_profile_id
== endpoint
.vdu_profile_id
3261 and vca
.execution_environment_ref
== endpoint
.execution_environment_ref
3267 def _update_ee_relation_data_with_implicit_data(
3268 self
, nsr_id
, nsd
, ee_relation_data
, cached_vnfds
, vnf_profile_id
: str = None
3270 ee_relation_data
= safe_get_ee_relation(
3271 nsr_id
, ee_relation_data
, vnf_profile_id
=vnf_profile_id
3273 ee_relation_level
= EELevel
.get_level(ee_relation_data
)
3274 if (ee_relation_level
in (EELevel
.VNF
, EELevel
.VDU
)) and not ee_relation_data
[
3275 "execution-environment-ref"
3277 vnf_profile
= get_vnf_profile(nsd
, ee_relation_data
["vnf-profile-id"])
3278 vnfd_id
= vnf_profile
["vnfd-id"]
3279 project
= nsd
["_admin"]["projects_read"][0]
3280 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3283 if ee_relation_level
== EELevel
.VNF
3284 else ee_relation_data
["vdu-profile-id"]
3286 ee
= get_juju_ee_ref(db_vnfd
, entity_id
)
3289 f
"not execution environments found for ee_relation {ee_relation_data}"
3291 ee_relation_data
["execution-environment-ref"] = ee
["id"]
3292 return ee_relation_data
3294 def _get_ns_relations(
3297 nsd
: Dict
[str, Any
],
3299 cached_vnfds
: Dict
[str, Any
],
3300 ) -> List
[Relation
]:
3302 db_ns_relations
= get_ns_configuration_relation_list(nsd
)
3303 for r
in db_ns_relations
:
3304 provider_dict
= None
3305 requirer_dict
= None
3306 if all(key
in r
for key
in ("provider", "requirer")):
3307 provider_dict
= r
["provider"]
3308 requirer_dict
= r
["requirer"]
3309 elif "entities" in r
:
3310 provider_id
= r
["entities"][0]["id"]
3313 "endpoint": r
["entities"][0]["endpoint"],
3315 if provider_id
!= nsd
["id"]:
3316 provider_dict
["vnf-profile-id"] = provider_id
3317 requirer_id
= r
["entities"][1]["id"]
3320 "endpoint": r
["entities"][1]["endpoint"],
3322 if requirer_id
!= nsd
["id"]:
3323 requirer_dict
["vnf-profile-id"] = requirer_id
3326 "provider/requirer or entities must be included in the relation."
3328 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3329 nsr_id
, nsd
, provider_dict
, cached_vnfds
3331 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3332 nsr_id
, nsd
, requirer_dict
, cached_vnfds
3334 provider
= EERelation(relation_provider
)
3335 requirer
= EERelation(relation_requirer
)
3336 relation
= Relation(r
["name"], provider
, requirer
)
3337 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3339 relations
.append(relation
)
3342 def _get_vnf_relations(
3345 nsd
: Dict
[str, Any
],
3347 cached_vnfds
: Dict
[str, Any
],
3348 ) -> List
[Relation
]:
3350 if vca
.target_element
== "ns":
3351 self
.logger
.debug("VCA is a NS charm, not a VNF.")
3353 vnf_profile
= get_vnf_profile(nsd
, vca
.vnf_profile_id
)
3354 vnf_profile_id
= vnf_profile
["id"]
3355 vnfd_id
= vnf_profile
["vnfd-id"]
3356 project
= nsd
["_admin"]["projects_read"][0]
3357 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3358 db_vnf_relations
= get_relation_list(db_vnfd
, vnfd_id
)
3359 for r
in db_vnf_relations
:
3360 provider_dict
= None
3361 requirer_dict
= None
3362 if all(key
in r
for key
in ("provider", "requirer")):
3363 provider_dict
= r
["provider"]
3364 requirer_dict
= r
["requirer"]
3365 elif "entities" in r
:
3366 provider_id
= r
["entities"][0]["id"]
3369 "vnf-profile-id": vnf_profile_id
,
3370 "endpoint": r
["entities"][0]["endpoint"],
3372 if provider_id
!= vnfd_id
:
3373 provider_dict
["vdu-profile-id"] = provider_id
3374 requirer_id
= r
["entities"][1]["id"]
3377 "vnf-profile-id": vnf_profile_id
,
3378 "endpoint": r
["entities"][1]["endpoint"],
3380 if requirer_id
!= vnfd_id
:
3381 requirer_dict
["vdu-profile-id"] = requirer_id
3384 "provider/requirer or entities must be included in the relation."
3386 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3387 nsr_id
, nsd
, provider_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3389 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3390 nsr_id
, nsd
, requirer_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3392 provider
= EERelation(relation_provider
)
3393 requirer
= EERelation(relation_requirer
)
3394 relation
= Relation(r
["name"], provider
, requirer
)
3395 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3397 relations
.append(relation
)
3400 def _get_kdu_resource_data(
3402 ee_relation
: EERelation
,
3403 db_nsr
: Dict
[str, Any
],
3404 cached_vnfds
: Dict
[str, Any
],
3405 ) -> DeployedK8sResource
:
3406 nsd
= get_nsd(db_nsr
)
3407 vnf_profiles
= get_vnf_profiles(nsd
)
3408 vnfd_id
= find_in_list(
3410 lambda vnf_profile
: vnf_profile
["id"] == ee_relation
.vnf_profile_id
,
3412 project
= nsd
["_admin"]["projects_read"][0]
3413 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3414 kdu_resource_profile
= get_kdu_resource_profile(
3415 db_vnfd
, ee_relation
.kdu_resource_profile_id
3417 kdu_name
= kdu_resource_profile
["kdu-name"]
3418 deployed_kdu
, _
= get_deployed_kdu(
3419 db_nsr
.get("_admin", ()).get("deployed", ()),
3421 ee_relation
.vnf_profile_id
,
3423 deployed_kdu
.update({"resource-name": kdu_resource_profile
["resource-name"]})
3426 def _get_deployed_component(
3428 ee_relation
: EERelation
,
3429 db_nsr
: Dict
[str, Any
],
3430 cached_vnfds
: Dict
[str, Any
],
3431 ) -> DeployedComponent
:
3432 nsr_id
= db_nsr
["_id"]
3433 deployed_component
= None
3434 ee_level
= EELevel
.get_level(ee_relation
)
3435 if ee_level
== EELevel
.NS
:
3436 vca
= get_deployed_vca(db_nsr
, {"vdu_id": None, "member-vnf-index": None})
3438 deployed_component
= DeployedVCA(nsr_id
, vca
)
3439 elif ee_level
== EELevel
.VNF
:
3440 vca
= get_deployed_vca(
3444 "member-vnf-index": ee_relation
.vnf_profile_id
,
3445 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3449 deployed_component
= DeployedVCA(nsr_id
, vca
)
3450 elif ee_level
== EELevel
.VDU
:
3451 vca
= get_deployed_vca(
3454 "vdu_id": ee_relation
.vdu_profile_id
,
3455 "member-vnf-index": ee_relation
.vnf_profile_id
,
3456 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3460 deployed_component
= DeployedVCA(nsr_id
, vca
)
3461 elif ee_level
== EELevel
.KDU
:
3462 kdu_resource_data
= self
._get
_kdu
_resource
_data
(
3463 ee_relation
, db_nsr
, cached_vnfds
3465 if kdu_resource_data
:
3466 deployed_component
= DeployedK8sResource(kdu_resource_data
)
3467 return deployed_component
3469 async def _add_relation(
3473 db_nsr
: Dict
[str, Any
],
3474 cached_vnfds
: Dict
[str, Any
],
3475 cached_vnfrs
: Dict
[str, Any
],
3477 deployed_provider
= self
._get
_deployed
_component
(
3478 relation
.provider
, db_nsr
, cached_vnfds
3480 deployed_requirer
= self
._get
_deployed
_component
(
3481 relation
.requirer
, db_nsr
, cached_vnfds
3485 and deployed_requirer
3486 and deployed_provider
.config_sw_installed
3487 and deployed_requirer
.config_sw_installed
3489 provider_db_vnfr
= (
3491 relation
.provider
.nsr_id
,
3492 relation
.provider
.vnf_profile_id
,
3495 if relation
.provider
.vnf_profile_id
3498 requirer_db_vnfr
= (
3500 relation
.requirer
.nsr_id
,
3501 relation
.requirer
.vnf_profile_id
,
3504 if relation
.requirer
.vnf_profile_id
3507 provider_vca_id
= self
.get_vca_id(provider_db_vnfr
, db_nsr
)
3508 requirer_vca_id
= self
.get_vca_id(requirer_db_vnfr
, db_nsr
)
3509 provider_relation_endpoint
= RelationEndpoint(
3510 deployed_provider
.ee_id
,
3512 relation
.provider
.endpoint
,
3514 requirer_relation_endpoint
= RelationEndpoint(
3515 deployed_requirer
.ee_id
,
3517 relation
.requirer
.endpoint
,
3520 await self
.vca_map
[vca_type
].add_relation(
3521 provider
=provider_relation_endpoint
,
3522 requirer
=requirer_relation_endpoint
,
3524 except N2VCException
as exception
:
3525 self
.logger
.error(exception
)
3526 raise LcmException(exception
)
3530 async def _add_vca_relations(
3536 timeout
: int = 3600,
3539 # 1. find all relations for this VCA
3540 # 2. wait for other peers related
3544 # STEP 1: find all relations for this VCA
3547 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3548 nsd
= get_nsd(db_nsr
)
3551 deployed_vca_dict
= get_deployed_vca_list(db_nsr
)[vca_index
]
3552 my_vca
= DeployedVCA(nsr_id
, deployed_vca_dict
)
3557 relations
.extend(self
._get
_ns
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3558 relations
.extend(self
._get
_vnf
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3560 # if no relations, terminate
3562 self
.logger
.debug(logging_text
+ " No relations")
3565 self
.logger
.debug(logging_text
+ " adding relations {}".format(relations
))
3572 if now
- start
>= timeout
:
3573 self
.logger
.error(logging_text
+ " : timeout adding relations")
3576 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3577 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3579 # for each relation, find the VCA's related
3580 for relation
in relations
.copy():
3581 added
= await self
._add
_relation
(
3589 relations
.remove(relation
)
3592 self
.logger
.debug("Relations added")
3594 await asyncio
.sleep(5.0)
3598 except Exception as e
:
3599 self
.logger
.warn(logging_text
+ " ERROR adding relations: {}".format(e
))
3602 async def _install_kdu(
3610 k8s_instance_info
: dict,
3611 k8params
: dict = None,
3616 k8sclustertype
= k8s_instance_info
["k8scluster-type"]
3619 "collection": "nsrs",
3620 "filter": {"_id": nsr_id
},
3621 "path": nsr_db_path
,
3624 if k8s_instance_info
.get("kdu-deployment-name"):
3625 kdu_instance
= k8s_instance_info
.get("kdu-deployment-name")
3627 kdu_instance
= self
.k8scluster_map
[
3629 ].generate_kdu_instance_name(
3630 db_dict
=db_dict_install
,
3631 kdu_model
=k8s_instance_info
["kdu-model"],
3632 kdu_name
=k8s_instance_info
["kdu-name"],
3635 # Update the nsrs table with the kdu-instance value
3639 _desc
={nsr_db_path
+ ".kdu-instance": kdu_instance
},
3642 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3643 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3644 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3645 # namespace, this first verification could be removed, and the next step would be done for any kind
3647 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3648 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3649 if k8sclustertype
in ("juju", "juju-bundle"):
3650 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3651 # that the user passed a namespace which he wants its KDU to be deployed in)
3657 "_admin.projects_write": k8s_instance_info
["namespace"],
3658 "_admin.projects_read": k8s_instance_info
["namespace"],
3664 f
"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3669 _desc
={f
"{nsr_db_path}.namespace": kdu_instance
},
3671 k8s_instance_info
["namespace"] = kdu_instance
3673 await self
.k8scluster_map
[k8sclustertype
].install(
3674 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3675 kdu_model
=k8s_instance_info
["kdu-model"],
3678 db_dict
=db_dict_install
,
3680 kdu_name
=k8s_instance_info
["kdu-name"],
3681 namespace
=k8s_instance_info
["namespace"],
3682 kdu_instance
=kdu_instance
,
3686 # Obtain services to obtain management service ip
3687 services
= await self
.k8scluster_map
[k8sclustertype
].get_services(
3688 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3689 kdu_instance
=kdu_instance
,
3690 namespace
=k8s_instance_info
["namespace"],
3693 # Obtain management service info (if exists)
3694 vnfr_update_dict
= {}
3695 kdu_config
= get_configuration(vnfd
, kdud
["name"])
3697 target_ee_list
= kdu_config
.get("execution-environment-list", [])
3702 vnfr_update_dict
["kdur.{}.services".format(kdu_index
)] = services
3705 for service
in kdud
.get("service", [])
3706 if service
.get("mgmt-service")
3708 for mgmt_service
in mgmt_services
:
3709 for service
in services
:
3710 if service
["name"].startswith(mgmt_service
["name"]):
3711 # Mgmt service found, Obtain service ip
3712 ip
= service
.get("external_ip", service
.get("cluster_ip"))
3713 if isinstance(ip
, list) and len(ip
) == 1:
3717 "kdur.{}.ip-address".format(kdu_index
)
3720 # Check if must update also mgmt ip at the vnf
3721 service_external_cp
= mgmt_service
.get(
3722 "external-connection-point-ref"
3724 if service_external_cp
:
3726 deep_get(vnfd
, ("mgmt-interface", "cp"))
3727 == service_external_cp
3729 vnfr_update_dict
["ip-address"] = ip
3734 "external-connection-point-ref", ""
3736 == service_external_cp
,
3739 "kdur.{}.ip-address".format(kdu_index
)
3744 "Mgmt service name: {} not found".format(
3745 mgmt_service
["name"]
3749 vnfr_update_dict
["kdur.{}.status".format(kdu_index
)] = "READY"
3750 self
.update_db_2("vnfrs", vnfr_data
.get("_id"), vnfr_update_dict
)
3752 kdu_config
= get_configuration(vnfd
, k8s_instance_info
["kdu-name"])
3755 and kdu_config
.get("initial-config-primitive")
3756 and get_juju_ee_ref(vnfd
, k8s_instance_info
["kdu-name"]) is None
3758 initial_config_primitive_list
= kdu_config
.get(
3759 "initial-config-primitive"
3761 initial_config_primitive_list
.sort(key
=lambda val
: int(val
["seq"]))
3763 for initial_config_primitive
in initial_config_primitive_list
:
3764 primitive_params_
= self
._map
_primitive
_params
(
3765 initial_config_primitive
, {}, {}
3768 await asyncio
.wait_for(
3769 self
.k8scluster_map
[k8sclustertype
].exec_primitive(
3770 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3771 kdu_instance
=kdu_instance
,
3772 primitive_name
=initial_config_primitive
["name"],
3773 params
=primitive_params_
,
3774 db_dict
=db_dict_install
,
3780 except Exception as e
:
3781 # Prepare update db with error and raise exception
3784 "nsrs", nsr_id
, {nsr_db_path
+ ".detailed-status": str(e
)}
3788 vnfr_data
.get("_id"),
3789 {"kdur.{}.status".format(kdu_index
): "ERROR"},
3792 # ignore to keep original exception
3794 # reraise original error
3799 async def deploy_kdus(
3806 task_instantiation_info
,
3808 # Launch kdus if present in the descriptor
3810 k8scluster_id_2_uuic
= {
3811 "helm-chart-v3": {},
3816 async def _get_cluster_id(cluster_id
, cluster_type
):
3817 nonlocal k8scluster_id_2_uuic
3818 if cluster_id
in k8scluster_id_2_uuic
[cluster_type
]:
3819 return k8scluster_id_2_uuic
[cluster_type
][cluster_id
]
3821 # check if K8scluster is creating and wait look if previous tasks in process
3822 task_name
, task_dependency
= self
.lcm_tasks
.lookfor_related(
3823 "k8scluster", cluster_id
3826 text
= "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3827 task_name
, cluster_id
3829 self
.logger
.debug(logging_text
+ text
)
3830 await asyncio
.wait(task_dependency
, timeout
=3600)
3832 db_k8scluster
= self
.db
.get_one(
3833 "k8sclusters", {"_id": cluster_id
}, fail_on_empty
=False
3835 if not db_k8scluster
:
3836 raise LcmException("K8s cluster {} cannot be found".format(cluster_id
))
3838 k8s_id
= deep_get(db_k8scluster
, ("_admin", cluster_type
, "id"))
3840 if cluster_type
== "helm-chart-v3":
3842 # backward compatibility for existing clusters that have not been initialized for helm v3
3843 k8s_credentials
= yaml
.safe_dump(
3844 db_k8scluster
.get("credentials")
3846 k8s_id
, uninstall_sw
= await self
.k8sclusterhelm3
.init_env(
3847 k8s_credentials
, reuse_cluster_uuid
=cluster_id
3849 db_k8scluster_update
= {}
3850 db_k8scluster_update
["_admin.helm-chart-v3.error_msg"] = None
3851 db_k8scluster_update
["_admin.helm-chart-v3.id"] = k8s_id
3852 db_k8scluster_update
[
3853 "_admin.helm-chart-v3.created"
3855 db_k8scluster_update
[
3856 "_admin.helm-chart-v3.operationalState"
3859 "k8sclusters", cluster_id
, db_k8scluster_update
3861 except Exception as e
:
3864 + "error initializing helm-v3 cluster: {}".format(str(e
))
3867 "K8s cluster '{}' has not been initialized for '{}'".format(
3868 cluster_id
, cluster_type
3873 "K8s cluster '{}' has not been initialized for '{}'".format(
3874 cluster_id
, cluster_type
3877 k8scluster_id_2_uuic
[cluster_type
][cluster_id
] = k8s_id
3880 logging_text
+= "Deploy kdus: "
3883 db_nsr_update
= {"_admin.deployed.K8s": []}
3884 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3887 updated_cluster_list
= []
3888 updated_v3_cluster_list
= []
3890 for vnfr_data
in db_vnfrs
.values():
3891 vca_id
= self
.get_vca_id(vnfr_data
, {})
3892 for kdu_index
, kdur
in enumerate(get_iterable(vnfr_data
, "kdur")):
3893 # Step 0: Prepare and set parameters
3894 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
3895 vnfd_id
= vnfr_data
.get("vnfd-id")
3896 vnfd_with_id
= find_in_list(
3897 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3901 for kdud
in vnfd_with_id
["kdu"]
3902 if kdud
["name"] == kdur
["kdu-name"]
3904 namespace
= kdur
.get("k8s-namespace")
3905 kdu_deployment_name
= kdur
.get("kdu-deployment-name")
3906 if kdur
.get("helm-chart"):
3907 kdumodel
= kdur
["helm-chart"]
3908 # Default version: helm3, if helm-version is v2 assign v2
3909 k8sclustertype
= "helm-chart-v3"
3910 self
.logger
.debug("kdur: {}".format(kdur
))
3912 kdur
.get("helm-version")
3913 and kdur
.get("helm-version") == "v2"
3915 k8sclustertype
= "helm-chart"
3916 elif kdur
.get("juju-bundle"):
3917 kdumodel
= kdur
["juju-bundle"]
3918 k8sclustertype
= "juju-bundle"
3921 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3922 "juju-bundle. Maybe an old NBI version is running".format(
3923 vnfr_data
["member-vnf-index-ref"], kdur
["kdu-name"]
3926 # check if kdumodel is a file and exists
3928 vnfd_with_id
= find_in_list(
3929 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3931 storage
= deep_get(vnfd_with_id
, ("_admin", "storage"))
3932 if storage
: # may be not present if vnfd has not artifacts
3933 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3934 if storage
["pkg-dir"]:
3935 filename
= "{}/{}/{}s/{}".format(
3942 filename
= "{}/Scripts/{}s/{}".format(
3947 if self
.fs
.file_exists(
3948 filename
, mode
="file"
3949 ) or self
.fs
.file_exists(filename
, mode
="dir"):
3950 kdumodel
= self
.fs
.path
+ filename
3951 except (asyncio
.TimeoutError
, asyncio
.CancelledError
):
3953 except Exception: # it is not a file
3956 k8s_cluster_id
= kdur
["k8s-cluster"]["id"]
3957 step
= "Synchronize repos for k8s cluster '{}'".format(
3960 cluster_uuid
= await _get_cluster_id(k8s_cluster_id
, k8sclustertype
)
3964 k8sclustertype
== "helm-chart"
3965 and cluster_uuid
not in updated_cluster_list
3967 k8sclustertype
== "helm-chart-v3"
3968 and cluster_uuid
not in updated_v3_cluster_list
3970 del_repo_list
, added_repo_dict
= await asyncio
.ensure_future(
3971 self
.k8scluster_map
[k8sclustertype
].synchronize_repos(
3972 cluster_uuid
=cluster_uuid
3975 if del_repo_list
or added_repo_dict
:
3976 if k8sclustertype
== "helm-chart":
3978 "_admin.helm_charts_added." + item
: None
3979 for item
in del_repo_list
3982 "_admin.helm_charts_added." + item
: name
3983 for item
, name
in added_repo_dict
.items()
3985 updated_cluster_list
.append(cluster_uuid
)
3986 elif k8sclustertype
== "helm-chart-v3":
3988 "_admin.helm_charts_v3_added." + item
: None
3989 for item
in del_repo_list
3992 "_admin.helm_charts_v3_added." + item
: name
3993 for item
, name
in added_repo_dict
.items()
3995 updated_v3_cluster_list
.append(cluster_uuid
)
3997 logging_text
+ "repos synchronized on k8s cluster "
3998 "'{}' to_delete: {}, to_add: {}".format(
3999 k8s_cluster_id
, del_repo_list
, added_repo_dict
4004 {"_id": k8s_cluster_id
},
4010 step
= "Instantiating KDU {}.{} in k8s cluster {}".format(
4011 vnfr_data
["member-vnf-index-ref"],
4015 k8s_instance_info
= {
4016 "kdu-instance": None,
4017 "k8scluster-uuid": cluster_uuid
,
4018 "k8scluster-type": k8sclustertype
,
4019 "member-vnf-index": vnfr_data
["member-vnf-index-ref"],
4020 "kdu-name": kdur
["kdu-name"],
4021 "kdu-model": kdumodel
,
4022 "namespace": namespace
,
4023 "kdu-deployment-name": kdu_deployment_name
,
4025 db_path
= "_admin.deployed.K8s.{}".format(index
)
4026 db_nsr_update
[db_path
] = k8s_instance_info
4027 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
4028 vnfd_with_id
= find_in_list(
4029 db_vnfds
, lambda vnf
: vnf
["_id"] == vnfd_id
4031 task
= asyncio
.ensure_future(
4040 k8params
=desc_params
,
4045 self
.lcm_tasks
.register(
4049 "instantiate_KDU-{}".format(index
),
4052 task_instantiation_info
[task
] = "Deploying KDU {}".format(
4058 except (LcmException
, asyncio
.CancelledError
):
4060 except Exception as e
:
4061 msg
= "Exception {} while {}: {}".format(type(e
).__name
__, step
, e
)
4062 if isinstance(e
, (N2VCException
, DbException
)):
4063 self
.logger
.error(logging_text
+ msg
)
4065 self
.logger
.critical(logging_text
+ msg
, exc_info
=True)
4066 raise LcmException(msg
)
4069 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
4089 task_instantiation_info
,
4092 # launch instantiate_N2VC in a asyncio task and register task object
4093 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
4094 # if not found, create one entry and update database
4095 # fill db_nsr._admin.deployed.VCA.<index>
4098 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
4102 get_charm_name
= False
4103 if "execution-environment-list" in descriptor_config
:
4104 ee_list
= descriptor_config
.get("execution-environment-list", [])
4105 elif "juju" in descriptor_config
:
4106 ee_list
= [descriptor_config
] # ns charms
4107 if "execution-environment-list" not in descriptor_config
:
4108 # charm name is only required for ns charms
4109 get_charm_name
= True
4110 else: # other types as script are not supported
4113 for ee_item
in ee_list
:
4116 + "_deploy_n2vc ee_item juju={}, helm={}".format(
4117 ee_item
.get("juju"), ee_item
.get("helm-chart")
4120 ee_descriptor_id
= ee_item
.get("id")
4121 if ee_item
.get("juju"):
4122 vca_name
= ee_item
["juju"].get("charm")
4124 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
4127 if ee_item
["juju"].get("charm") is not None
4130 if ee_item
["juju"].get("cloud") == "k8s":
4131 vca_type
= "k8s_proxy_charm"
4132 elif ee_item
["juju"].get("proxy") is False:
4133 vca_type
= "native_charm"
4134 elif ee_item
.get("helm-chart"):
4135 vca_name
= ee_item
["helm-chart"]
4136 if ee_item
.get("helm-version") and ee_item
.get("helm-version") == "v2":
4139 vca_type
= "helm-v3"
4142 logging_text
+ "skipping non juju neither charm configuration"
4147 for vca_index
, vca_deployed
in enumerate(
4148 db_nsr
["_admin"]["deployed"]["VCA"]
4150 if not vca_deployed
:
4153 vca_deployed
.get("member-vnf-index") == member_vnf_index
4154 and vca_deployed
.get("vdu_id") == vdu_id
4155 and vca_deployed
.get("kdu_name") == kdu_name
4156 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
4157 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
4161 # not found, create one.
4163 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
4166 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
4168 target
+= "/kdu/{}".format(kdu_name
)
4170 "target_element": target
,
4171 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4172 "member-vnf-index": member_vnf_index
,
4174 "kdu_name": kdu_name
,
4175 "vdu_count_index": vdu_index
,
4176 "operational-status": "init", # TODO revise
4177 "detailed-status": "", # TODO revise
4178 "step": "initial-deploy", # TODO revise
4180 "vdu_name": vdu_name
,
4182 "ee_descriptor_id": ee_descriptor_id
,
4183 "charm_name": charm_name
,
4187 # create VCA and configurationStatus in db
4189 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
4190 "configurationStatus.{}".format(vca_index
): dict(),
4192 self
.update_db_2("nsrs", nsr_id
, db_dict
)
4194 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
4196 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
4197 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
4198 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
4201 task_n2vc
= asyncio
.ensure_future(
4202 self
.instantiate_N2VC(
4203 logging_text
=logging_text
,
4204 vca_index
=vca_index
,
4210 vdu_index
=vdu_index
,
4211 kdu_index
=kdu_index
,
4212 deploy_params
=deploy_params
,
4213 config_descriptor
=descriptor_config
,
4214 base_folder
=base_folder
,
4215 nslcmop_id
=nslcmop_id
,
4219 ee_config_descriptor
=ee_item
,
4222 self
.lcm_tasks
.register(
4226 "instantiate_N2VC-{}".format(vca_index
),
4229 task_instantiation_info
[
4231 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
4232 member_vnf_index
or "", vdu_id
or ""
4236 def _create_nslcmop(nsr_id
, operation
, params
):
4238 Creates a ns-lcm-opp content to be stored at database.
4239 :param nsr_id: internal id of the instance
4240 :param operation: instantiate, terminate, scale, action, ...
4241 :param params: user parameters for the operation
4242 :return: dictionary following SOL005 format
4244 # Raise exception if invalid arguments
4245 if not (nsr_id
and operation
and params
):
4247 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4254 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4255 "operationState": "PROCESSING",
4256 "statusEnteredTime": now
,
4257 "nsInstanceId": nsr_id
,
4258 "lcmOperationType": operation
,
4260 "isAutomaticInvocation": False,
4261 "operationParams": params
,
4262 "isCancelPending": False,
4264 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id
,
4265 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id
,
4270 def _format_additional_params(self
, params
):
4271 params
= params
or {}
4272 for key
, value
in params
.items():
4273 if str(value
).startswith("!!yaml "):
4274 params
[key
] = yaml
.safe_load(value
[7:])
4277 def _get_terminate_primitive_params(self
, seq
, vnf_index
):
4278 primitive
= seq
.get("name")
4279 primitive_params
= {}
4281 "member_vnf_index": vnf_index
,
4282 "primitive": primitive
,
4283 "primitive_params": primitive_params
,
4286 return self
._map
_primitive
_params
(seq
, params
, desc_params
)
4290 def _retry_or_skip_suboperation(self
, db_nslcmop
, op_index
):
4291 op
= deep_get(db_nslcmop
, ("_admin", "operations"), [])[op_index
]
4292 if op
.get("operationState") == "COMPLETED":
4293 # b. Skip sub-operation
4294 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4295 return self
.SUBOPERATION_STATUS_SKIP
4297 # c. retry executing sub-operation
4298 # The sub-operation exists, and operationState != 'COMPLETED'
4299 # Update operationState = 'PROCESSING' to indicate a retry.
4300 operationState
= "PROCESSING"
4301 detailed_status
= "In progress"
4302 self
._update
_suboperation
_status
(
4303 db_nslcmop
, op_index
, operationState
, detailed_status
4305 # Return the sub-operation index
4306 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4307 # with arguments extracted from the sub-operation
4310 # Find a sub-operation where all keys in a matching dictionary must match
4311 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4312 def _find_suboperation(self
, db_nslcmop
, match
):
4313 if db_nslcmop
and match
:
4314 op_list
= db_nslcmop
.get("_admin", {}).get("operations", [])
4315 for i
, op
in enumerate(op_list
):
4316 if all(op
.get(k
) == match
[k
] for k
in match
):
4318 return self
.SUBOPERATION_STATUS_NOT_FOUND
4320 # Update status for a sub-operation given its index
4321 def _update_suboperation_status(
4322 self
, db_nslcmop
, op_index
, operationState
, detailed_status
4324 # Update DB for HA tasks
4325 q_filter
= {"_id": db_nslcmop
["_id"]}
4327 "_admin.operations.{}.operationState".format(op_index
): operationState
,
4328 "_admin.operations.{}.detailed-status".format(op_index
): detailed_status
,
4331 "nslcmops", q_filter
=q_filter
, update_dict
=update_dict
, fail_on_empty
=False
4334 # Add sub-operation, return the index of the added sub-operation
4335 # Optionally, set operationState, detailed-status, and operationType
4336 # Status and type are currently set for 'scale' sub-operations:
4337 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4338 # 'detailed-status' : status message
4339 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4340 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4341 def _add_suboperation(
4349 mapped_primitive_params
,
4350 operationState
=None,
4351 detailed_status
=None,
4354 RO_scaling_info
=None,
4357 return self
.SUBOPERATION_STATUS_NOT_FOUND
4358 # Get the "_admin.operations" list, if it exists
4359 db_nslcmop_admin
= db_nslcmop
.get("_admin", {})
4360 op_list
= db_nslcmop_admin
.get("operations")
4361 # Create or append to the "_admin.operations" list
4363 "member_vnf_index": vnf_index
,
4365 "vdu_count_index": vdu_count_index
,
4366 "primitive": primitive
,
4367 "primitive_params": mapped_primitive_params
,
4370 new_op
["operationState"] = operationState
4372 new_op
["detailed-status"] = detailed_status
4374 new_op
["lcmOperationType"] = operationType
4376 new_op
["RO_nsr_id"] = RO_nsr_id
4378 new_op
["RO_scaling_info"] = RO_scaling_info
4380 # No existing operations, create key 'operations' with current operation as first list element
4381 db_nslcmop_admin
.update({"operations": [new_op
]})
4382 op_list
= db_nslcmop_admin
.get("operations")
4384 # Existing operations, append operation to list
4385 op_list
.append(new_op
)
4387 db_nslcmop_update
= {"_admin.operations": op_list
}
4388 self
.update_db_2("nslcmops", db_nslcmop
["_id"], db_nslcmop_update
)
4389 op_index
= len(op_list
) - 1
4392 # Helper methods for scale() sub-operations
4394 # pre-scale/post-scale:
4395 # Check for 3 different cases:
4396 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4397 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4398 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4399 def _check_or_add_scale_suboperation(
4403 vnf_config_primitive
,
4407 RO_scaling_info
=None,
4409 # Find this sub-operation
4410 if RO_nsr_id
and RO_scaling_info
:
4411 operationType
= "SCALE-RO"
4413 "member_vnf_index": vnf_index
,
4414 "RO_nsr_id": RO_nsr_id
,
4415 "RO_scaling_info": RO_scaling_info
,
4419 "member_vnf_index": vnf_index
,
4420 "primitive": vnf_config_primitive
,
4421 "primitive_params": primitive_params
,
4422 "lcmOperationType": operationType
,
4424 op_index
= self
._find
_suboperation
(db_nslcmop
, match
)
4425 if op_index
== self
.SUBOPERATION_STATUS_NOT_FOUND
:
4426 # a. New sub-operation
4427 # The sub-operation does not exist, add it.
4428 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4429 # The following parameters are set to None for all kind of scaling:
4431 vdu_count_index
= None
4433 if RO_nsr_id
and RO_scaling_info
:
4434 vnf_config_primitive
= None
4435 primitive_params
= None
4438 RO_scaling_info
= None
4439 # Initial status for sub-operation
4440 operationState
= "PROCESSING"
4441 detailed_status
= "In progress"
4442 # Add sub-operation for pre/post-scaling (zero or more operations)
4443 self
._add
_suboperation
(
4449 vnf_config_primitive
,
4457 return self
.SUBOPERATION_STATUS_NEW
4459 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4460 # or op_index (operationState != 'COMPLETED')
4461 return self
._retry
_or
_skip
_suboperation
(db_nslcmop
, op_index
)
4463 # Function to return execution_environment id
4465 def _get_ee_id(self
, vnf_index
, vdu_id
, vca_deployed_list
):
4466 # TODO vdu_index_count
4467 for vca
in vca_deployed_list
:
4468 if vca
["member-vnf-index"] == vnf_index
and vca
["vdu_id"] == vdu_id
:
4469 return vca
.get("ee_id")
4471 async def destroy_N2VC(
4479 exec_primitives
=True,
4484 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4485 :param logging_text:
4487 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4488 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4489 :param vca_index: index in the database _admin.deployed.VCA
4490 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4491 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4492 not executed properly
4493 :param scaling_in: True destroys the application, False destroys the model
4494 :return: None or exception
4499 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4500 vca_index
, vca_deployed
, config_descriptor
, destroy_ee
4504 vca_type
= vca_deployed
.get("type", "lxc_proxy_charm")
4506 # execute terminate_primitives
4508 terminate_primitives
= get_ee_sorted_terminate_config_primitive_list(
4509 config_descriptor
.get("terminate-config-primitive"),
4510 vca_deployed
.get("ee_descriptor_id"),
4512 vdu_id
= vca_deployed
.get("vdu_id")
4513 vdu_count_index
= vca_deployed
.get("vdu_count_index")
4514 vdu_name
= vca_deployed
.get("vdu_name")
4515 vnf_index
= vca_deployed
.get("member-vnf-index")
4516 if terminate_primitives
and vca_deployed
.get("needed_terminate"):
4517 for seq
in terminate_primitives
:
4518 # For each sequence in list, get primitive and call _ns_execute_primitive()
4519 step
= "Calling terminate action for vnf_member_index={} primitive={}".format(
4520 vnf_index
, seq
.get("name")
4522 self
.logger
.debug(logging_text
+ step
)
4523 # Create the primitive for each sequence, i.e. "primitive": "touch"
4524 primitive
= seq
.get("name")
4525 mapped_primitive_params
= self
._get
_terminate
_primitive
_params
(
4530 self
._add
_suboperation
(
4537 mapped_primitive_params
,
4539 # Sub-operations: Call _ns_execute_primitive() instead of action()
4541 result
, result_detail
= await self
._ns
_execute
_primitive
(
4542 vca_deployed
["ee_id"],
4544 mapped_primitive_params
,
4548 except LcmException
:
4549 # this happens when VCA is not deployed. In this case it is not needed to terminate
4551 result_ok
= ["COMPLETED", "PARTIALLY_COMPLETED"]
4552 if result
not in result_ok
:
4554 "terminate_primitive {} for vnf_member_index={} fails with "
4555 "error {}".format(seq
.get("name"), vnf_index
, result_detail
)
4557 # set that this VCA do not need terminated
4558 db_update_entry
= "_admin.deployed.VCA.{}.needed_terminate".format(
4562 "nsrs", db_nslcmop
["nsInstanceId"], {db_update_entry
: False}
4565 # Delete Prometheus Jobs if any
4566 # This uses NSR_ID, so it will destroy any jobs under this index
4567 self
.db
.del_list("prometheus_jobs", {"nsr_id": db_nslcmop
["nsInstanceId"]})
4570 await self
.vca_map
[vca_type
].delete_execution_environment(
4571 vca_deployed
["ee_id"],
4572 scaling_in
=scaling_in
,
4577 async def _delete_all_N2VC(self
, db_nsr
: dict, vca_id
: str = None):
4578 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="TERMINATING")
4579 namespace
= "." + db_nsr
["_id"]
4581 await self
.n2vc
.delete_namespace(
4582 namespace
=namespace
,
4583 total_timeout
=self
.timeout
.charm_delete
,
4586 except N2VCNotFound
: # already deleted. Skip
4588 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="DELETED")
4590 async def terminate(self
, nsr_id
, nslcmop_id
):
4591 # Try to lock HA task here
4592 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
4593 if not task_is_locked_by_me
:
4596 logging_text
= "Task ns={} terminate={} ".format(nsr_id
, nslcmop_id
)
4597 self
.logger
.debug(logging_text
+ "Enter")
4598 timeout_ns_terminate
= self
.timeout
.ns_terminate
4601 operation_params
= None
4603 error_list
= [] # annotates all failed error messages
4604 db_nslcmop_update
= {}
4605 autoremove
= False # autoremove after terminated
4606 tasks_dict_info
= {}
4609 "Stage 1/3: Preparing task.",
4610 "Waiting for previous operations to terminate.",
4613 # ^ contains [stage, step, VIM-status]
4615 # wait for any previous tasks in process
4616 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
4618 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
4619 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
4620 operation_params
= db_nslcmop
.get("operationParams") or {}
4621 if operation_params
.get("timeout_ns_terminate"):
4622 timeout_ns_terminate
= operation_params
["timeout_ns_terminate"]
4623 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
4624 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
4626 db_nsr_update
["operational-status"] = "terminating"
4627 db_nsr_update
["config-status"] = "terminating"
4628 self
._write
_ns
_status
(
4630 ns_state
="TERMINATING",
4631 current_operation
="TERMINATING",
4632 current_operation_id
=nslcmop_id
,
4633 other_update
=db_nsr_update
,
4635 self
._write
_op
_status
(op_id
=nslcmop_id
, queuePosition
=0, stage
=stage
)
4636 nsr_deployed
= deepcopy(db_nsr
["_admin"].get("deployed")) or {}
4637 if db_nsr
["_admin"]["nsState"] == "NOT_INSTANTIATED":
4640 stage
[1] = "Getting vnf descriptors from db."
4641 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
4643 db_vnfr
["member-vnf-index-ref"]: db_vnfr
for db_vnfr
in db_vnfrs_list
4645 db_vnfds_from_id
= {}
4646 db_vnfds_from_member_index
= {}
4648 for vnfr
in db_vnfrs_list
:
4649 vnfd_id
= vnfr
["vnfd-id"]
4650 if vnfd_id
not in db_vnfds_from_id
:
4651 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
4652 db_vnfds_from_id
[vnfd_id
] = vnfd
4653 db_vnfds_from_member_index
[
4654 vnfr
["member-vnf-index-ref"]
4655 ] = db_vnfds_from_id
[vnfd_id
]
4657 # Destroy individual execution environments when there are terminating primitives.
4658 # Rest of EE will be deleted at once
4659 # TODO - check before calling _destroy_N2VC
4660 # if not operation_params.get("skip_terminate_primitives"):#
4661 # or not vca.get("needed_terminate"):
4662 stage
[0] = "Stage 2/3 execute terminating primitives."
4663 self
.logger
.debug(logging_text
+ stage
[0])
4664 stage
[1] = "Looking execution environment that needs terminate."
4665 self
.logger
.debug(logging_text
+ stage
[1])
4667 for vca_index
, vca
in enumerate(get_iterable(nsr_deployed
, "VCA")):
4668 config_descriptor
= None
4669 vca_member_vnf_index
= vca
.get("member-vnf-index")
4670 vca_id
= self
.get_vca_id(
4671 db_vnfrs_dict
.get(vca_member_vnf_index
)
4672 if vca_member_vnf_index
4676 if not vca
or not vca
.get("ee_id"):
4678 if not vca
.get("member-vnf-index"):
4680 config_descriptor
= db_nsr
.get("ns-configuration")
4681 elif vca
.get("vdu_id"):
4682 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4683 config_descriptor
= get_configuration(db_vnfd
, vca
.get("vdu_id"))
4684 elif vca
.get("kdu_name"):
4685 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4686 config_descriptor
= get_configuration(db_vnfd
, vca
.get("kdu_name"))
4688 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4689 config_descriptor
= get_configuration(db_vnfd
, db_vnfd
["id"])
4690 vca_type
= vca
.get("type")
4691 exec_terminate_primitives
= not operation_params
.get(
4692 "skip_terminate_primitives"
4693 ) and vca
.get("needed_terminate")
4694 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4695 # pending native charms
4697 True if vca_type
in ("helm", "helm-v3", "native_charm") else False
4699 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4700 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4701 task
= asyncio
.ensure_future(
4709 exec_terminate_primitives
,
4713 tasks_dict_info
[task
] = "Terminating VCA {}".format(vca
.get("ee_id"))
4715 # wait for pending tasks of terminate primitives
4719 + "Waiting for tasks {}".format(list(tasks_dict_info
.keys()))
4721 error_list
= await self
._wait
_for
_tasks
(
4724 min(self
.timeout
.charm_delete
, timeout_ns_terminate
),
4728 tasks_dict_info
.clear()
4730 return # raise LcmException("; ".join(error_list))
4732 # remove All execution environments at once
4733 stage
[0] = "Stage 3/3 delete all."
4735 if nsr_deployed
.get("VCA"):
4736 stage
[1] = "Deleting all execution environments."
4737 self
.logger
.debug(logging_text
+ stage
[1])
4738 vca_id
= self
.get_vca_id({}, db_nsr
)
4739 task_delete_ee
= asyncio
.ensure_future(
4741 self
._delete
_all
_N
2VC
(db_nsr
=db_nsr
, vca_id
=vca_id
),
4742 timeout
=self
.timeout
.charm_delete
,
4745 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4746 tasks_dict_info
[task_delete_ee
] = "Terminating all VCA"
4748 # Delete Namespace and Certificates if necessary
4749 if check_helm_ee_in_ns(list(db_vnfds_from_member_index
.values())):
4750 await self
.vca_map
["helm-v3"].delete_tls_certificate(
4751 namespace
=db_nslcmop
["nsInstanceId"],
4752 certificate_name
=self
.EE_TLS_NAME
,
4754 await self
.vca_map
["helm-v3"].delete_namespace(
4755 namespace
=db_nslcmop
["nsInstanceId"],
4758 # Delete from k8scluster
4759 stage
[1] = "Deleting KDUs."
4760 self
.logger
.debug(logging_text
+ stage
[1])
4761 # print(nsr_deployed)
4762 for kdu
in get_iterable(nsr_deployed
, "K8s"):
4763 if not kdu
or not kdu
.get("kdu-instance"):
4765 kdu_instance
= kdu
.get("kdu-instance")
4766 if kdu
.get("k8scluster-type") in self
.k8scluster_map
:
4767 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4768 vca_id
= self
.get_vca_id({}, db_nsr
)
4769 task_delete_kdu_instance
= asyncio
.ensure_future(
4770 self
.k8scluster_map
[kdu
["k8scluster-type"]].uninstall(
4771 cluster_uuid
=kdu
.get("k8scluster-uuid"),
4772 kdu_instance
=kdu_instance
,
4774 namespace
=kdu
.get("namespace"),
4780 + "Unknown k8s deployment type {}".format(
4781 kdu
.get("k8scluster-type")
4786 task_delete_kdu_instance
4787 ] = "Terminating KDU '{}'".format(kdu
.get("kdu-name"))
4790 stage
[1] = "Deleting ns from VIM."
4791 if self
.ro_config
.ng
:
4792 task_delete_ro
= asyncio
.ensure_future(
4793 self
._terminate
_ng
_ro
(
4794 logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
4797 tasks_dict_info
[task_delete_ro
] = "Removing deployment from VIM"
4799 # rest of staff will be done at finally
4802 ROclient
.ROClientException
,
4807 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
4809 except asyncio
.CancelledError
:
4811 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
4813 exc
= "Operation was cancelled"
4814 except Exception as e
:
4815 exc
= traceback
.format_exc()
4816 self
.logger
.critical(
4817 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
4822 error_list
.append(str(exc
))
4824 # wait for pending tasks
4826 stage
[1] = "Waiting for terminate pending tasks."
4827 self
.logger
.debug(logging_text
+ stage
[1])
4828 error_list
+= await self
._wait
_for
_tasks
(
4831 timeout_ns_terminate
,
4835 stage
[1] = stage
[2] = ""
4836 except asyncio
.CancelledError
:
4837 error_list
.append("Cancelled")
4838 # TODO cancell all tasks
4839 except Exception as exc
:
4840 error_list
.append(str(exc
))
4841 # update status at database
4843 error_detail
= "; ".join(error_list
)
4844 # self.logger.error(logging_text + error_detail)
4845 error_description_nslcmop
= "{} Detail: {}".format(
4846 stage
[0], error_detail
4848 error_description_nsr
= "Operation: TERMINATING.{}, {}.".format(
4849 nslcmop_id
, stage
[0]
4852 db_nsr_update
["operational-status"] = "failed"
4853 db_nsr_update
["detailed-status"] = (
4854 error_description_nsr
+ " Detail: " + error_detail
4856 db_nslcmop_update
["detailed-status"] = error_detail
4857 nslcmop_operation_state
= "FAILED"
4861 error_description_nsr
= error_description_nslcmop
= None
4862 ns_state
= "NOT_INSTANTIATED"
4863 db_nsr_update
["operational-status"] = "terminated"
4864 db_nsr_update
["detailed-status"] = "Done"
4865 db_nsr_update
["_admin.nsState"] = "NOT_INSTANTIATED"
4866 db_nslcmop_update
["detailed-status"] = "Done"
4867 nslcmop_operation_state
= "COMPLETED"
4870 self
._write
_ns
_status
(
4873 current_operation
="IDLE",
4874 current_operation_id
=None,
4875 error_description
=error_description_nsr
,
4876 error_detail
=error_detail
,
4877 other_update
=db_nsr_update
,
4879 self
._write
_op
_status
(
4882 error_message
=error_description_nslcmop
,
4883 operation_state
=nslcmop_operation_state
,
4884 other_update
=db_nslcmop_update
,
4886 if ns_state
== "NOT_INSTANTIATED":
4890 {"nsr-id-ref": nsr_id
},
4891 {"_admin.nsState": "NOT_INSTANTIATED"},
4893 except DbException
as e
:
4896 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4900 if operation_params
:
4901 autoremove
= operation_params
.get("autoremove", False)
4902 if nslcmop_operation_state
:
4904 await self
.msg
.aiowrite(
4909 "nslcmop_id": nslcmop_id
,
4910 "operationState": nslcmop_operation_state
,
4911 "autoremove": autoremove
,
4914 except Exception as e
:
4916 logging_text
+ "kafka_write notification Exception {}".format(e
)
4918 self
.logger
.debug(f
"Deleting alerts: ns_id={nsr_id}")
4919 self
.db
.del_list("alerts", {"tags.ns_id": nsr_id
})
4921 self
.logger
.debug(logging_text
+ "Exit")
4922 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_terminate")
4924 async def _wait_for_tasks(
4925 self
, logging_text
, created_tasks_info
, timeout
, stage
, nslcmop_id
, nsr_id
=None
4928 error_detail_list
= []
4930 pending_tasks
= list(created_tasks_info
.keys())
4931 num_tasks
= len(pending_tasks
)
4933 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4934 self
._write
_op
_status
(nslcmop_id
, stage
)
4935 while pending_tasks
:
4937 _timeout
= timeout
+ time_start
- time()
4938 done
, pending_tasks
= await asyncio
.wait(
4939 pending_tasks
, timeout
=_timeout
, return_when
=asyncio
.FIRST_COMPLETED
4941 num_done
+= len(done
)
4942 if not done
: # Timeout
4943 for task
in pending_tasks
:
4944 new_error
= created_tasks_info
[task
] + ": Timeout"
4945 error_detail_list
.append(new_error
)
4946 error_list
.append(new_error
)
4949 if task
.cancelled():
4952 exc
= task
.exception()
4954 if isinstance(exc
, asyncio
.TimeoutError
):
4956 new_error
= created_tasks_info
[task
] + ": {}".format(exc
)
4957 error_list
.append(created_tasks_info
[task
])
4958 error_detail_list
.append(new_error
)
4965 ROclient
.ROClientException
,
4971 self
.logger
.error(logging_text
+ new_error
)
4973 exc_traceback
= "".join(
4974 traceback
.format_exception(None, exc
, exc
.__traceback
__)
4978 + created_tasks_info
[task
]
4984 logging_text
+ created_tasks_info
[task
] + ": Done"
4986 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4988 stage
[1] += " Errors: " + ". ".join(error_detail_list
) + "."
4989 if nsr_id
: # update also nsr
4994 "errorDescription": "Error at: " + ", ".join(error_list
),
4995 "errorDetail": ". ".join(error_detail_list
),
4998 self
._write
_op
_status
(nslcmop_id
, stage
)
4999 return error_detail_list
5002 def _map_primitive_params(primitive_desc
, params
, instantiation_params
):
5004 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
5005 The default-value is used. If it is between < > it look for a value at instantiation_params
5006 :param primitive_desc: portion of VNFD/NSD that describes primitive
5007 :param params: Params provided by user
5008 :param instantiation_params: Instantiation params provided by user
5009 :return: a dictionary with the calculated params
5011 calculated_params
= {}
5012 for parameter
in primitive_desc
.get("parameter", ()):
5013 param_name
= parameter
["name"]
5014 if param_name
in params
:
5015 calculated_params
[param_name
] = params
[param_name
]
5016 elif "default-value" in parameter
or "value" in parameter
:
5017 if "value" in parameter
:
5018 calculated_params
[param_name
] = parameter
["value"]
5020 calculated_params
[param_name
] = parameter
["default-value"]
5022 isinstance(calculated_params
[param_name
], str)
5023 and calculated_params
[param_name
].startswith("<")
5024 and calculated_params
[param_name
].endswith(">")
5026 if calculated_params
[param_name
][1:-1] in instantiation_params
:
5027 calculated_params
[param_name
] = instantiation_params
[
5028 calculated_params
[param_name
][1:-1]
5032 "Parameter {} needed to execute primitive {} not provided".format(
5033 calculated_params
[param_name
], primitive_desc
["name"]
5038 "Parameter {} needed to execute primitive {} not provided".format(
5039 param_name
, primitive_desc
["name"]
5043 if isinstance(calculated_params
[param_name
], (dict, list, tuple)):
5044 calculated_params
[param_name
] = yaml
.safe_dump(
5045 calculated_params
[param_name
], default_flow_style
=True, width
=256
5047 elif isinstance(calculated_params
[param_name
], str) and calculated_params
[
5049 ].startswith("!!yaml "):
5050 calculated_params
[param_name
] = calculated_params
[param_name
][7:]
5051 if parameter
.get("data-type") == "INTEGER":
5053 calculated_params
[param_name
] = int(calculated_params
[param_name
])
5054 except ValueError: # error converting string to int
5056 "Parameter {} of primitive {} must be integer".format(
5057 param_name
, primitive_desc
["name"]
5060 elif parameter
.get("data-type") == "BOOLEAN":
5061 calculated_params
[param_name
] = not (
5062 (str(calculated_params
[param_name
])).lower() == "false"
5065 # add always ns_config_info if primitive name is config
5066 if primitive_desc
["name"] == "config":
5067 if "ns_config_info" in instantiation_params
:
5068 calculated_params
["ns_config_info"] = instantiation_params
[
5071 return calculated_params
5073 def _look_for_deployed_vca(
5080 ee_descriptor_id
=None,
5082 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
5083 for vca
in deployed_vca
:
5086 if member_vnf_index
!= vca
["member-vnf-index"] or vdu_id
!= vca
["vdu_id"]:
5089 vdu_count_index
is not None
5090 and vdu_count_index
!= vca
["vdu_count_index"]
5093 if kdu_name
and kdu_name
!= vca
["kdu_name"]:
5095 if ee_descriptor_id
and ee_descriptor_id
!= vca
["ee_descriptor_id"]:
5099 # vca_deployed not found
5101 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
5102 " is not deployed".format(
5111 ee_id
= vca
.get("ee_id")
5113 "type", "lxc_proxy_charm"
5114 ) # default value for backward compatibility - proxy charm
5117 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
5118 "execution environment".format(
5119 member_vnf_index
, vdu_id
, kdu_name
, vdu_count_index
5122 return ee_id
, vca_type
5124 async def _ns_execute_primitive(
5130 retries_interval
=30,
5137 if primitive
== "config":
5138 primitive_params
= {"params": primitive_params
}
5140 vca_type
= vca_type
or "lxc_proxy_charm"
5144 output
= await asyncio
.wait_for(
5145 self
.vca_map
[vca_type
].exec_primitive(
5147 primitive_name
=primitive
,
5148 params_dict
=primitive_params
,
5149 progress_timeout
=self
.timeout
.progress_primitive
,
5150 total_timeout
=self
.timeout
.primitive
,
5155 timeout
=timeout
or self
.timeout
.primitive
,
5159 except asyncio
.CancelledError
:
5161 except Exception as e
:
5165 "Error executing action {} on {} -> {}".format(
5170 await asyncio
.sleep(retries_interval
)
5172 if isinstance(e
, asyncio
.TimeoutError
):
5174 message
="Timed out waiting for action to complete"
5176 return "FAILED", getattr(e
, "message", repr(e
))
5178 return "COMPLETED", output
5180 except (LcmException
, asyncio
.CancelledError
):
5182 except Exception as e
:
5183 return "FAIL", "Error executing action {}: {}".format(primitive
, e
)
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Update the vca_status in the nsrs record with the latest juju information.

    :param nsr_id: Id of the nsr
    :param nslcmop_id: Id of the nslcmop
    :return: None
    """
    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_id = self.get_vca_id({}, db_nsr)
    if db_nsr["_admin"]["deployed"]["K8s"]:
        # KDU-based deployment: refresh status for every deployed KDU instance
        for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
            cluster_uuid, kdu_instance, cluster_type = (
                k8s["k8scluster-uuid"],
                k8s["kdu-instance"],
                k8s["k8scluster-type"],
            )
            await self._on_update_k8s_db(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                filter={"_id": nsr_id},
                vca_id=vca_id,
                cluster_type=cluster_type,
            )
    else:
        # VCA (charm) based deployment: refresh each deployed VCA entry
        for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
            table = "nsrs"
            db_filter = {"_id": nsr_id}  # renamed: avoid shadowing builtin "filter"
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            await self._on_update_n2vc_db(table, db_filter, path, {})

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5219 async def action(self
, nsr_id
, nslcmop_id
):
5220 # Try to lock HA task here
5221 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5222 if not task_is_locked_by_me
:
5225 logging_text
= "Task ns={} action={} ".format(nsr_id
, nslcmop_id
)
5226 self
.logger
.debug(logging_text
+ "Enter")
5227 # get all needed from database
5231 db_nslcmop_update
= {}
5232 nslcmop_operation_state
= None
5233 error_description_nslcmop
= None
5237 # wait for any previous tasks in process
5238 step
= "Waiting for previous operations to terminate"
5239 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5241 self
._write
_ns
_status
(
5244 current_operation
="RUNNING ACTION",
5245 current_operation_id
=nslcmop_id
,
5248 step
= "Getting information from database"
5249 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5250 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5251 if db_nslcmop
["operationParams"].get("primitive_params"):
5252 db_nslcmop
["operationParams"]["primitive_params"] = json
.loads(
5253 db_nslcmop
["operationParams"]["primitive_params"]
5256 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5257 vnf_index
= db_nslcmop
["operationParams"].get("member_vnf_index")
5258 vdu_id
= db_nslcmop
["operationParams"].get("vdu_id")
5259 kdu_name
= db_nslcmop
["operationParams"].get("kdu_name")
5260 vdu_count_index
= db_nslcmop
["operationParams"].get("vdu_count_index")
5261 primitive
= db_nslcmop
["operationParams"]["primitive"]
5262 primitive_params
= db_nslcmop
["operationParams"]["primitive_params"]
5263 timeout_ns_action
= db_nslcmop
["operationParams"].get(
5264 "timeout_ns_action", self
.timeout
.primitive
5268 step
= "Getting vnfr from database"
5269 db_vnfr
= self
.db
.get_one(
5270 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
5272 if db_vnfr
.get("kdur"):
5274 for kdur
in db_vnfr
["kdur"]:
5275 if kdur
.get("additionalParams"):
5276 kdur
["additionalParams"] = json
.loads(
5277 kdur
["additionalParams"]
5279 kdur_list
.append(kdur
)
5280 db_vnfr
["kdur"] = kdur_list
5281 step
= "Getting vnfd from database"
5282 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
5284 # Sync filesystem before running a primitive
5285 self
.fs
.sync(db_vnfr
["vnfd-id"])
5287 step
= "Getting nsd from database"
5288 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
5290 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5291 # for backward compatibility
5292 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
5293 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
5294 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
5295 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5297 # look for primitive
5298 config_primitive_desc
= descriptor_configuration
= None
5300 descriptor_configuration
= get_configuration(db_vnfd
, vdu_id
)
5302 descriptor_configuration
= get_configuration(db_vnfd
, kdu_name
)
5304 descriptor_configuration
= get_configuration(db_vnfd
, db_vnfd
["id"])
5306 descriptor_configuration
= db_nsd
.get("ns-configuration")
5308 if descriptor_configuration
and descriptor_configuration
.get(
5311 for config_primitive
in descriptor_configuration
["config-primitive"]:
5312 if config_primitive
["name"] == primitive
:
5313 config_primitive_desc
= config_primitive
5316 if not config_primitive_desc
:
5317 if not (kdu_name
and primitive
in ("upgrade", "rollback", "status")):
5319 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5323 primitive_name
= primitive
5324 ee_descriptor_id
= None
5326 primitive_name
= config_primitive_desc
.get(
5327 "execution-environment-primitive", primitive
5329 ee_descriptor_id
= config_primitive_desc
.get(
5330 "execution-environment-ref"
5336 (x
for x
in db_vnfr
["vdur"] if x
["vdu-id-ref"] == vdu_id
), None
5338 desc_params
= parse_yaml_strings(vdur
.get("additionalParams"))
5341 (x
for x
in db_vnfr
["kdur"] if x
["kdu-name"] == kdu_name
), None
5343 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
5345 desc_params
= parse_yaml_strings(
5346 db_vnfr
.get("additionalParamsForVnf")
5349 desc_params
= parse_yaml_strings(db_nsr
.get("additionalParamsForNs"))
5350 if kdu_name
and get_configuration(db_vnfd
, kdu_name
):
5351 kdu_configuration
= get_configuration(db_vnfd
, kdu_name
)
5353 for primitive
in kdu_configuration
.get("initial-config-primitive", []):
5354 actions
.add(primitive
["name"])
5355 for primitive
in kdu_configuration
.get("config-primitive", []):
5356 actions
.add(primitive
["name"])
5358 nsr_deployed
["K8s"],
5359 lambda kdu
: kdu_name
== kdu
["kdu-name"]
5360 and kdu
["member-vnf-index"] == vnf_index
,
5364 if primitive_name
in actions
5365 and kdu
["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5369 # TODO check if ns is in a proper status
5371 primitive_name
in ("upgrade", "rollback", "status") or kdu_action
5373 # kdur and desc_params already set from before
5374 if primitive_params
:
5375 desc_params
.update(primitive_params
)
5376 # TODO Check if we will need something at vnf level
5377 for index
, kdu
in enumerate(get_iterable(nsr_deployed
, "K8s")):
5379 kdu_name
== kdu
["kdu-name"]
5380 and kdu
["member-vnf-index"] == vnf_index
5385 "KDU '{}' for vnf '{}' not deployed".format(kdu_name
, vnf_index
)
5388 if kdu
.get("k8scluster-type") not in self
.k8scluster_map
:
5389 msg
= "unknown k8scluster-type '{}'".format(
5390 kdu
.get("k8scluster-type")
5392 raise LcmException(msg
)
5395 "collection": "nsrs",
5396 "filter": {"_id": nsr_id
},
5397 "path": "_admin.deployed.K8s.{}".format(index
),
5401 + "Exec k8s {} on {}.{}".format(primitive_name
, vnf_index
, kdu_name
)
5403 step
= "Executing kdu {}".format(primitive_name
)
5404 if primitive_name
== "upgrade":
5405 if desc_params
.get("kdu_model"):
5406 kdu_model
= desc_params
.get("kdu_model")
5407 del desc_params
["kdu_model"]
5409 kdu_model
= kdu
.get("kdu-model")
5410 if kdu_model
.count("/") < 2: # helm chart is not embedded
5411 parts
= kdu_model
.split(sep
=":")
5413 kdu_model
= parts
[0]
5414 if desc_params
.get("kdu_atomic_upgrade"):
5415 atomic_upgrade
= desc_params
.get(
5416 "kdu_atomic_upgrade"
5417 ).lower() in ("yes", "true", "1")
5418 del desc_params
["kdu_atomic_upgrade"]
5420 atomic_upgrade
= True
5422 detailed_status
= await asyncio
.wait_for(
5423 self
.k8scluster_map
[kdu
["k8scluster-type"]].upgrade(
5424 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5425 kdu_instance
=kdu
.get("kdu-instance"),
5426 atomic
=atomic_upgrade
,
5427 kdu_model
=kdu_model
,
5430 timeout
=timeout_ns_action
,
5432 timeout
=timeout_ns_action
+ 10,
5435 logging_text
+ " Upgrade of kdu {} done".format(detailed_status
)
5437 elif primitive_name
== "rollback":
5438 detailed_status
= await asyncio
.wait_for(
5439 self
.k8scluster_map
[kdu
["k8scluster-type"]].rollback(
5440 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5441 kdu_instance
=kdu
.get("kdu-instance"),
5444 timeout
=timeout_ns_action
,
5446 elif primitive_name
== "status":
5447 detailed_status
= await asyncio
.wait_for(
5448 self
.k8scluster_map
[kdu
["k8scluster-type"]].status_kdu(
5449 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5450 kdu_instance
=kdu
.get("kdu-instance"),
5453 timeout
=timeout_ns_action
,
5456 kdu_instance
= kdu
.get("kdu-instance") or "{}-{}".format(
5457 kdu
["kdu-name"], nsr_id
5459 params
= self
._map
_primitive
_params
(
5460 config_primitive_desc
, primitive_params
, desc_params
5463 detailed_status
= await asyncio
.wait_for(
5464 self
.k8scluster_map
[kdu
["k8scluster-type"]].exec_primitive(
5465 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5466 kdu_instance
=kdu_instance
,
5467 primitive_name
=primitive_name
,
5470 timeout
=timeout_ns_action
,
5473 timeout
=timeout_ns_action
,
5477 nslcmop_operation_state
= "COMPLETED"
5479 detailed_status
= ""
5480 nslcmop_operation_state
= "FAILED"
5482 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
5483 nsr_deployed
["VCA"],
5484 member_vnf_index
=vnf_index
,
5486 vdu_count_index
=vdu_count_index
,
5487 ee_descriptor_id
=ee_descriptor_id
,
5489 for vca_index
, vca_deployed
in enumerate(
5490 db_nsr
["_admin"]["deployed"]["VCA"]
5492 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5494 "collection": "nsrs",
5495 "filter": {"_id": nsr_id
},
5496 "path": "_admin.deployed.VCA.{}.".format(vca_index
),
5500 nslcmop_operation_state
,
5502 ) = await self
._ns
_execute
_primitive
(
5504 primitive
=primitive_name
,
5505 primitive_params
=self
._map
_primitive
_params
(
5506 config_primitive_desc
, primitive_params
, desc_params
5508 timeout
=timeout_ns_action
,
5514 db_nslcmop_update
["detailed-status"] = detailed_status
5515 error_description_nslcmop
= (
5516 detailed_status
if nslcmop_operation_state
== "FAILED" else ""
5520 + "Done with result {} {}".format(
5521 nslcmop_operation_state
, detailed_status
5524 return # database update is called inside finally
5526 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
5527 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
5529 except asyncio
.CancelledError
:
5531 logging_text
+ "Cancelled Exception while '{}'".format(step
)
5533 exc
= "Operation was cancelled"
5534 except asyncio
.TimeoutError
:
5535 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
5537 except Exception as e
:
5538 exc
= traceback
.format_exc()
5539 self
.logger
.critical(
5540 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
5549 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
5550 nslcmop_operation_state
= "FAILED"
5552 self
._write
_ns
_status
(
5556 ], # TODO check if degraded. For the moment use previous status
5557 current_operation
="IDLE",
5558 current_operation_id
=None,
5559 # error_description=error_description_nsr,
5560 # error_detail=error_detail,
5561 other_update
=db_nsr_update
,
5564 self
._write
_op
_status
(
5567 error_message
=error_description_nslcmop
,
5568 operation_state
=nslcmop_operation_state
,
5569 other_update
=db_nslcmop_update
,
5572 if nslcmop_operation_state
:
5574 await self
.msg
.aiowrite(
5579 "nslcmop_id": nslcmop_id
,
5580 "operationState": nslcmop_operation_state
,
5583 except Exception as e
:
5585 logging_text
+ "kafka_write notification Exception {}".format(e
)
5587 self
.logger
.debug(logging_text
+ "Exit")
5588 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_action")
5589 return nslcmop_operation_state
, detailed_status
5591 async def terminate_vdus(
5592 self
, db_vnfr
, member_vnf_index
, db_nsr
, update_db_nslcmops
, stage
, logging_text
5594 """This method terminates VDUs
5597 db_vnfr: VNF instance record
5598 member_vnf_index: VNF index to identify the VDUs to be removed
5599 db_nsr: NS instance record
5600 update_db_nslcmops: Nslcmop update record
5602 vca_scaling_info
= []
5603 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5604 scaling_info
["scaling_direction"] = "IN"
5605 scaling_info
["vdu-delete"] = {}
5606 scaling_info
["kdu-delete"] = {}
5607 db_vdur
= db_vnfr
.get("vdur")
5608 vdur_list
= copy(db_vdur
)
5610 for index
, vdu
in enumerate(vdur_list
):
5611 vca_scaling_info
.append(
5613 "osm_vdu_id": vdu
["vdu-id-ref"],
5614 "member-vnf-index": member_vnf_index
,
5616 "vdu_index": count_index
,
5619 scaling_info
["vdu-delete"][vdu
["vdu-id-ref"]] = count_index
5620 scaling_info
["vdu"].append(
5622 "name": vdu
.get("name") or vdu
.get("vdu-name"),
5623 "vdu_id": vdu
["vdu-id-ref"],
5627 for interface
in vdu
["interfaces"]:
5628 scaling_info
["vdu"][index
]["interface"].append(
5630 "name": interface
["name"],
5631 "ip_address": interface
["ip-address"],
5632 "mac_address": interface
.get("mac-address"),
5635 self
.logger
.info("NS update scaling info{}".format(scaling_info
))
5636 stage
[2] = "Terminating VDUs"
5637 if scaling_info
.get("vdu-delete"):
5638 # scale_process = "RO"
5639 if self
.ro_config
.ng
:
5640 await self
._scale
_ng
_ro
(
5649 async def remove_vnf(self
, nsr_id
, nslcmop_id
, vnf_instance_id
):
5650 """This method is to Remove VNF instances from NS.
5653 nsr_id: NS instance id
5654 nslcmop_id: nslcmop id of update
5655 vnf_instance_id: id of the VNF instance to be removed
5658 result: (str, str) COMPLETED/FAILED, details
5662 logging_text
= "Task ns={} update ".format(nsr_id
)
5663 check_vnfr_count
= len(self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}))
5664 self
.logger
.info("check_vnfr_count {}".format(check_vnfr_count
))
5665 if check_vnfr_count
> 1:
5666 stage
= ["", "", ""]
5667 step
= "Getting nslcmop from database"
5669 step
+ " after having waited for previous tasks to be completed"
5671 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5672 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5673 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
5674 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5675 """ db_vnfr = self.db.get_one(
5676 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5678 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5679 await self
.terminate_vdus(
5688 constituent_vnfr
= db_nsr
.get("constituent-vnfr-ref")
5689 constituent_vnfr
.remove(db_vnfr
.get("_id"))
5690 db_nsr_update
["constituent-vnfr-ref"] = db_nsr
.get(
5691 "constituent-vnfr-ref"
5693 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5694 self
.db
.del_one("vnfrs", {"_id": db_vnfr
.get("_id")})
5695 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5696 return "COMPLETED", "Done"
5698 step
= "Terminate VNF Failed with"
5700 "{} Cannot terminate the last VNF in this NS.".format(
5704 except (LcmException
, asyncio
.CancelledError
):
5706 except Exception as e
:
5707 self
.logger
.debug("Error removing VNF {}".format(e
))
5708 return "FAILED", "Error removing VNF {}".format(e
)
5710 async def _ns_redeploy_vnf(
5718 """This method updates and redeploys VNF instances
5721 nsr_id: NS instance id
5722 nslcmop_id: nslcmop id
5723 db_vnfd: VNF descriptor
5724 db_vnfr: VNF instance record
5725 db_nsr: NS instance record
5728 result: (str, str) COMPLETED/FAILED, details
5732 stage
= ["", "", ""]
5733 logging_text
= "Task ns={} update ".format(nsr_id
)
5734 latest_vnfd_revision
= db_vnfd
["_admin"].get("revision")
5735 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5737 # Terminate old VNF resources
5738 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5739 await self
.terminate_vdus(
5748 # old_vnfd_id = db_vnfr["vnfd-id"]
5749 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5750 new_db_vnfd
= db_vnfd
5751 # new_vnfd_ref = new_db_vnfd["id"]
5752 # new_vnfd_id = vnfd_id
5756 for cp
in new_db_vnfd
.get("ext-cpd", ()):
5758 "name": cp
.get("id"),
5759 "connection-point-id": cp
.get("int-cpd", {}).get("cpd"),
5760 "connection-point-vdu-id": cp
.get("int-cpd", {}).get("vdu-id"),
5763 new_vnfr_cp
.append(vnf_cp
)
5764 new_vdur
= update_db_nslcmops
["operationParams"]["newVdur"]
5765 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5766 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5768 "revision": latest_vnfd_revision
,
5769 "connection-point": new_vnfr_cp
,
5773 self
.update_db_2("vnfrs", db_vnfr
["_id"], new_vnfr_update
)
5774 updated_db_vnfr
= self
.db
.get_one(
5776 {"member-vnf-index-ref": member_vnf_index
, "nsr-id-ref": nsr_id
},
5779 # Instantiate new VNF resources
5780 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5781 vca_scaling_info
= []
5782 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5783 scaling_info
["scaling_direction"] = "OUT"
5784 scaling_info
["vdu-create"] = {}
5785 scaling_info
["kdu-create"] = {}
5786 vdud_instantiate_list
= db_vnfd
["vdu"]
5787 for index
, vdud
in enumerate(vdud_instantiate_list
):
5788 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(vdud
, db_vnfd
)
5790 additional_params
= (
5791 self
._get
_vdu
_additional
_params
(updated_db_vnfr
, vdud
["id"])
5794 cloud_init_list
= []
5796 # TODO Information of its own ip is not available because db_vnfr is not updated.
5797 additional_params
["OSM"] = get_osm_params(
5798 updated_db_vnfr
, vdud
["id"], 1
5800 cloud_init_list
.append(
5801 self
._parse
_cloud
_init
(
5808 vca_scaling_info
.append(
5810 "osm_vdu_id": vdud
["id"],
5811 "member-vnf-index": member_vnf_index
,
5813 "vdu_index": count_index
,
5816 scaling_info
["vdu-create"][vdud
["id"]] = count_index
5817 if self
.ro_config
.ng
:
5819 "New Resources to be deployed: {}".format(scaling_info
)
5821 await self
._scale
_ng
_ro
(
5829 return "COMPLETED", "Done"
5830 except (LcmException
, asyncio
.CancelledError
):
5832 except Exception as e
:
5833 self
.logger
.debug("Error updating VNF {}".format(e
))
5834 return "FAILED", "Error updating VNF {}".format(e
)
5836 async def _ns_charm_upgrade(
5842 timeout
: float = None,
5844 """This method upgrade charms in VNF instances
5847 ee_id: Execution environment id
5848 path: Local path to the charm
5850 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5851 timeout: (Float) Timeout for the ns update operation
5854 result: (str, str) COMPLETED/FAILED, details
5857 charm_type
= charm_type
or "lxc_proxy_charm"
5858 output
= await self
.vca_map
[charm_type
].upgrade_charm(
5862 charm_type
=charm_type
,
5863 timeout
=timeout
or self
.timeout
.ns_update
,
5867 return "COMPLETED", output
5869 except (LcmException
, asyncio
.CancelledError
):
5872 except Exception as e
:
5873 self
.logger
.debug("Error upgrading charm {}".format(path
))
5875 return "FAILED", "Error upgrading charm {}: {}".format(path
, e
)
async def update(self, nsr_id, nslcmop_id):
    """Update NS according to different update types.

    This method performs upgrade of VNF instances then updates the revision
    number in VNF record.

    Args:
        nsr_id: Network service to be updated.
        nslcmop_id: ns lcm operation id.

    Returns:
        (nslcmop_operation_state, detailed_status) tuple.

    It may raise DbException, LcmException, N2VCException, K8sException.
    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        # Another LCM instance owns this operation; nothing to do here.
        return

    logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")

    # Set the required variables to be filled up later
    db_nsr = None
    db_nslcmop_update = {}
    vnfr_update = {}
    nslcmop_operation_state = None
    db_nsr_update = {}
    error_description_nslcmop = ""
    exc = None
    change_type = "updated"
    detailed_status = ""
    member_vnf_index = None

    try:
        # wait for any previous tasks in process
        step = "Waiting for previous operations to terminate"
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state=None,
            current_operation="UPDATING",
            current_operation_id=nslcmop_id,
        )

        step = "Getting nslcmop from database"
        db_nslcmop = self.db.get_one(
            "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
        )
        update_type = db_nslcmop["operationParams"]["updateType"]

        step = "Getting nsr from database"
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        old_operational_status = db_nsr["operational-status"]
        db_nsr_update["operational-status"] = "updating"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        nsr_deployed = db_nsr["_admin"].get("deployed")

        if update_type == "CHANGE_VNFPKG":
            # Get the input parameters given through update request
            vnf_instance_id = db_nslcmop["operationParams"][
                "changeVnfPackageData"
            ].get("vnfInstanceId")

            vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
                "vnfdId"
            )
            timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")

            step = "Getting vnfr from database"
            db_vnfr = self.db.get_one(
                "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
            )

            step = "Getting vnfds from database"
            # Latest VNFD
            latest_vnfd = self.db.get_one(
                "vnfds", {"_id": vnfd_id}, fail_on_empty=False
            )
            latest_vnfd_revision = latest_vnfd["_admin"].get("revision")

            # Current VNFD revision currently instantiated for this VNF
            current_vnf_revision = db_vnfr.get("revision", 1)
            current_vnfd = self.db.get_one(
                "vnfds_revisions",
                {"_id": vnfd_id + ":" + str(current_vnf_revision)},
                fail_on_empty=False,
            )
            # Charm artifact paths will be filled up later
            (
                current_charm_artifact_path,
                target_charm_artifact_path,
                charm_artifact_paths,
                helm_artifacts,
            ) = ([], [], [], [])

            step = "Checking if revision has changed in VNFD"
            if current_vnf_revision != latest_vnfd_revision:
                change_type = "policy_updated"

                # There is new revision of VNFD, update operation is required
                current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
                latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)

                step = "Removing the VNFD packages if they exist in the local path"
                shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
                shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)

                step = "Get the VNFD packages from FSMongo"
                self.fs.sync(from_path=latest_vnfd_path)
                self.fs.sync(from_path=current_vnfd_path)

                step = (
                    "Get the charm-type, charm-id, ee-id if there is deployed VCA"
                )
                current_base_folder = current_vnfd["_admin"]["storage"]
                latest_base_folder = latest_vnfd["_admin"]["storage"]

                for vca_index, vca_deployed in enumerate(
                    get_iterable(nsr_deployed, "VCA")
                ):
                    vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Getting charm-id and charm-type
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        vca_id = self.get_vca_id(db_vnfr, db_nsr)
                        vca_type = vca_deployed.get("type")
                        vdu_count_index = vca_deployed.get("vdu_count_index")

                        # Getting ee-id
                        ee_id = vca_deployed.get("ee_id")

                        step = "Getting descriptor config"
                        # KDU-based VNFs index their VCAs by kdu_name, others by vnfd_id
                        if current_vnfd.get("kdu"):
                            search_key = "kdu_name"
                        else:
                            search_key = "vnfd_id"

                        entity_id = vca_deployed.get(search_key)

                        descriptor_config = get_configuration(
                            current_vnfd, entity_id
                        )

                        if "execution-environment-list" in descriptor_config:
                            ee_list = descriptor_config.get(
                                "execution-environment-list", []
                            )
                        else:
                            ee_list = []

                        # There could be several charm used in the same VNF
                        for ee_item in ee_list:
                            if ee_item.get("juju"):
                                step = "Getting charm name"
                                charm_name = ee_item["juju"].get("charm")

                                step = "Setting Charm artifact paths"
                                current_charm_artifact_path.append(
                                    get_charm_artifact_path(
                                        current_base_folder,
                                        charm_name,
                                        vca_type,
                                        current_vnf_revision,
                                    )
                                )
                                target_charm_artifact_path.append(
                                    get_charm_artifact_path(
                                        latest_base_folder,
                                        charm_name,
                                        vca_type,
                                        latest_vnfd_revision,
                                    )
                                )
                            elif ee_item.get("helm-chart"):
                                # add chart to list and all parameters
                                step = "Getting helm chart name"
                                chart_name = ee_item.get("helm-chart")
                                if (
                                    ee_item.get("helm-version")
                                    and ee_item.get("helm-version") == "v2"
                                ):
                                    vca_type = "helm"
                                else:
                                    vca_type = "helm-v3"
                                step = "Setting Helm chart artifact paths"

                                helm_artifacts.append(
                                    {
                                        "current_artifact_path": get_charm_artifact_path(
                                            current_base_folder,
                                            chart_name,
                                            vca_type,
                                            current_vnf_revision,
                                        ),
                                        "target_artifact_path": get_charm_artifact_path(
                                            latest_base_folder,
                                            chart_name,
                                            vca_type,
                                            latest_vnfd_revision,
                                        ),
                                        "ee_id": ee_id,
                                        "vca_index": vca_index,
                                        "vdu_index": vdu_count_index,
                                    }
                                )

                        charm_artifact_paths = zip(
                            current_charm_artifact_path, target_charm_artifact_path
                        )

                step = "Checking if software version has changed in VNFD"
                if find_software_version(current_vnfd) != find_software_version(
                    latest_vnfd
                ):
                    step = "Checking if existing VNF has charm"
                    for current_charm_path, target_charm_path in list(
                        charm_artifact_paths
                    ):
                        if current_charm_path:
                            raise LcmException(
                                "Software version change is not supported as VNF instance {} has charm.".format(
                                    vnf_instance_id
                                )
                            )

                    # There is no change in the charm package, then redeploy the VNF
                    # based on new descriptor
                    step = "Redeploying VNF"
                    member_vnf_index = db_vnfr["member-vnf-index-ref"]
                    (result, detailed_status) = await self._ns_redeploy_vnf(
                        nsr_id, nslcmop_id, latest_vnfd, db_vnfr, db_nsr
                    )
                    if result == "FAILED":
                        nslcmop_operation_state = result
                        error_description_nslcmop = detailed_status
                    db_nslcmop_update["detailed-status"] = detailed_status
                    self.logger.debug(
                        logging_text
                        + " step {} Done with result {} {}".format(
                            step, nslcmop_operation_state, detailed_status
                        )
                    )

                else:
                    step = "Checking if any charm package has changed or not"
                    for current_charm_path, target_charm_path in list(
                        charm_artifact_paths
                    ):
                        if (
                            current_charm_path
                            and target_charm_path
                            and self.check_charm_hash_changed(
                                current_charm_path, target_charm_path
                            )
                        ):
                            step = "Checking whether VNF uses juju bundle"
                            if check_juju_bundle_existence(current_vnfd):
                                raise LcmException(
                                    "Charm upgrade is not supported for the instance which"
                                    " uses juju-bundle: {}".format(
                                        check_juju_bundle_existence(current_vnfd)
                                    )
                                )

                            step = "Upgrading Charm"
                            (
                                result,
                                detailed_status,
                            ) = await self._ns_charm_upgrade(
                                ee_id=ee_id,
                                charm_id=vca_id,
                                charm_type=vca_type,
                                path=self.fs.path + target_charm_path,
                                timeout=timeout_seconds,
                            )

                            if result == "FAILED":
                                nslcmop_operation_state = result
                                error_description_nslcmop = detailed_status

                            db_nslcmop_update["detailed-status"] = detailed_status
                            self.logger.debug(
                                logging_text
                                + " step {} Done with result {} {}".format(
                                    step, nslcmop_operation_state, detailed_status
                                )
                            )

                    step = "Updating policies"
                    member_vnf_index = db_vnfr["member-vnf-index-ref"]
                    result = "COMPLETED"
                    detailed_status = "Done"
                    db_nslcmop_update["detailed-status"] = "Done"

                # Helm-based execution environments: upgrade the EE in place
                # when the chart artifact hash has changed.
                for item in helm_artifacts:
                    if not (
                        item["current_artifact_path"]
                        and item["target_artifact_path"]
                        and self.check_charm_hash_changed(
                            item["current_artifact_path"],
                            item["target_artifact_path"],
                        )
                    ):
                        continue
                    db_update_entry = "_admin.deployed.VCA.{}.".format(
                        item["vca_index"]
                    )
                    vnfr_id = db_vnfr["_id"]
                    osm_config = {"osm": {"ns_id": nsr_id, "vnf_id": vnfr_id}}
                    db_dict = {
                        "collection": "nsrs",
                        "filter": {"_id": nsr_id},
                        "path": db_update_entry,
                    }
                    vca_type, namespace, helm_id = get_ee_id_parts(item["ee_id"])
                    await self.vca_map[vca_type].upgrade_execution_environment(
                        namespace=namespace,
                        helm_id=helm_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=item["target_artifact_path"],
                        vca_type=vca_type,
                    )
                    vnf_id = db_vnfr.get("vnfd-ref")
                    config_descriptor = get_configuration(latest_vnfd, vnf_id)
                    self.logger.debug("get ssh key block")
                    rw_mgmt_ip = None
                    if deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "required"),
                    ):
                        # Needed to inject a ssh key
                        user = deep_get(
                            config_descriptor,
                            ("config-access", "ssh-access", "default-user"),
                        )
                        step = (
                            "Install configuration Software, getting public ssh key"
                        )
                        pub_key = await self.vca_map[
                            vca_type
                        ].get_ee_ssh_public__key(
                            ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                        )

                        step = (
                            "Insert public key into VM user={} ssh_key={}".format(
                                user, pub_key
                            )
                        )
                        self.logger.debug(logging_text + step)

                        # wait for RO (ip-address) Insert pub_key into VM
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            None,
                            item["vdu_index"],
                            user=user,
                            pub_key=pub_key,
                        )

                    initial_config_primitive_list = config_descriptor.get(
                        "initial-config-primitive"
                    )
                    config_primitive = next(
                        (
                            p
                            for p in initial_config_primitive_list
                            if p["name"] == "config"
                        ),
                        None,
                    )
                    if not config_primitive:
                        continue

                    deploy_params = {"OSM": get_osm_params(db_vnfr)}
                    if rw_mgmt_ip:
                        deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
                    if db_vnfr.get("additionalParamsForVnf"):
                        deploy_params.update(
                            parse_yaml_strings(
                                db_vnfr["additionalParamsForVnf"].copy()
                            )
                        )
                    primitive_params_ = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )

                    step = "Updating policies"
                    member_vnf_index = db_vnfr["member-vnf-index-ref"]
                    detailed_status = "Done"
                    db_nslcmop_update["detailed-status"] = "Done"

            #  If nslcmop_operation_state is None, so any operation is not failed.
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"

                # If update CHANGE_VNFPKG nslcmop_operation is successful
                # vnf revision need to be updated
                vnfr_update["revision"] = latest_vnfd_revision
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)

            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
        elif update_type == "REMOVE_VNF":
            # This part is included in https://osm.etsi.org/gerrit/11876
            vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
            member_vnf_index = db_vnfr["member-vnf-index-ref"]
            step = "Removing VNF"
            (result, detailed_status) = await self.remove_vnf(
                nsr_id, nslcmop_id, vnf_instance_id
            )
            if result == "FAILED":
                nslcmop_operation_state = result
                error_description_nslcmop = detailed_status
            db_nslcmop_update["detailed-status"] = detailed_status
            change_type = "vnf_terminated"
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
        elif update_type == "OPERATE_VNF":
            vnf_id = db_nslcmop["operationParams"]["operateVnfData"][
                "vnfInstanceId"
            ]
            operation_type = db_nslcmop["operationParams"]["operateVnfData"][
                "changeStateTo"
            ]
            additional_param = db_nslcmop["operationParams"]["operateVnfData"][
                "additionalParam"
            ]
            (result, detailed_status) = await self.rebuild_start_stop(
                nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
            )
            if result == "FAILED":
                nslcmop_operation_state = result
                error_description_nslcmop = detailed_status
            db_nslcmop_update["detailed-status"] = detailed_status
            if not nslcmop_operation_state:
                nslcmop_operation_state = "COMPLETED"
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )

        #  If nslcmop_operation_state is None, so any operation is not failed.
        #  All operations are executed in overall.
        if not nslcmop_operation_state:
            nslcmop_operation_state = "COMPLETED"
        db_nsr_update["operational-status"] = old_operational_status

    except (DbException, LcmException, N2VCException, K8sException) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(step)
        )
        exc = "Operation was cancelled"
    except asyncio.TimeoutError:
        self.logger.error(logging_text + "Timeout while '{}'".format(step))
        exc = "Timeout"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
            exc_info=True,
        )
    finally:
        if exc:
            # Propagate the failure text to the operation record, the return
            # value and the kafka notification in one assignment chain.
            db_nslcmop_update[
                "detailed-status"
            ] = (
                detailed_status
            ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
            nslcmop_operation_state = "FAILED"
            db_nsr_update["operational-status"] = old_operational_status
        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=db_nsr["nsState"],
                current_operation="IDLE",
                current_operation_id=None,
                other_update=db_nsr_update,
            )

        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

        if nslcmop_operation_state:
            try:
                msg = {
                    "nsr_id": nsr_id,
                    "nslcmop_id": nslcmop_id,
                    "operationState": nslcmop_operation_state,
                }
                if (
                    change_type in ("vnf_terminated", "policy_updated")
                    and member_vnf_index
                ):
                    msg.update({"vnf_member_index": member_vnf_index})
                await self.msg.aiowrite("ns", change_type, msg)
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )
        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
        return nslcmop_operation_state, detailed_status
6421 async def scale(self
, nsr_id
, nslcmop_id
):
6422 # Try to lock HA task here
6423 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
6424 if not task_is_locked_by_me
:
6427 logging_text
= "Task ns={} scale={} ".format(nsr_id
, nslcmop_id
)
6428 stage
= ["", "", ""]
6429 tasks_dict_info
= {}
6430 # ^ stage, step, VIM progress
6431 self
.logger
.debug(logging_text
+ "Enter")
6432 # get all needed from database
6434 db_nslcmop_update
= {}
6437 # in case of error, indicates what part of scale was failed to put nsr at error status
6438 scale_process
= None
6439 old_operational_status
= ""
6440 old_config_status
= ""
6443 # wait for any previous tasks in process
6444 step
= "Waiting for previous operations to terminate"
6445 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
6446 self
._write
_ns
_status
(
6449 current_operation
="SCALING",
6450 current_operation_id
=nslcmop_id
,
6453 step
= "Getting nslcmop from database"
6455 step
+ " after having waited for previous tasks to be completed"
6457 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
6459 step
= "Getting nsr from database"
6460 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
6461 old_operational_status
= db_nsr
["operational-status"]
6462 old_config_status
= db_nsr
["config-status"]
6464 step
= "Parsing scaling parameters"
6465 db_nsr_update
["operational-status"] = "scaling"
6466 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6467 nsr_deployed
= db_nsr
["_admin"].get("deployed")
6469 vnf_index
= db_nslcmop
["operationParams"]["scaleVnfData"][
6471 ]["member-vnf-index"]
6472 scaling_group
= db_nslcmop
["operationParams"]["scaleVnfData"][
6474 ]["scaling-group-descriptor"]
6475 scaling_type
= db_nslcmop
["operationParams"]["scaleVnfData"]["scaleVnfType"]
6476 # for backward compatibility
6477 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
6478 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
6479 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
6480 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6482 step
= "Getting vnfr from database"
6483 db_vnfr
= self
.db
.get_one(
6484 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
6487 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
6489 step
= "Getting vnfd from database"
6490 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
6492 base_folder
= db_vnfd
["_admin"]["storage"]
6494 step
= "Getting scaling-group-descriptor"
6495 scaling_descriptor
= find_in_list(
6496 get_scaling_aspect(db_vnfd
),
6497 lambda scale_desc
: scale_desc
["name"] == scaling_group
,
6499 if not scaling_descriptor
:
6501 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6502 "at vnfd:scaling-group-descriptor".format(scaling_group
)
6505 step
= "Sending scale order to VIM"
6506 # TODO check if ns is in a proper status
6508 if not db_nsr
["_admin"].get("scaling-group"):
6513 "_admin.scaling-group": [
6514 {"name": scaling_group
, "nb-scale-op": 0}
6518 admin_scale_index
= 0
6520 for admin_scale_index
, admin_scale_info
in enumerate(
6521 db_nsr
["_admin"]["scaling-group"]
6523 if admin_scale_info
["name"] == scaling_group
:
6524 nb_scale_op
= admin_scale_info
.get("nb-scale-op", 0)
6526 else: # not found, set index one plus last element and add new entry with the name
6527 admin_scale_index
+= 1
6529 "_admin.scaling-group.{}.name".format(admin_scale_index
)
6532 vca_scaling_info
= []
6533 scaling_info
= {"scaling_group_name": scaling_group
, "vdu": [], "kdu": []}
6534 if scaling_type
== "SCALE_OUT":
6535 if "aspect-delta-details" not in scaling_descriptor
:
6537 "Aspect delta details not fount in scaling descriptor {}".format(
6538 scaling_descriptor
["name"]
6541 # count if max-instance-count is reached
6542 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6544 scaling_info
["scaling_direction"] = "OUT"
6545 scaling_info
["vdu-create"] = {}
6546 scaling_info
["kdu-create"] = {}
6547 for delta
in deltas
:
6548 for vdu_delta
in delta
.get("vdu-delta", {}):
6549 vdud
= get_vdu(db_vnfd
, vdu_delta
["id"])
6550 # vdu_index also provides the number of instance of the targeted vdu
6551 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6552 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(
6556 additional_params
= (
6557 self
._get
_vdu
_additional
_params
(db_vnfr
, vdud
["id"])
6560 cloud_init_list
= []
6562 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6563 max_instance_count
= 10
6564 if vdu_profile
and "max-number-of-instances" in vdu_profile
:
6565 max_instance_count
= vdu_profile
.get(
6566 "max-number-of-instances", 10
6569 default_instance_num
= get_number_of_instances(
6572 instances_number
= vdu_delta
.get("number-of-instances", 1)
6573 nb_scale_op
+= instances_number
6575 new_instance_count
= nb_scale_op
+ default_instance_num
6576 # Control if new count is over max and vdu count is less than max.
6577 # Then assign new instance count
6578 if new_instance_count
> max_instance_count
> vdu_count
:
6579 instances_number
= new_instance_count
- max_instance_count
6581 instances_number
= instances_number
6583 if new_instance_count
> max_instance_count
:
6585 "reached the limit of {} (max-instance-count) "
6586 "scaling-out operations for the "
6587 "scaling-group-descriptor '{}'".format(
6588 nb_scale_op
, scaling_group
6591 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6593 # TODO Information of its own ip is not available because db_vnfr is not updated.
6594 additional_params
["OSM"] = get_osm_params(
6595 db_vnfr
, vdu_delta
["id"], vdu_index
+ x
6597 cloud_init_list
.append(
6598 self
._parse
_cloud
_init
(
6605 vca_scaling_info
.append(
6607 "osm_vdu_id": vdu_delta
["id"],
6608 "member-vnf-index": vnf_index
,
6610 "vdu_index": vdu_index
+ x
,
6613 scaling_info
["vdu-create"][vdu_delta
["id"]] = instances_number
6614 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6615 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6616 kdu_name
= kdu_profile
["kdu-name"]
6617 resource_name
= kdu_profile
.get("resource-name", "")
6619 # Might have different kdus in the same delta
6620 # Should have list for each kdu
6621 if not scaling_info
["kdu-create"].get(kdu_name
, None):
6622 scaling_info
["kdu-create"][kdu_name
] = []
6624 kdur
= get_kdur(db_vnfr
, kdu_name
)
6625 if kdur
.get("helm-chart"):
6626 k8s_cluster_type
= "helm-chart-v3"
6627 self
.logger
.debug("kdur: {}".format(kdur
))
6629 kdur
.get("helm-version")
6630 and kdur
.get("helm-version") == "v2"
6632 k8s_cluster_type
= "helm-chart"
6633 elif kdur
.get("juju-bundle"):
6634 k8s_cluster_type
= "juju-bundle"
6637 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6638 "juju-bundle. Maybe an old NBI version is running".format(
6639 db_vnfr
["member-vnf-index-ref"], kdu_name
6643 max_instance_count
= 10
6644 if kdu_profile
and "max-number-of-instances" in kdu_profile
:
6645 max_instance_count
= kdu_profile
.get(
6646 "max-number-of-instances", 10
6649 nb_scale_op
+= kdu_delta
.get("number-of-instances", 1)
6650 deployed_kdu
, _
= get_deployed_kdu(
6651 nsr_deployed
, kdu_name
, vnf_index
6653 if deployed_kdu
is None:
6655 "KDU '{}' for vnf '{}' not deployed".format(
6659 kdu_instance
= deployed_kdu
.get("kdu-instance")
6660 instance_num
= await self
.k8scluster_map
[
6666 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6667 kdu_model
=deployed_kdu
.get("kdu-model"),
6669 kdu_replica_count
= instance_num
+ kdu_delta
.get(
6670 "number-of-instances", 1
6673 # Control if new count is over max and instance_num is less than max.
6674 # Then assign max instance number to kdu replica count
6675 if kdu_replica_count
> max_instance_count
> instance_num
:
6676 kdu_replica_count
= max_instance_count
6677 if kdu_replica_count
> max_instance_count
:
6679 "reached the limit of {} (max-instance-count) "
6680 "scaling-out operations for the "
6681 "scaling-group-descriptor '{}'".format(
6682 instance_num
, scaling_group
6686 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6687 vca_scaling_info
.append(
6689 "osm_kdu_id": kdu_name
,
6690 "member-vnf-index": vnf_index
,
6692 "kdu_index": instance_num
+ x
- 1,
6695 scaling_info
["kdu-create"][kdu_name
].append(
6697 "member-vnf-index": vnf_index
,
6699 "k8s-cluster-type": k8s_cluster_type
,
6700 "resource-name": resource_name
,
6701 "scale": kdu_replica_count
,
6704 elif scaling_type
== "SCALE_IN":
6705 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6707 scaling_info
["scaling_direction"] = "IN"
6708 scaling_info
["vdu-delete"] = {}
6709 scaling_info
["kdu-delete"] = {}
6711 for delta
in deltas
:
6712 for vdu_delta
in delta
.get("vdu-delta", {}):
6713 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6714 min_instance_count
= 0
6715 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6716 if vdu_profile
and "min-number-of-instances" in vdu_profile
:
6717 min_instance_count
= vdu_profile
["min-number-of-instances"]
6719 default_instance_num
= get_number_of_instances(
6720 db_vnfd
, vdu_delta
["id"]
6722 instance_num
= vdu_delta
.get("number-of-instances", 1)
6723 nb_scale_op
-= instance_num
6725 new_instance_count
= nb_scale_op
+ default_instance_num
6727 if new_instance_count
< min_instance_count
< vdu_count
:
6728 instances_number
= min_instance_count
- new_instance_count
6730 instances_number
= instance_num
6732 if new_instance_count
< min_instance_count
:
6734 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6735 "scaling-group-descriptor '{}'".format(
6736 nb_scale_op
, scaling_group
6739 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6740 vca_scaling_info
.append(
6742 "osm_vdu_id": vdu_delta
["id"],
6743 "member-vnf-index": vnf_index
,
6745 "vdu_index": vdu_index
- 1 - x
,
6748 scaling_info
["vdu-delete"][vdu_delta
["id"]] = instances_number
6749 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6750 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6751 kdu_name
= kdu_profile
["kdu-name"]
6752 resource_name
= kdu_profile
.get("resource-name", "")
6754 if not scaling_info
["kdu-delete"].get(kdu_name
, None):
6755 scaling_info
["kdu-delete"][kdu_name
] = []
6757 kdur
= get_kdur(db_vnfr
, kdu_name
)
6758 if kdur
.get("helm-chart"):
6759 k8s_cluster_type
= "helm-chart-v3"
6760 self
.logger
.debug("kdur: {}".format(kdur
))
6762 kdur
.get("helm-version")
6763 and kdur
.get("helm-version") == "v2"
6765 k8s_cluster_type
= "helm-chart"
6766 elif kdur
.get("juju-bundle"):
6767 k8s_cluster_type
= "juju-bundle"
6770 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6771 "juju-bundle. Maybe an old NBI version is running".format(
6772 db_vnfr
["member-vnf-index-ref"], kdur
["kdu-name"]
6776 min_instance_count
= 0
6777 if kdu_profile
and "min-number-of-instances" in kdu_profile
:
6778 min_instance_count
= kdu_profile
["min-number-of-instances"]
6780 nb_scale_op
-= kdu_delta
.get("number-of-instances", 1)
6781 deployed_kdu
, _
= get_deployed_kdu(
6782 nsr_deployed
, kdu_name
, vnf_index
6784 if deployed_kdu
is None:
6786 "KDU '{}' for vnf '{}' not deployed".format(
6790 kdu_instance
= deployed_kdu
.get("kdu-instance")
6791 instance_num
= await self
.k8scluster_map
[
6797 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6798 kdu_model
=deployed_kdu
.get("kdu-model"),
6800 kdu_replica_count
= instance_num
- kdu_delta
.get(
6801 "number-of-instances", 1
6804 if kdu_replica_count
< min_instance_count
< instance_num
:
6805 kdu_replica_count
= min_instance_count
6806 if kdu_replica_count
< min_instance_count
:
6808 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6809 "scaling-group-descriptor '{}'".format(
6810 instance_num
, scaling_group
6814 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6815 vca_scaling_info
.append(
6817 "osm_kdu_id": kdu_name
,
6818 "member-vnf-index": vnf_index
,
6820 "kdu_index": instance_num
- x
- 1,
6823 scaling_info
["kdu-delete"][kdu_name
].append(
6825 "member-vnf-index": vnf_index
,
6827 "k8s-cluster-type": k8s_cluster_type
,
6828 "resource-name": resource_name
,
6829 "scale": kdu_replica_count
,
6833 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6834 vdu_delete
= copy(scaling_info
.get("vdu-delete"))
6835 if scaling_info
["scaling_direction"] == "IN":
6836 for vdur
in reversed(db_vnfr
["vdur"]):
6837 if vdu_delete
.get(vdur
["vdu-id-ref"]):
6838 vdu_delete
[vdur
["vdu-id-ref"]] -= 1
6839 scaling_info
["vdu"].append(
6841 "name": vdur
.get("name") or vdur
.get("vdu-name"),
6842 "vdu_id": vdur
["vdu-id-ref"],
6846 for interface
in vdur
["interfaces"]:
6847 scaling_info
["vdu"][-1]["interface"].append(
6849 "name": interface
["name"],
6850 "ip_address": interface
["ip-address"],
6851 "mac_address": interface
.get("mac-address"),
6854 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6857 step
= "Executing pre-scale vnf-config-primitive"
6858 if scaling_descriptor
.get("scaling-config-action"):
6859 for scaling_config_action
in scaling_descriptor
[
6860 "scaling-config-action"
6863 scaling_config_action
.get("trigger") == "pre-scale-in"
6864 and scaling_type
== "SCALE_IN"
6866 scaling_config_action
.get("trigger") == "pre-scale-out"
6867 and scaling_type
== "SCALE_OUT"
6869 vnf_config_primitive
= scaling_config_action
[
6870 "vnf-config-primitive-name-ref"
6872 step
= db_nslcmop_update
[
6874 ] = "executing pre-scale scaling-config-action '{}'".format(
6875 vnf_config_primitive
6878 # look for primitive
6879 for config_primitive
in (
6880 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
6881 ).get("config-primitive", ()):
6882 if config_primitive
["name"] == vnf_config_primitive
:
6886 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6887 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6888 "primitive".format(scaling_group
, vnf_config_primitive
)
6891 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
6892 if db_vnfr
.get("additionalParamsForVnf"):
6893 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
6895 scale_process
= "VCA"
6896 db_nsr_update
["config-status"] = "configuring pre-scaling"
6897 primitive_params
= self
._map
_primitive
_params
(
6898 config_primitive
, {}, vnfr_params
6901 # Pre-scale retry check: Check if this sub-operation has been executed before
6902 op_index
= self
._check
_or
_add
_scale
_suboperation
(
6905 vnf_config_primitive
,
6909 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
6910 # Skip sub-operation
6911 result
= "COMPLETED"
6912 result_detail
= "Done"
6915 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6916 vnf_config_primitive
, result
, result_detail
6920 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
6921 # New sub-operation: Get index of this sub-operation
6923 len(db_nslcmop
.get("_admin", {}).get("operations"))
6928 + "vnf_config_primitive={} New sub-operation".format(
6929 vnf_config_primitive
6933 # retry: Get registered params for this existing sub-operation
6934 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
6937 vnf_index
= op
.get("member_vnf_index")
6938 vnf_config_primitive
= op
.get("primitive")
6939 primitive_params
= op
.get("primitive_params")
6942 + "vnf_config_primitive={} Sub-operation retry".format(
6943 vnf_config_primitive
6946 # Execute the primitive, either with new (first-time) or registered (reintent) args
6947 ee_descriptor_id
= config_primitive
.get(
6948 "execution-environment-ref"
6950 primitive_name
= config_primitive
.get(
6951 "execution-environment-primitive", vnf_config_primitive
6953 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
6954 nsr_deployed
["VCA"],
6955 member_vnf_index
=vnf_index
,
6957 vdu_count_index
=None,
6958 ee_descriptor_id
=ee_descriptor_id
,
6960 result
, result_detail
= await self
._ns
_execute
_primitive
(
6969 + "vnf_config_primitive={} Done with result {} {}".format(
6970 vnf_config_primitive
, result
, result_detail
6973 # Update operationState = COMPLETED | FAILED
6974 self
._update
_suboperation
_status
(
6975 db_nslcmop
, op_index
, result
, result_detail
6978 if result
== "FAILED":
6979 raise LcmException(result_detail
)
6980 db_nsr_update
["config-status"] = old_config_status
6981 scale_process
= None
6985 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index
)
6988 "_admin.scaling-group.{}.time".format(admin_scale_index
)
6991 # SCALE-IN VCA - BEGIN
6992 if vca_scaling_info
:
6993 step
= db_nslcmop_update
[
6995 ] = "Deleting the execution environments"
6996 scale_process
= "VCA"
6997 for vca_info
in vca_scaling_info
:
6998 if vca_info
["type"] == "delete" and not vca_info
.get("osm_kdu_id"):
6999 member_vnf_index
= str(vca_info
["member-vnf-index"])
7001 logging_text
+ "vdu info: {}".format(vca_info
)
7003 if vca_info
.get("osm_vdu_id"):
7004 vdu_id
= vca_info
["osm_vdu_id"]
7005 vdu_index
= int(vca_info
["vdu_index"])
7008 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7009 member_vnf_index
, vdu_id
, vdu_index
7011 stage
[2] = step
= "Scaling in VCA"
7012 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7013 vca_update
= db_nsr
["_admin"]["deployed"]["VCA"]
7014 config_update
= db_nsr
["configurationStatus"]
7015 for vca_index
, vca
in enumerate(vca_update
):
7017 (vca
or vca
.get("ee_id"))
7018 and vca
["member-vnf-index"] == member_vnf_index
7019 and vca
["vdu_count_index"] == vdu_index
7021 if vca
.get("vdu_id"):
7022 config_descriptor
= get_configuration(
7023 db_vnfd
, vca
.get("vdu_id")
7025 elif vca
.get("kdu_name"):
7026 config_descriptor
= get_configuration(
7027 db_vnfd
, vca
.get("kdu_name")
7030 config_descriptor
= get_configuration(
7031 db_vnfd
, db_vnfd
["id"]
7033 operation_params
= (
7034 db_nslcmop
.get("operationParams") or {}
7036 exec_terminate_primitives
= not operation_params
.get(
7037 "skip_terminate_primitives"
7038 ) and vca
.get("needed_terminate")
7039 task
= asyncio
.ensure_future(
7048 exec_primitives
=exec_terminate_primitives
,
7052 timeout
=self
.timeout
.charm_delete
,
7055 tasks_dict_info
[task
] = "Terminating VCA {}".format(
7058 del vca_update
[vca_index
]
7059 del config_update
[vca_index
]
7060 # wait for pending tasks of terminate primitives
7064 + "Waiting for tasks {}".format(
7065 list(tasks_dict_info
.keys())
7068 error_list
= await self
._wait
_for
_tasks
(
7072 self
.timeout
.charm_delete
, self
.timeout
.ns_terminate
7077 tasks_dict_info
.clear()
7079 raise LcmException("; ".join(error_list
))
7081 db_vca_and_config_update
= {
7082 "_admin.deployed.VCA": vca_update
,
7083 "configurationStatus": config_update
,
7086 "nsrs", db_nsr
["_id"], db_vca_and_config_update
7088 scale_process
= None
7089 # SCALE-IN VCA - END
7092 if scaling_info
.get("vdu-create") or scaling_info
.get("vdu-delete"):
7093 scale_process
= "RO"
7094 if self
.ro_config
.ng
:
7095 await self
._scale
_ng
_ro
(
7096 logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, scaling_info
, stage
7098 scaling_info
.pop("vdu-create", None)
7099 scaling_info
.pop("vdu-delete", None)
7101 scale_process
= None
7105 if scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete"):
7106 scale_process
= "KDU"
7107 await self
._scale
_kdu
(
7108 logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7110 scaling_info
.pop("kdu-create", None)
7111 scaling_info
.pop("kdu-delete", None)
7113 scale_process
= None
7117 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7119 # SCALE-UP VCA - BEGIN
7120 if vca_scaling_info
:
7121 step
= db_nslcmop_update
[
7123 ] = "Creating new execution environments"
7124 scale_process
= "VCA"
7125 for vca_info
in vca_scaling_info
:
7126 if vca_info
["type"] == "create" and not vca_info
.get("osm_kdu_id"):
7127 member_vnf_index
= str(vca_info
["member-vnf-index"])
7129 logging_text
+ "vdu info: {}".format(vca_info
)
7131 vnfd_id
= db_vnfr
["vnfd-ref"]
7132 if vca_info
.get("osm_vdu_id"):
7133 vdu_index
= int(vca_info
["vdu_index"])
7134 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
7135 if db_vnfr
.get("additionalParamsForVnf"):
7136 deploy_params
.update(
7138 db_vnfr
["additionalParamsForVnf"].copy()
7141 descriptor_config
= get_configuration(
7142 db_vnfd
, db_vnfd
["id"]
7144 if descriptor_config
:
7150 logging_text
=logging_text
7151 + "member_vnf_index={} ".format(member_vnf_index
),
7154 nslcmop_id
=nslcmop_id
,
7160 kdu_index
=kdu_index
,
7161 member_vnf_index
=member_vnf_index
,
7162 vdu_index
=vdu_index
,
7164 deploy_params
=deploy_params
,
7165 descriptor_config
=descriptor_config
,
7166 base_folder
=base_folder
,
7167 task_instantiation_info
=tasks_dict_info
,
7170 vdu_id
= vca_info
["osm_vdu_id"]
7171 vdur
= find_in_list(
7172 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
7174 descriptor_config
= get_configuration(db_vnfd
, vdu_id
)
7175 if vdur
.get("additionalParams"):
7176 deploy_params_vdu
= parse_yaml_strings(
7177 vdur
["additionalParams"]
7180 deploy_params_vdu
= deploy_params
7181 deploy_params_vdu
["OSM"] = get_osm_params(
7182 db_vnfr
, vdu_id
, vdu_count_index
=vdu_index
7184 if descriptor_config
:
7190 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7191 member_vnf_index
, vdu_id
, vdu_index
7193 stage
[2] = step
= "Scaling out VCA"
7194 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7196 logging_text
=logging_text
7197 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7198 member_vnf_index
, vdu_id
, vdu_index
7202 nslcmop_id
=nslcmop_id
,
7208 member_vnf_index
=member_vnf_index
,
7209 vdu_index
=vdu_index
,
7210 kdu_index
=kdu_index
,
7212 deploy_params
=deploy_params_vdu
,
7213 descriptor_config
=descriptor_config
,
7214 base_folder
=base_folder
,
7215 task_instantiation_info
=tasks_dict_info
,
7218 # SCALE-UP VCA - END
7219 scale_process
= None
7222 # execute primitive service POST-SCALING
7223 step
= "Executing post-scale vnf-config-primitive"
7224 if scaling_descriptor
.get("scaling-config-action"):
7225 for scaling_config_action
in scaling_descriptor
[
7226 "scaling-config-action"
7229 scaling_config_action
.get("trigger") == "post-scale-in"
7230 and scaling_type
== "SCALE_IN"
7232 scaling_config_action
.get("trigger") == "post-scale-out"
7233 and scaling_type
== "SCALE_OUT"
7235 vnf_config_primitive
= scaling_config_action
[
7236 "vnf-config-primitive-name-ref"
7238 step
= db_nslcmop_update
[
7240 ] = "executing post-scale scaling-config-action '{}'".format(
7241 vnf_config_primitive
7244 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
7245 if db_vnfr
.get("additionalParamsForVnf"):
7246 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
7248 # look for primitive
7249 for config_primitive
in (
7250 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
7251 ).get("config-primitive", ()):
7252 if config_primitive
["name"] == vnf_config_primitive
:
7256 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7257 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7258 "config-primitive".format(
7259 scaling_group
, vnf_config_primitive
7262 scale_process
= "VCA"
7263 db_nsr_update
["config-status"] = "configuring post-scaling"
7264 primitive_params
= self
._map
_primitive
_params
(
7265 config_primitive
, {}, vnfr_params
7268 # Post-scale retry check: Check if this sub-operation has been executed before
7269 op_index
= self
._check
_or
_add
_scale
_suboperation
(
7272 vnf_config_primitive
,
7276 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
7277 # Skip sub-operation
7278 result
= "COMPLETED"
7279 result_detail
= "Done"
7282 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7283 vnf_config_primitive
, result
, result_detail
7287 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
7288 # New sub-operation: Get index of this sub-operation
7290 len(db_nslcmop
.get("_admin", {}).get("operations"))
7295 + "vnf_config_primitive={} New sub-operation".format(
7296 vnf_config_primitive
7300 # retry: Get registered params for this existing sub-operation
7301 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
7304 vnf_index
= op
.get("member_vnf_index")
7305 vnf_config_primitive
= op
.get("primitive")
7306 primitive_params
= op
.get("primitive_params")
7309 + "vnf_config_primitive={} Sub-operation retry".format(
7310 vnf_config_primitive
7313 # Execute the primitive, either with new (first-time) or registered (reintent) args
7314 ee_descriptor_id
= config_primitive
.get(
7315 "execution-environment-ref"
7317 primitive_name
= config_primitive
.get(
7318 "execution-environment-primitive", vnf_config_primitive
7320 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
7321 nsr_deployed
["VCA"],
7322 member_vnf_index
=vnf_index
,
7324 vdu_count_index
=None,
7325 ee_descriptor_id
=ee_descriptor_id
,
7327 result
, result_detail
= await self
._ns
_execute
_primitive
(
7336 + "vnf_config_primitive={} Done with result {} {}".format(
7337 vnf_config_primitive
, result
, result_detail
7340 # Update operationState = COMPLETED | FAILED
7341 self
._update
_suboperation
_status
(
7342 db_nslcmop
, op_index
, result
, result_detail
7345 if result
== "FAILED":
7346 raise LcmException(result_detail
)
7347 db_nsr_update
["config-status"] = old_config_status
7348 scale_process
= None
7353 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7354 db_nsr_update
["operational-status"] = (
7356 if old_operational_status
== "failed"
7357 else old_operational_status
7359 db_nsr_update
["config-status"] = old_config_status
7362 ROclient
.ROClientException
,
7367 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7369 except asyncio
.CancelledError
:
7371 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7373 exc
= "Operation was cancelled"
7374 except Exception as e
:
7375 exc
= traceback
.format_exc()
7376 self
.logger
.critical(
7377 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
7381 self
._write
_ns
_status
(
7384 current_operation
="IDLE",
7385 current_operation_id
=None,
7388 stage
[1] = "Waiting for instantiate pending tasks."
7389 self
.logger
.debug(logging_text
+ stage
[1])
7390 exc
= await self
._wait
_for
_tasks
(
7393 self
.timeout
.ns_deploy
,
7401 ] = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
7402 nslcmop_operation_state
= "FAILED"
7404 db_nsr_update
["operational-status"] = old_operational_status
7405 db_nsr_update
["config-status"] = old_config_status
7406 db_nsr_update
["detailed-status"] = ""
7408 if "VCA" in scale_process
:
7409 db_nsr_update
["config-status"] = "failed"
7410 if "RO" in scale_process
:
7411 db_nsr_update
["operational-status"] = "failed"
7414 ] = "FAILED scaling nslcmop={} {}: {}".format(
7415 nslcmop_id
, step
, exc
7418 error_description_nslcmop
= None
7419 nslcmop_operation_state
= "COMPLETED"
7420 db_nslcmop_update
["detailed-status"] = "Done"
7422 self
._write
_op
_status
(
7425 error_message
=error_description_nslcmop
,
7426 operation_state
=nslcmop_operation_state
,
7427 other_update
=db_nslcmop_update
,
7430 self
._write
_ns
_status
(
7433 current_operation
="IDLE",
7434 current_operation_id
=None,
7435 other_update
=db_nsr_update
,
7438 if nslcmop_operation_state
:
7442 "nslcmop_id": nslcmop_id
,
7443 "operationState": nslcmop_operation_state
,
7445 await self
.msg
.aiowrite("ns", "scaled", msg
)
7446 except Exception as e
:
7448 logging_text
+ "kafka_write notification Exception {}".format(e
)
7450 self
.logger
.debug(logging_text
+ "Exit")
7451 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_scale")
7453 async def _scale_kdu(
7454 self
, logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7456 _scaling_info
= scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete")
7457 for kdu_name
in _scaling_info
:
7458 for kdu_scaling_info
in _scaling_info
[kdu_name
]:
7459 deployed_kdu
, index
= get_deployed_kdu(
7460 nsr_deployed
, kdu_name
, kdu_scaling_info
["member-vnf-index"]
7462 cluster_uuid
= deployed_kdu
["k8scluster-uuid"]
7463 kdu_instance
= deployed_kdu
["kdu-instance"]
7464 kdu_model
= deployed_kdu
.get("kdu-model")
7465 scale
= int(kdu_scaling_info
["scale"])
7466 k8s_cluster_type
= kdu_scaling_info
["k8s-cluster-type"]
7469 "collection": "nsrs",
7470 "filter": {"_id": nsr_id
},
7471 "path": "_admin.deployed.K8s.{}".format(index
),
7474 step
= "scaling application {}".format(
7475 kdu_scaling_info
["resource-name"]
7477 self
.logger
.debug(logging_text
+ step
)
7479 if kdu_scaling_info
["type"] == "delete":
7480 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7483 and kdu_config
.get("terminate-config-primitive")
7484 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7486 terminate_config_primitive_list
= kdu_config
.get(
7487 "terminate-config-primitive"
7489 terminate_config_primitive_list
.sort(
7490 key
=lambda val
: int(val
["seq"])
7494 terminate_config_primitive
7495 ) in terminate_config_primitive_list
:
7496 primitive_params_
= self
._map
_primitive
_params
(
7497 terminate_config_primitive
, {}, {}
7499 step
= "execute terminate config primitive"
7500 self
.logger
.debug(logging_text
+ step
)
7501 await asyncio
.wait_for(
7502 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7503 cluster_uuid
=cluster_uuid
,
7504 kdu_instance
=kdu_instance
,
7505 primitive_name
=terminate_config_primitive
["name"],
7506 params
=primitive_params_
,
7508 total_timeout
=self
.timeout
.primitive
,
7511 timeout
=self
.timeout
.primitive
7512 * self
.timeout
.primitive_outer_factor
,
7515 await asyncio
.wait_for(
7516 self
.k8scluster_map
[k8s_cluster_type
].scale(
7517 kdu_instance
=kdu_instance
,
7519 resource_name
=kdu_scaling_info
["resource-name"],
7520 total_timeout
=self
.timeout
.scale_on_error
,
7522 cluster_uuid
=cluster_uuid
,
7523 kdu_model
=kdu_model
,
7527 timeout
=self
.timeout
.scale_on_error
7528 * self
.timeout
.scale_on_error_outer_factor
,
7531 if kdu_scaling_info
["type"] == "create":
7532 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7535 and kdu_config
.get("initial-config-primitive")
7536 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7538 initial_config_primitive_list
= kdu_config
.get(
7539 "initial-config-primitive"
7541 initial_config_primitive_list
.sort(
7542 key
=lambda val
: int(val
["seq"])
7545 for initial_config_primitive
in initial_config_primitive_list
:
7546 primitive_params_
= self
._map
_primitive
_params
(
7547 initial_config_primitive
, {}, {}
7549 step
= "execute initial config primitive"
7550 self
.logger
.debug(logging_text
+ step
)
7551 await asyncio
.wait_for(
7552 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7553 cluster_uuid
=cluster_uuid
,
7554 kdu_instance
=kdu_instance
,
7555 primitive_name
=initial_config_primitive
["name"],
7556 params
=primitive_params_
,
7563 async def _scale_ng_ro(
7564 self
, logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, vdu_scaling_info
, stage
7566 nsr_id
= db_nslcmop
["nsInstanceId"]
7567 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7570 # read from db: vnfd's for every vnf
7573 # for each vnf in ns, read vnfd
7574 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
7575 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
7576 vnfd_id
= vnfr
["vnfd-id"] # vnfd uuid for this vnf
7577 # if we haven't this vnfd, read it from db
7578 if not find_in_list(db_vnfds
, lambda a_vnfd
: a_vnfd
["id"] == vnfd_id
):
7580 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7581 db_vnfds
.append(vnfd
)
7582 n2vc_key
= self
.n2vc
.get_public_key()
7583 n2vc_key_list
= [n2vc_key
]
7586 vdu_scaling_info
.get("vdu-create"),
7587 vdu_scaling_info
.get("vdu-delete"),
7590 # db_vnfr has been updated, update db_vnfrs to use it
7591 db_vnfrs
[db_vnfr
["member-vnf-index-ref"]] = db_vnfr
7592 await self
._instantiate
_ng
_ro
(
7602 start_deploy
=time(),
7603 timeout_ns_deploy
=self
.timeout
.ns_deploy
,
7605 if vdu_scaling_info
.get("vdu-delete"):
7607 db_vnfr
, None, vdu_scaling_info
["vdu-delete"], mark_delete
=False
7610 async def extract_prometheus_scrape_jobs(
7614 ee_config_descriptor
: dict,
7619 vnf_member_index
: str = "",
7621 vdu_index
: int = None,
7623 kdu_index
: int = None,
7625 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7626 This method will wait until the corresponding VDU or KDU is fully instantiated
7629 ee_id (str): Execution Environment ID
7630 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7631 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7632 vnfr_id (str): VNFR ID where this EE applies
7633 nsr_id (str): NSR ID where this EE applies
7634 target_ip (str): VDU/KDU instance IP address
7635 element_type (str): NS or VNF or VDU or KDU
7636 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7637 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7638 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7639 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7640 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7643 LcmException: When the VDU or KDU instance was not found in an hour
7646 _type_: Prometheus jobs
7648 # default the vdur and kdur names to an empty string, to avoid any later
7649 # problem with Prometheus when the element type is not VDU or KDU
7653 # look if exist a file called 'prometheus*.j2' and
7654 artifact_content
= self
.fs
.dir_ls(artifact_path
)
7658 for f
in artifact_content
7659 if f
.startswith("prometheus") and f
.endswith(".j2")
7665 self
.logger
.debug("Artifact path{}".format(artifact_path
))
7666 self
.logger
.debug("job file{}".format(job_file
))
7667 with self
.fs
.file_open((artifact_path
, job_file
), "r") as f
:
7670 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7671 if element_type
in ("VDU", "KDU"):
7672 for _
in range(360):
7673 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7674 if vdu_id
and vdu_index
is not None:
7678 for x
in get_iterable(db_vnfr
, "vdur")
7680 x
.get("vdu-id-ref") == vdu_id
7681 and x
.get("count-index") == vdu_index
7686 if vdur
.get("name"):
7687 vdur_name
= vdur
.get("name")
7689 if kdu_name
and kdu_index
is not None:
7693 for x
in get_iterable(db_vnfr
, "kdur")
7695 x
.get("kdu-name") == kdu_name
7696 and x
.get("count-index") == kdu_index
7701 if kdur
.get("name"):
7702 kdur_name
= kdur
.get("name")
7705 await asyncio
.sleep(10)
7707 if vdu_id
and vdu_index
is not None:
7709 f
"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7711 if kdu_name
and kdu_index
is not None:
7713 f
"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7716 if ee_id
is not None:
7717 _
, namespace
, helm_id
= get_ee_id_parts(
7719 ) # get namespace and EE gRPC service name
7720 host_name
= f
'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7722 vnfr_id
= vnfr_id
.replace("-", "")
7724 "JOB_NAME": vnfr_id
,
7725 "TARGET_IP": target_ip
,
7726 "EXPORTER_POD_IP": host_name
,
7727 "EXPORTER_POD_PORT": host_port
,
7729 "VNF_MEMBER_INDEX": vnf_member_index
,
7730 "VDUR_NAME": vdur_name
,
7731 "KDUR_NAME": kdur_name
,
7732 "ELEMENT_TYPE": element_type
,
7735 metric_path
= ee_config_descriptor
["metric-path"]
7736 target_port
= ee_config_descriptor
["metric-port"]
7737 vnfr_id
= vnfr_id
.replace("-", "")
7739 "JOB_NAME": vnfr_id
,
7740 "TARGET_IP": target_ip
,
7741 "TARGET_PORT": target_port
,
7742 "METRIC_PATH": metric_path
,
7745 job_list
= parse_job(job_data
, variables
)
7746 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7747 for job
in job_list
:
7749 not isinstance(job
.get("job_name"), str)
7750 or vnfr_id
not in job
["job_name"]
7752 job
["job_name"] = vnfr_id
+ "_" + str(SystemRandom().randint(1, 10000))
7753 job
["nsr_id"] = nsr_id
7754 job
["vnfr_id"] = vnfr_id
7757 async def rebuild_start_stop(
7758 self
, nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
7760 logging_text
= "Task ns={} {}={} ".format(nsr_id
, operation_type
, nslcmop_id
)
7761 self
.logger
.info(logging_text
+ "Enter")
7762 stage
= ["Preparing the environment", ""]
7763 # database nsrs record
7767 # in case of error, indicates what part of scale was failed to put nsr at error status
7768 start_deploy
= time()
7770 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_id
})
7771 vim_account_id
= db_vnfr
.get("vim-account-id")
7772 vim_info_key
= "vim:" + vim_account_id
7773 vdu_id
= additional_param
["vdu_id"]
7774 vdurs
= [item
for item
in db_vnfr
["vdur"] if item
["vdu-id-ref"] == vdu_id
]
7775 vdur
= find_in_list(
7776 vdurs
, lambda vdu
: vdu
["count-index"] == additional_param
["count-index"]
7779 vdu_vim_name
= vdur
["name"]
7780 vim_vm_id
= vdur
["vim_info"][vim_info_key
]["vim_id"]
7781 target_vim
, _
= next(k_v
for k_v
in vdur
["vim_info"].items())
7783 raise LcmException("Target vdu is not found")
7784 self
.logger
.info("vdu_vim_name >> {} ".format(vdu_vim_name
))
7785 # wait for any previous tasks in process
7786 stage
[1] = "Waiting for previous operations to terminate"
7787 self
.logger
.info(stage
[1])
7788 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7790 stage
[1] = "Reading from database."
7791 self
.logger
.info(stage
[1])
7792 self
._write
_ns
_status
(
7795 current_operation
=operation_type
.upper(),
7796 current_operation_id
=nslcmop_id
,
7798 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7801 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
7802 db_nsr_update
["operational-status"] = operation_type
7803 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7807 "vim_vm_id": vim_vm_id
,
7809 "vdu_index": additional_param
["count-index"],
7810 "vdu_id": vdur
["id"],
7811 "target_vim": target_vim
,
7812 "vim_account_id": vim_account_id
,
7815 stage
[1] = "Sending rebuild request to RO... {}".format(desc
)
7816 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7817 self
.logger
.info("ro nsr id: {}".format(nsr_id
))
7818 result_dict
= await self
.RO
.operate(nsr_id
, desc
, operation_type
)
7819 self
.logger
.info("response from RO: {}".format(result_dict
))
7820 action_id
= result_dict
["action_id"]
7821 await self
._wait
_ng
_ro
(
7826 self
.timeout
.operate
,
7828 "start_stop_rebuild",
7830 return "COMPLETED", "Done"
7831 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7832 self
.logger
.error("Exit Exception {}".format(e
))
7834 except asyncio
.CancelledError
:
7835 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
7836 exc
= "Operation was cancelled"
7837 except Exception as e
:
7838 exc
= traceback
.format_exc()
7839 self
.logger
.critical(
7840 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7842 return "FAILED", "Error in operate VNF {}".format(exc
)
7844 def get_vca_cloud_and_credentials(self
, vim_account_id
: str) -> (str, str):
7846 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7848 :param: vim_account_id: VIM Account ID
7850 :return: (cloud_name, cloud_credential)
7852 config
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("config", {})
7853 return config
.get("vca_cloud"), config
.get("vca_cloud_credential")
7855 def get_vca_k8s_cloud_and_credentials(self
, vim_account_id
: str) -> (str, str):
7857 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7859 :param: vim_account_id: VIM Account ID
7861 :return: (cloud_name, cloud_credential)
7863 config
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("config", {})
7864 return config
.get("vca_k8s_cloud"), config
.get("vca_k8s_cloud_credential")
7866 async def migrate(self
, nsr_id
, nslcmop_id
):
7868 Migrate VNFs and VDUs instances in a NS
7870 :param: nsr_id: NS Instance ID
7871 :param: nslcmop_id: nslcmop ID of migrate
7874 # Try to lock HA task here
7875 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7876 if not task_is_locked_by_me
:
7878 logging_text
= "Task ns={} migrate ".format(nsr_id
)
7879 self
.logger
.debug(logging_text
+ "Enter")
7880 # get all needed from database
7882 db_nslcmop_update
= {}
7883 nslcmop_operation_state
= None
7887 # in case of error, indicates what part of scale was failed to put nsr at error status
7888 start_deploy
= time()
7891 # wait for any previous tasks in process
7892 step
= "Waiting for previous operations to terminate"
7893 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7895 self
._write
_ns
_status
(
7898 current_operation
="MIGRATING",
7899 current_operation_id
=nslcmop_id
,
7901 step
= "Getting nslcmop from database"
7903 step
+ " after having waited for previous tasks to be completed"
7905 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7906 migrate_params
= db_nslcmop
.get("operationParams")
7909 target
.update(migrate_params
)
7910 desc
= await self
.RO
.migrate(nsr_id
, target
)
7911 self
.logger
.debug("RO return > {}".format(desc
))
7912 action_id
= desc
["action_id"]
7913 await self
._wait
_ng
_ro
(
7918 self
.timeout
.migrate
,
7919 operation
="migrate",
7921 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7922 self
.logger
.error("Exit Exception {}".format(e
))
7924 except asyncio
.CancelledError
:
7925 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
7926 exc
= "Operation was cancelled"
7927 except Exception as e
:
7928 exc
= traceback
.format_exc()
7929 self
.logger
.critical(
7930 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7933 self
._write
_ns
_status
(
7936 current_operation
="IDLE",
7937 current_operation_id
=None,
7940 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
7941 nslcmop_operation_state
= "FAILED"
7943 nslcmop_operation_state
= "COMPLETED"
7944 db_nslcmop_update
["detailed-status"] = "Done"
7945 db_nsr_update
["detailed-status"] = "Done"
7947 self
._write
_op
_status
(
7951 operation_state
=nslcmop_operation_state
,
7952 other_update
=db_nslcmop_update
,
7954 if nslcmop_operation_state
:
7958 "nslcmop_id": nslcmop_id
,
7959 "operationState": nslcmop_operation_state
,
7961 await self
.msg
.aiowrite("ns", "migrated", msg
)
7962 except Exception as e
:
7964 logging_text
+ "kafka_write notification Exception {}".format(e
)
7966 self
.logger
.debug(logging_text
+ "Exit")
7967 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_migrate")
7969 async def heal(self
, nsr_id
, nslcmop_id
):
7973 :param nsr_id: ns instance to heal
7974 :param nslcmop_id: operation to run
7978 # Try to lock HA task here
7979 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7980 if not task_is_locked_by_me
:
7983 logging_text
= "Task ns={} heal={} ".format(nsr_id
, nslcmop_id
)
7984 stage
= ["", "", ""]
7985 tasks_dict_info
= {}
7986 # ^ stage, step, VIM progress
7987 self
.logger
.debug(logging_text
+ "Enter")
7988 # get all needed from database
7990 db_nslcmop_update
= {}
7992 db_vnfrs
= {} # vnf's info indexed by _id
7994 old_operational_status
= ""
7995 old_config_status
= ""
7998 # wait for any previous tasks in process
7999 step
= "Waiting for previous operations to terminate"
8000 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
8001 self
._write
_ns
_status
(
8004 current_operation
="HEALING",
8005 current_operation_id
=nslcmop_id
,
8008 step
= "Getting nslcmop from database"
8010 step
+ " after having waited for previous tasks to be completed"
8012 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8014 step
= "Getting nsr from database"
8015 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8016 old_operational_status
= db_nsr
["operational-status"]
8017 old_config_status
= db_nsr
["config-status"]
8020 "_admin.deployed.RO.operational-status": "healing",
8022 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8024 step
= "Sending heal order to VIM"
8026 logging_text
=logging_text
,
8028 db_nslcmop
=db_nslcmop
,
8033 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
8034 self
.logger
.debug(logging_text
+ stage
[1])
8035 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
8036 self
.fs
.sync(db_nsr
["nsd-id"])
8038 # read from db: vnfr's of this ns
8039 step
= "Getting vnfrs from db"
8040 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
8041 for vnfr
in db_vnfrs_list
:
8042 db_vnfrs
[vnfr
["_id"]] = vnfr
8043 self
.logger
.debug("ns.heal db_vnfrs={}".format(db_vnfrs
))
8045 # Check for each target VNF
8046 target_list
= db_nslcmop
.get("operationParams", {}).get("healVnfData", {})
8047 for target_vnf
in target_list
:
8048 # Find this VNF in the list from DB
8049 vnfr_id
= target_vnf
.get("vnfInstanceId", None)
8051 db_vnfr
= db_vnfrs
[vnfr_id
]
8052 vnfd_id
= db_vnfr
.get("vnfd-id")
8053 vnfd_ref
= db_vnfr
.get("vnfd-ref")
8054 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
8055 base_folder
= vnfd
["_admin"]["storage"]
8060 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
8061 member_vnf_index
= db_vnfr
.get("member-vnf-index-ref")
8063 # Check each target VDU and deploy N2VC
8064 target_vdu_list
= target_vnf
.get("additionalParams", {}).get(
8067 if not target_vdu_list
:
8068 # Codigo nuevo para crear diccionario
8069 target_vdu_list
= []
8070 for existing_vdu
in db_vnfr
.get("vdur"):
8071 vdu_name
= existing_vdu
.get("vdu-name", None)
8072 vdu_index
= existing_vdu
.get("count-index", 0)
8073 vdu_run_day1
= target_vnf
.get("additionalParams", {}).get(
8076 vdu_to_be_healed
= {
8078 "count-index": vdu_index
,
8079 "run-day1": vdu_run_day1
,
8081 target_vdu_list
.append(vdu_to_be_healed
)
8082 for target_vdu
in target_vdu_list
:
8083 deploy_params_vdu
= target_vdu
8084 # Set run-day1 vnf level value if not vdu level value exists
8085 if not deploy_params_vdu
.get("run-day1") and target_vnf
.get(
8086 "additionalParams", {}
8088 deploy_params_vdu
["run-day1"] = target_vnf
[
8091 vdu_name
= target_vdu
.get("vdu-id", None)
8092 # TODO: Get vdu_id from vdud.
8094 # For multi instance VDU count-index is mandatory
8095 # For single session VDU count-indes is 0
8096 vdu_index
= target_vdu
.get("count-index", 0)
8098 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8099 stage
[1] = "Deploying Execution Environments."
8100 self
.logger
.debug(logging_text
+ stage
[1])
8102 # VNF Level charm. Normal case when proxy charms.
8103 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
8104 descriptor_config
= get_configuration(vnfd
, vnfd_ref
)
8105 if descriptor_config
:
8106 # Continue if healed machine is management machine
8107 vnf_ip_address
= db_vnfr
.get("ip-address")
8108 target_instance
= None
8109 for instance
in db_vnfr
.get("vdur", None):
8111 instance
["vdu-name"] == vdu_name
8112 and instance
["count-index"] == vdu_index
8114 target_instance
= instance
8116 if vnf_ip_address
== target_instance
.get("ip-address"):
8118 logging_text
=logging_text
8119 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8120 member_vnf_index
, vdu_name
, vdu_index
8124 nslcmop_id
=nslcmop_id
,
8130 member_vnf_index
=member_vnf_index
,
8133 deploy_params
=deploy_params_vdu
,
8134 descriptor_config
=descriptor_config
,
8135 base_folder
=base_folder
,
8136 task_instantiation_info
=tasks_dict_info
,
8140 # VDU Level charm. Normal case with native charms.
8141 descriptor_config
= get_configuration(vnfd
, vdu_name
)
8142 if descriptor_config
:
8144 logging_text
=logging_text
8145 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8146 member_vnf_index
, vdu_name
, vdu_index
8150 nslcmop_id
=nslcmop_id
,
8156 member_vnf_index
=member_vnf_index
,
8157 vdu_index
=vdu_index
,
8159 deploy_params
=deploy_params_vdu
,
8160 descriptor_config
=descriptor_config
,
8161 base_folder
=base_folder
,
8162 task_instantiation_info
=tasks_dict_info
,
8167 ROclient
.ROClientException
,
8172 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
8174 except asyncio
.CancelledError
:
8176 logging_text
+ "Cancelled Exception while '{}'".format(step
)
8178 exc
= "Operation was cancelled"
8179 except Exception as e
:
8180 exc
= traceback
.format_exc()
8181 self
.logger
.critical(
8182 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
8187 stage
[1] = "Waiting for healing pending tasks."
8188 self
.logger
.debug(logging_text
+ stage
[1])
8189 exc
= await self
._wait
_for
_tasks
(
8192 self
.timeout
.ns_deploy
,
8200 ] = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
8201 nslcmop_operation_state
= "FAILED"
8203 db_nsr_update
["operational-status"] = old_operational_status
8204 db_nsr_update
["config-status"] = old_config_status
8207 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id
, step
, exc
)
8208 for task
, task_name
in tasks_dict_info
.items():
8209 if not task
.done() or task
.cancelled() or task
.exception():
8210 if task_name
.startswith(self
.task_name_deploy_vca
):
8211 # A N2VC task is pending
8212 db_nsr_update
["config-status"] = "failed"
8214 # RO task is pending
8215 db_nsr_update
["operational-status"] = "failed"
8217 error_description_nslcmop
= None
8218 nslcmop_operation_state
= "COMPLETED"
8219 db_nslcmop_update
["detailed-status"] = "Done"
8220 db_nsr_update
["detailed-status"] = "Done"
8221 db_nsr_update
["operational-status"] = "running"
8222 db_nsr_update
["config-status"] = "configured"
8224 self
._write
_op
_status
(
8227 error_message
=error_description_nslcmop
,
8228 operation_state
=nslcmop_operation_state
,
8229 other_update
=db_nslcmop_update
,
8232 self
._write
_ns
_status
(
8235 current_operation
="IDLE",
8236 current_operation_id
=None,
8237 other_update
=db_nsr_update
,
8240 if nslcmop_operation_state
:
8244 "nslcmop_id": nslcmop_id
,
8245 "operationState": nslcmop_operation_state
,
8247 await self
.msg
.aiowrite("ns", "healed", msg
)
8248 except Exception as e
:
8250 logging_text
+ "kafka_write notification Exception {}".format(e
)
8252 self
.logger
.debug(logging_text
+ "Exit")
8253 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_heal")
8264 :param logging_text: preffix text to use at logging
8265 :param nsr_id: nsr identity
8266 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8267 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8268 :return: None or exception
8271 def get_vim_account(vim_account_id
):
8273 if vim_account_id
in db_vims
:
8274 return db_vims
[vim_account_id
]
8275 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
8276 db_vims
[vim_account_id
] = db_vim
8281 ns_params
= db_nslcmop
.get("operationParams")
8282 if ns_params
and ns_params
.get("timeout_ns_heal"):
8283 timeout_ns_heal
= ns_params
["timeout_ns_heal"]
8285 timeout_ns_heal
= self
.timeout
.ns_heal
8289 nslcmop_id
= db_nslcmop
["_id"]
8291 "action_id": nslcmop_id
,
8293 self
.logger
.warning(
8294 "db_nslcmop={} and timeout_ns_heal={}".format(
8295 db_nslcmop
, timeout_ns_heal
8298 target
.update(db_nslcmop
.get("operationParams", {}))
8300 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
8301 desc
= await self
.RO
.recreate(nsr_id
, target
)
8302 self
.logger
.debug("RO return > {}".format(desc
))
8303 action_id
= desc
["action_id"]
8304 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8305 await self
._wait
_ng
_ro
(
8312 operation
="healing",
8317 "_admin.deployed.RO.operational-status": "running",
8318 "detailed-status": " ".join(stage
),
8320 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8321 self
._write
_op
_status
(nslcmop_id
, stage
)
8323 logging_text
+ "ns healed at RO. RO_id={}".format(action_id
)
8326 except Exception as e
:
8327 stage
[2] = "ERROR healing at VIM"
8328 # self.set_vnfr_at_error(db_vnfrs, str(e))
8330 "Error healing at VIM {}".format(e
),
8331 exc_info
=not isinstance(
8334 ROclient
.ROClientException
,
8360 task_instantiation_info
,
8363 # launch instantiate_N2VC in a asyncio task and register task object
8364 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8365 # if not found, create one entry and update database
8366 # fill db_nsr._admin.deployed.VCA.<index>
8369 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
8373 get_charm_name
= False
8374 if "execution-environment-list" in descriptor_config
:
8375 ee_list
= descriptor_config
.get("execution-environment-list", [])
8376 elif "juju" in descriptor_config
:
8377 ee_list
= [descriptor_config
] # ns charms
8378 if "execution-environment-list" not in descriptor_config
:
8379 # charm name is only required for ns charms
8380 get_charm_name
= True
8381 else: # other types as script are not supported
8384 for ee_item
in ee_list
:
8387 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8388 ee_item
.get("juju"), ee_item
.get("helm-chart")
8391 ee_descriptor_id
= ee_item
.get("id")
8392 if ee_item
.get("juju"):
8393 vca_name
= ee_item
["juju"].get("charm")
8395 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
8398 if ee_item
["juju"].get("charm") is not None
8401 if ee_item
["juju"].get("cloud") == "k8s":
8402 vca_type
= "k8s_proxy_charm"
8403 elif ee_item
["juju"].get("proxy") is False:
8404 vca_type
= "native_charm"
8405 elif ee_item
.get("helm-chart"):
8406 vca_name
= ee_item
["helm-chart"]
8407 if ee_item
.get("helm-version") and ee_item
.get("helm-version") == "v2":
8410 vca_type
= "helm-v3"
8413 logging_text
+ "skipping non juju neither charm configuration"
8418 for vca_index
, vca_deployed
in enumerate(
8419 db_nsr
["_admin"]["deployed"]["VCA"]
8421 if not vca_deployed
:
8424 vca_deployed
.get("member-vnf-index") == member_vnf_index
8425 and vca_deployed
.get("vdu_id") == vdu_id
8426 and vca_deployed
.get("kdu_name") == kdu_name
8427 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
8428 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
8432 # not found, create one.
8434 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
8437 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
8439 target
+= "/kdu/{}".format(kdu_name
)
8441 "target_element": target
,
8442 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8443 "member-vnf-index": member_vnf_index
,
8445 "kdu_name": kdu_name
,
8446 "vdu_count_index": vdu_index
,
8447 "operational-status": "init", # TODO revise
8448 "detailed-status": "", # TODO revise
8449 "step": "initial-deploy", # TODO revise
8451 "vdu_name": vdu_name
,
8453 "ee_descriptor_id": ee_descriptor_id
,
8454 "charm_name": charm_name
,
8458 # create VCA and configurationStatus in db
8460 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
8461 "configurationStatus.{}".format(vca_index
): dict(),
8463 self
.update_db_2("nsrs", nsr_id
, db_dict
)
8465 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
8467 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
8468 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
8469 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
8472 task_n2vc
= asyncio
.ensure_future(
8474 logging_text
=logging_text
,
8475 vca_index
=vca_index
,
8481 vdu_index
=vdu_index
,
8482 deploy_params
=deploy_params
,
8483 config_descriptor
=descriptor_config
,
8484 base_folder
=base_folder
,
8485 nslcmop_id
=nslcmop_id
,
8489 ee_config_descriptor
=ee_item
,
8492 self
.lcm_tasks
.register(
8496 "instantiate_N2VC-{}".format(vca_index
),
8499 task_instantiation_info
[
8501 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
8502 member_vnf_index
or "", vdu_id
or ""
8505 async def heal_N2VC(
8522 ee_config_descriptor
,
8524 nsr_id
= db_nsr
["_id"]
8525 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
8526 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
8527 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
8528 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
8530 "collection": "nsrs",
8531 "filter": {"_id": nsr_id
},
8532 "path": db_update_entry
,
8537 element_under_configuration
= nsr_id
8541 vnfr_id
= db_vnfr
["_id"]
8542 osm_config
["osm"]["vnf_id"] = vnfr_id
8544 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
8546 if vca_type
== "native_charm":
8549 index_number
= vdu_index
or 0
8552 element_type
= "VNF"
8553 element_under_configuration
= vnfr_id
8554 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
8556 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
8557 element_type
= "VDU"
8558 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
8559 osm_config
["osm"]["vdu_id"] = vdu_id
8561 namespace
+= ".{}".format(kdu_name
)
8562 element_type
= "KDU"
8563 element_under_configuration
= kdu_name
8564 osm_config
["osm"]["kdu_name"] = kdu_name
8567 if base_folder
["pkg-dir"]:
8568 artifact_path
= "{}/{}/{}/{}".format(
8569 base_folder
["folder"],
8570 base_folder
["pkg-dir"],
8573 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8578 artifact_path
= "{}/Scripts/{}/{}/".format(
8579 base_folder
["folder"],
8582 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8587 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
8589 # get initial_config_primitive_list that applies to this element
8590 initial_config_primitive_list
= config_descriptor
.get(
8591 "initial-config-primitive"
8595 "Initial config primitive list > {}".format(
8596 initial_config_primitive_list
8600 # add config if not present for NS charm
8601 ee_descriptor_id
= ee_config_descriptor
.get("id")
8602 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
8603 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
8604 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
8608 "Initial config primitive list #2 > {}".format(
8609 initial_config_primitive_list
8612 # n2vc_redesign STEP 3.1
8613 # find old ee_id if exists
8614 ee_id
= vca_deployed
.get("ee_id")
8616 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
8617 # create or register execution environment in VCA. Only for native charms when healing
8618 if vca_type
== "native_charm":
8619 step
= "Waiting to VM being up and getting IP address"
8620 self
.logger
.debug(logging_text
+ step
)
8621 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8630 credentials
= {"hostname": rw_mgmt_ip
}
8632 username
= deep_get(
8633 config_descriptor
, ("config-access", "ssh-access", "default-user")
8635 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
8636 # merged. Meanwhile let's get username from initial-config-primitive
8637 if not username
and initial_config_primitive_list
:
8638 for config_primitive
in initial_config_primitive_list
:
8639 for param
in config_primitive
.get("parameter", ()):
8640 if param
["name"] == "ssh-username":
8641 username
= param
["value"]
8645 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8646 "'config-access.ssh-access.default-user'"
8648 credentials
["username"] = username
8650 # n2vc_redesign STEP 3.2
8651 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8652 self
._write
_configuration
_status
(
8654 vca_index
=vca_index
,
8655 status
="REGISTERING",
8656 element_under_configuration
=element_under_configuration
,
8657 element_type
=element_type
,
8660 step
= "register execution environment {}".format(credentials
)
8661 self
.logger
.debug(logging_text
+ step
)
8662 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
8663 credentials
=credentials
,
8664 namespace
=namespace
,
8669 # update ee_id en db
8671 "_admin.deployed.VCA.{}.ee_id".format(vca_index
): ee_id
,
8673 self
.update_db_2("nsrs", nsr_id
, db_dict_ee_id
)
8675 # for compatibility with MON/POL modules, the need model and application name at database
8676 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
8677 # Not sure if this need to be done when healing
8679 ee_id_parts = ee_id.split(".")
8680 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8681 if len(ee_id_parts) >= 2:
8682 model_name = ee_id_parts[0]
8683 application_name = ee_id_parts[1]
8684 db_nsr_update[db_update_entry + "model"] = model_name
8685 db_nsr_update[db_update_entry + "application"] = application_name
8688 # n2vc_redesign STEP 3.3
8689 # Install configuration software. Only for native charms.
8690 step
= "Install configuration Software"
8692 self
._write
_configuration
_status
(
8694 vca_index
=vca_index
,
8695 status
="INSTALLING SW",
8696 element_under_configuration
=element_under_configuration
,
8697 element_type
=element_type
,
8698 # other_update=db_nsr_update,
8702 # TODO check if already done
8703 self
.logger
.debug(logging_text
+ step
)
8705 if vca_type
== "native_charm":
8706 config_primitive
= next(
8707 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
8710 if config_primitive
:
8711 config
= self
._map
_primitive
_params
(
8712 config_primitive
, {}, deploy_params
8714 await self
.vca_map
[vca_type
].install_configuration_sw(
8716 artifact_path
=artifact_path
,
8724 # write in db flag of configuration_sw already installed
8726 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
8729 # Not sure if this need to be done when healing
8731 # add relations for this VCA (wait for other peers related with this VCA)
8732 await self._add_vca_relations(
8733 logging_text=logging_text,
8736 vca_index=vca_index,
8740 # if SSH access is required, then get execution environment SSH public
8741 # if native charm we have waited already to VM be UP
8742 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
8745 # self.logger.debug("get ssh key block")
8747 config_descriptor
, ("config-access", "ssh-access", "required")
8749 # self.logger.debug("ssh key needed")
8750 # Needed to inject a ssh key
8753 ("config-access", "ssh-access", "default-user"),
8755 step
= "Install configuration Software, getting public ssh key"
8756 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
8757 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
8760 step
= "Insert public key into VM user={} ssh_key={}".format(
8764 # self.logger.debug("no need to get ssh key")
8765 step
= "Waiting to VM being up and getting IP address"
8766 self
.logger
.debug(logging_text
+ step
)
8768 # n2vc_redesign STEP 5.1
8769 # wait for RO (ip-address) Insert pub_key into VM
8770 # IMPORTANT: We need do wait for RO to complete healing operation.
8771 await self
._wait
_heal
_ro
(nsr_id
, self
.timeout
.ns_heal
)
8774 rw_mgmt_ip
= await self
.wait_kdu_up(
8775 logging_text
, nsr_id
, vnfr_id
, kdu_name
8778 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8788 rw_mgmt_ip
= None # This is for a NS configuration
8790 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
8792 # store rw_mgmt_ip in deploy params for later replacement
8793 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
8796 # get run-day1 operation parameter
8797 runDay1
= deploy_params
.get("run-day1", False)
8799 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id
, vdu_id
, runDay1
)
8802 # n2vc_redesign STEP 6 Execute initial config primitive
8803 step
= "execute initial config primitive"
8805 # wait for dependent primitives execution (NS -> VNF -> VDU)
8806 if initial_config_primitive_list
:
8807 await self
._wait
_dependent
_n
2vc
(
8808 nsr_id
, vca_deployed_list
, vca_index
8811 # stage, in function of element type: vdu, kdu, vnf or ns
8812 my_vca
= vca_deployed_list
[vca_index
]
8813 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
8815 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
8816 elif my_vca
.get("member-vnf-index"):
8818 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
8821 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
8823 self
._write
_configuration
_status
(
8824 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
8827 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
8829 check_if_terminated_needed
= True
8830 for initial_config_primitive
in initial_config_primitive_list
:
8831 # adding information on the vca_deployed if it is a NS execution environment
8832 if not vca_deployed
["member-vnf-index"]:
8833 deploy_params
["ns_config_info"] = json
.dumps(
8834 self
._get
_ns
_config
_info
(nsr_id
)
8836 # TODO check if already done
8837 primitive_params_
= self
._map
_primitive
_params
(
8838 initial_config_primitive
, {}, deploy_params
8841 step
= "execute primitive '{}' params '{}'".format(
8842 initial_config_primitive
["name"], primitive_params_
8844 self
.logger
.debug(logging_text
+ step
)
8845 await self
.vca_map
[vca_type
].exec_primitive(
8847 primitive_name
=initial_config_primitive
["name"],
8848 params_dict
=primitive_params_
,
8853 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
8854 if check_if_terminated_needed
:
8855 if config_descriptor
.get("terminate-config-primitive"):
8859 {db_update_entry
+ "needed_terminate": True},
8861 check_if_terminated_needed
= False
8863 # TODO register in database that primitive is done
8865 # STEP 7 Configure metrics
8866 # Not sure if this need to be done when healing
8868 if vca_type == "helm" or vca_type == "helm-v3":
8869 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8871 artifact_path=artifact_path,
8872 ee_config_descriptor=ee_config_descriptor,
8875 target_ip=rw_mgmt_ip,
8881 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8884 for job in prometheus_jobs:
8887 {"job_name": job["job_name"]},
8890 fail_on_empty=False,
8894 step
= "instantiated at VCA"
8895 self
.logger
.debug(logging_text
+ step
)
8897 self
._write
_configuration
_status
(
8898 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
8901 except Exception as e
: # TODO not use Exception but N2VC exception
8902 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8904 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
8907 "Exception while {} : {}".format(step
, e
), exc_info
=True
8909 self
._write
_configuration
_status
(
8910 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
8912 raise LcmException("{} {}".format(step
, e
)) from e
8914 async def _wait_heal_ro(
8920 while time() <= start_time
+ timeout
:
8921 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8922 operational_status_ro
= db_nsr
["_admin"]["deployed"]["RO"][
8923 "operational-status"
8925 self
.logger
.debug("Wait Heal RO > {}".format(operational_status_ro
))
8926 if operational_status_ro
!= "healing":
8928 await asyncio
.sleep(15)
8929 else: # timeout_ns_deploy
8930 raise NgRoException("Timeout waiting ns to deploy")
8932 async def vertical_scale(self
, nsr_id
, nslcmop_id
):
8934 Vertical Scale the VDUs in a NS
8936 :param: nsr_id: NS Instance ID
8937 :param: nslcmop_id: nslcmop ID of migrate
8940 # Try to lock HA task here
8941 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
8942 if not task_is_locked_by_me
:
8944 logging_text
= "Task ns={} vertical scale ".format(nsr_id
)
8945 self
.logger
.debug(logging_text
+ "Enter")
8946 # get all needed from database
8948 db_nslcmop_update
= {}
8949 nslcmop_operation_state
= None
8953 # in case of error, indicates what part of scale was failed to put nsr at error status
8954 start_deploy
= time()
8957 # wait for any previous tasks in process
8958 step
= "Waiting for previous operations to terminate"
8959 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
8961 self
._write
_ns
_status
(
8964 current_operation
="VerticalScale",
8965 current_operation_id
=nslcmop_id
,
8967 step
= "Getting nslcmop from database"
8969 step
+ " after having waited for previous tasks to be completed"
8971 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8972 operationParams
= db_nslcmop
.get("operationParams")
8974 target
.update(operationParams
)
8975 desc
= await self
.RO
.vertical_scale(nsr_id
, target
)
8976 self
.logger
.debug("RO return > {}".format(desc
))
8977 action_id
= desc
["action_id"]
8978 await self
._wait
_ng
_ro
(
8983 self
.timeout
.verticalscale
,
8984 operation
="verticalscale",
8986 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
8987 self
.logger
.error("Exit Exception {}".format(e
))
8989 except asyncio
.CancelledError
:
8990 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
8991 exc
= "Operation was cancelled"
8992 except Exception as e
:
8993 exc
= traceback
.format_exc()
8994 self
.logger
.critical(
8995 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
8998 self
._write
_ns
_status
(
9001 current_operation
="IDLE",
9002 current_operation_id
=None,
9005 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
9006 nslcmop_operation_state
= "FAILED"
9008 nslcmop_operation_state
= "COMPLETED"
9009 db_nslcmop_update
["detailed-status"] = "Done"
9010 db_nsr_update
["detailed-status"] = "Done"
9012 self
._write
_op
_status
(
9016 operation_state
=nslcmop_operation_state
,
9017 other_update
=db_nslcmop_update
,
9019 if nslcmop_operation_state
:
9023 "nslcmop_id": nslcmop_id
,
9024 "operationState": nslcmop_operation_state
,
9026 await self
.msg
.aiowrite("ns", "verticalscaled", msg
)
9027 except Exception as e
:
9029 logging_text
+ "kafka_write notification Exception {}".format(e
)
9031 self
.logger
.debug(logging_text
+ "Exit")
9032 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_verticalscale")