1 # -*- coding: utf-8 -*-
4 # Copyright 2018 Telefonica S.A.
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
21 from typing
import Any
, Dict
, List
24 import logging
.handlers
36 from osm_lcm
import ROclient
37 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
38 from osm_lcm
.data_utils
.nsr
import (
41 get_deployed_vca_list
,
44 from osm_lcm
.data_utils
.vca
import (
53 from osm_lcm
.ng_ro
import NgRoClient
, NgRoException
54 from osm_lcm
.lcm_utils
import (
61 check_juju_bundle_existence
,
62 get_charm_artifact_path
,
66 from osm_lcm
.data_utils
.nsd
import (
67 get_ns_configuration_relation_list
,
71 from osm_lcm
.data_utils
.vnfd
import (
77 get_ee_sorted_initial_config_primitive_list
,
78 get_ee_sorted_terminate_config_primitive_list
,
80 get_virtual_link_profiles
,
85 get_number_of_instances
,
87 get_kdu_resource_profile
,
88 find_software_version
,
91 from osm_lcm
.data_utils
.list_utils
import find_in_list
92 from osm_lcm
.data_utils
.vnfr
import (
96 get_volumes_from_instantiation_params
,
98 from osm_lcm
.data_utils
.dict_utils
import parse_yaml_strings
99 from osm_lcm
.data_utils
.database
.vim_account
import VimAccountDB
100 from n2vc
.definitions
import RelationEndpoint
101 from n2vc
.k8s_helm_conn
import K8sHelmConnector
102 from n2vc
.k8s_helm3_conn
import K8sHelm3Connector
103 from n2vc
.k8s_juju_conn
import K8sJujuConnector
105 from osm_common
.dbbase
import DbException
106 from osm_common
.fsbase
import FsException
108 from osm_lcm
.data_utils
.database
.database
import Database
109 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
110 from osm_lcm
.data_utils
.wim
import (
112 get_target_wim_attrs
,
113 select_feasible_wim_account
,
116 from n2vc
.n2vc_juju_conn
import N2VCJujuConnector
117 from n2vc
.exceptions
import N2VCException
, N2VCNotFound
, K8sException
119 from osm_lcm
.lcm_helm_conn
import LCMHelmConn
120 from osm_lcm
.osm_config
import OsmConfigBuilder
121 from osm_lcm
.prometheus
import parse_job
123 from copy
import copy
, deepcopy
124 from time
import time
125 from uuid
import uuid4
127 from random
import randint
129 __author__
= "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
132 class NsLcm(LcmBase
):
133 SUBOPERATION_STATUS_NOT_FOUND
= -1
134 SUBOPERATION_STATUS_NEW
= -2
135 SUBOPERATION_STATUS_SKIP
= -3
136 task_name_deploy_vca
= "Deploying VCA"
138 def __init__(self
, msg
, lcm_tasks
, config
: LcmCfg
, loop
):
140 Init, Connect to database, filesystem storage, and messaging
141 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
144 super().__init
__(msg
=msg
, logger
=logging
.getLogger("lcm.ns"))
146 self
.db
= Database().instance
.db
147 self
.fs
= Filesystem().instance
.fs
149 self
.lcm_tasks
= lcm_tasks
150 self
.timeout
= config
.timeout
151 self
.ro_config
= config
.RO
152 self
.vca_config
= config
.VCA
154 # create N2VC connector
155 self
.n2vc
= N2VCJujuConnector(
158 on_update_db
=self
._on
_update
_n
2vc
_db
,
163 self
.conn_helm_ee
= LCMHelmConn(
166 vca_config
=self
.vca_config
,
167 on_update_db
=self
._on
_update
_n
2vc
_db
,
170 self
.k8sclusterhelm2
= K8sHelmConnector(
171 kubectl_command
=self
.vca_config
.kubectlpath
,
172 helm_command
=self
.vca_config
.helmpath
,
179 self
.k8sclusterhelm3
= K8sHelm3Connector(
180 kubectl_command
=self
.vca_config
.kubectlpath
,
181 helm_command
=self
.vca_config
.helm3path
,
188 self
.k8sclusterjuju
= K8sJujuConnector(
189 kubectl_command
=self
.vca_config
.kubectlpath
,
190 juju_command
=self
.vca_config
.jujupath
,
193 on_update_db
=self
._on
_update
_k
8s
_db
,
198 self
.k8scluster_map
= {
199 "helm-chart": self
.k8sclusterhelm2
,
200 "helm-chart-v3": self
.k8sclusterhelm3
,
201 "chart": self
.k8sclusterhelm3
,
202 "juju-bundle": self
.k8sclusterjuju
,
203 "juju": self
.k8sclusterjuju
,
207 "lxc_proxy_charm": self
.n2vc
,
208 "native_charm": self
.n2vc
,
209 "k8s_proxy_charm": self
.n2vc
,
210 "helm": self
.conn_helm_ee
,
211 "helm-v3": self
.conn_helm_ee
,
215 self
.RO
= NgRoClient(self
.loop
, **self
.ro_config
.to_dict())
217 self
.op_status_map
= {
218 "instantiation": self
.RO
.status
,
219 "termination": self
.RO
.status
,
220 "migrate": self
.RO
.status
,
221 "healing": self
.RO
.recreate_status
,
222 "verticalscale": self
.RO
.status
,
223 "start_stop_rebuild": self
.RO
.status
,
227 def increment_ip_mac(ip_mac
, vm_index
=1):
228 if not isinstance(ip_mac
, str):
231 # try with ipv4 look for last dot
232 i
= ip_mac
.rfind(".")
235 return "{}{}".format(ip_mac
[:i
], int(ip_mac
[i
:]) + vm_index
)
236 # try with ipv6 or mac look for last colon. Operate in hex
237 i
= ip_mac
.rfind(":")
240 # format in hex, len can be 2 for mac or 4 for ipv6
241 return ("{}{:0" + str(len(ip_mac
) - i
) + "x}").format(
242 ip_mac
[:i
], int(ip_mac
[i
:], 16) + vm_index
248 def _on_update_ro_db(self
, nsrs_id
, ro_descriptor
):
249 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
252 # TODO filter RO descriptor fields...
256 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
257 db_dict
["deploymentStatus"] = ro_descriptor
258 self
.update_db_2("nsrs", nsrs_id
, db_dict
)
260 except Exception as e
:
262 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id
, e
)
async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
    """N2VC callback: refresh vcaStatus / configurationStatus / nsState in the nsr.

    Reads the nsr selected by *filter*, fetches the juju status for the NS
    namespace and writes it back; if the NS is READY/DEGRADED it re-derives
    the degraded flag from machine and application statuses.

    :param table: database table (expected "nsrs")
    :param filter: query used to locate the nsr; must contain "_id"
    :param path: dotted path whose last component is the VCA index
    :param updated_data: unused here; kept for the callback signature
    :param vca_id: optional VCA account id passed through to n2vc
    """
    # remove last dot from path (if exists)
    if path.endswith("."):
        path = path[:-1]

    # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
    #                   .format(table, filter, path, updated_data))
    try:
        nsr_id = filter.get("_id")

        # read ns record from database
        nsr = self.db.get_one(table="nsrs", q_filter=filter)
        current_ns_status = nsr.get("nsState")

        # get vca status for NS
        status_dict = await self.n2vc.get_status(
            namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = status_dict

        # update configurationStatus for this VCA
        try:
            # the VCA index is the last dotted component of *path*
            vca_index = int(path[path.rfind(".") + 1 :])

            vca_list = deep_get(
                target_dict=nsr, key_list=("_admin", "deployed", "VCA")
            )
            vca_status = vca_list[vca_index].get("status")

            configuration_status_list = nsr.get("configurationStatus")
            config_status = configuration_status_list[vca_index].get("status")

            if config_status == "BROKEN" and vca_status != "failed":
                db_dict["configurationStatus"][vca_index] = "READY"
            elif config_status != "BROKEN" and vca_status == "failed":
                db_dict["configurationStatus"][vca_index] = "BROKEN"
        except Exception as e:
            # not update configurationStatus
            self.logger.debug("Error updating vca_index (ignore): {}".format(e))

        # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
        # if nsState = 'DEGRADED' check if all is OK
        is_degraded = False
        if current_ns_status in ("READY", "DEGRADED"):
            error_description = ""
            # check machines
            if status_dict.get("machines"):
                for machine_id in status_dict.get("machines"):
                    machine = status_dict.get("machines").get(machine_id)
                    # check machine agent-status
                    if machine.get("agent-status"):
                        s = machine.get("agent-status").get("status")
                        if s != "started":
                            is_degraded = True
                            error_description += (
                                "machine {} agent-status={} ; ".format(
                                    machine_id, s
                                )
                            )
                    # check machine instance status
                    if machine.get("instance-status"):
                        s = machine.get("instance-status").get("status")
                        if s != "running":
                            is_degraded = True
                            error_description += (
                                "machine {} instance-status={} ; ".format(
                                    machine_id, s
                                )
                            )
            # check applications
            if status_dict.get("applications"):
                for app_id in status_dict.get("applications"):
                    app = status_dict.get("applications").get(app_id)
                    # check application status
                    if app.get("status"):
                        s = app.get("status").get("status")
                        if s != "active":
                            is_degraded = True
                            error_description += (
                                "application {} status={} ; ".format(app_id, s)
                            )

            if error_description:
                db_dict["errorDescription"] = error_description
            if current_ns_status == "READY" and is_degraded:
                db_dict["nsState"] = "DEGRADED"
            if current_ns_status == "DEGRADED" and not is_degraded:
                db_dict["nsState"] = "READY"

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)

    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
async def _on_update_k8s_db(
    self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
):
    """
    Updating vca status in NSR record
    :param cluster_uuid: UUID of a k8s cluster
    :param kdu_instance: The unique name of the KDU instance
    :param filter: To get nsr_id
    :param vca_id: optional VCA account id
    :cluster_type: The cluster type (juju, k8s)
    :return: none
    """
    # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
    #                   .format(cluster_uuid, kdu_instance, filter))
    try:
        nsr_id = filter.get("_id")

        # get vca status for NS
        vca_status = await self.k8scluster_map[cluster_type].status_kdu(
            cluster_uuid=cluster_uuid,
            kdu_instance=kdu_instance,
            yaml_format=False,
            complete_status=True,
            vca_id=vca_id,
        )

        # vcaStatus
        db_dict = dict()
        db_dict["vcaStatus"] = {nsr_id: vca_status}

        self.logger.debug(
            f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
        )

        # write to database
        self.update_db_2("nsrs", nsr_id, db_dict)
    except (asyncio.CancelledError, asyncio.TimeoutError):
        raise
    except Exception as e:
        self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
@staticmethod
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render *cloud_init_text* as a Jinja2 template with *additional_params*.

    :param cloud_init_text: raw cloud-init content (Jinja2 template)
    :param additional_params: variables for the template; may be None
    :param vnfd_id: vnfd id, used only for error messages
    :param vdu_id: vdu id, used only for error messages
    :return: rendered cloud-init text
    :raises LcmException: when a template variable is missing or the
        template cannot be parsed
    """
    try:
        # StrictUndefined: fail on missing variables instead of rendering blanks
        env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        template = env.from_string(cloud_init_text)
        return template.render(additional_params or {})
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
427 def _get_vdu_cloud_init_content(self
, vdu
, vnfd
):
428 cloud_init_content
= cloud_init_file
= None
430 if vdu
.get("cloud-init-file"):
431 base_folder
= vnfd
["_admin"]["storage"]
432 if base_folder
["pkg-dir"]:
433 cloud_init_file
= "{}/{}/cloud_init/{}".format(
434 base_folder
["folder"],
435 base_folder
["pkg-dir"],
436 vdu
["cloud-init-file"],
439 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
440 base_folder
["folder"],
441 vdu
["cloud-init-file"],
443 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
444 cloud_init_content
= ci_file
.read()
445 elif vdu
.get("cloud-init"):
446 cloud_init_content
= vdu
["cloud-init"]
448 return cloud_init_content
449 except FsException
as e
:
451 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
452 vnfd
["id"], vdu
["id"], cloud_init_file
, e
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the first vdur matching *vdu_id*.

    :param db_vnfr: vnfr record containing a "vdur" list
    :param vdu_id: vdu-id-ref to look for
    :return: additionalParams with yaml-string values parsed
    """
    # first matching vdur, or {} when none matches
    vdur = next(
        (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
    """
    Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
    :param vnfd: input vnfd
    :param new_id: overrides vnf id if provided
    :param additionalParams: Instantiation params for VNFs provided
    :param nsrId: Id of the NSR
    :return: copy of vnfd
    """
    vnfd_RO = deepcopy(vnfd)
    # remove unused by RO configuration, monitoring, scaling and internal keys
    vnfd_RO.pop("_id", None)
    vnfd_RO.pop("_admin", None)
    vnfd_RO.pop("monitoring-param", None)
    vnfd_RO.pop("scaling-group-descriptor", None)
    vnfd_RO.pop("kdu", None)
    vnfd_RO.pop("k8s-cluster", None)
    if new_id:
        vnfd_RO["id"] = new_id

    # parse cloud-init or cloud-init-file with the provided variables using Jinja2
    for vdu in get_iterable(vnfd_RO, "vdu"):
        vdu.pop("cloud-init-file", None)
        vdu.pop("cloud-init", None)
    return vnfd_RO
490 def ip_profile_2_RO(ip_profile
):
491 RO_ip_profile
= deepcopy(ip_profile
)
492 if "dns-server" in RO_ip_profile
:
493 if isinstance(RO_ip_profile
["dns-server"], list):
494 RO_ip_profile
["dns-address"] = []
495 for ds
in RO_ip_profile
.pop("dns-server"):
496 RO_ip_profile
["dns-address"].append(ds
["address"])
498 RO_ip_profile
["dns-address"] = RO_ip_profile
.pop("dns-server")
499 if RO_ip_profile
.get("ip-version") == "ipv4":
500 RO_ip_profile
["ip-version"] = "IPv4"
501 if RO_ip_profile
.get("ip-version") == "ipv6":
502 RO_ip_profile
["ip-version"] = "IPv6"
503 if "dhcp-params" in RO_ip_profile
:
504 RO_ip_profile
["dhcp"] = RO_ip_profile
.pop("dhcp-params")
507 def _get_ro_vim_id_for_vim_account(self
, vim_account
):
508 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account
})
509 if db_vim
["_admin"]["operationalState"] != "ENABLED":
511 "VIM={} is not available. operationalState={}".format(
512 vim_account
, db_vim
["_admin"]["operationalState"]
515 RO_vim_id
= db_vim
["_admin"]["deployed"]["RO"]
def get_ro_wim_id_for_wim_account(self, wim_account):
    """Return the RO-side account id for *wim_account*.

    A string argument is resolved through the wim_accounts collection;
    any non-string value is returned unchanged (already-resolved data).

    :raises LcmException: when the WIM is not in operationalState ENABLED
    """
    if isinstance(wim_account, str):
        db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
        if db_wim["_admin"]["operationalState"] != "ENABLED":
            raise LcmException(
                "WIM={} is not available. operationalState={}".format(
                    wim_account, db_wim["_admin"]["operationalState"]
                )
            )
        RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
        return RO_wim_id
    else:
        return wim_account
def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
    """Scale vdur entries of a vnfr in the database (and in *db_vnfr* itself).

    :param db_vnfr: vnfr record; its "vdur" list is refreshed from db on return
    :param vdu_create: dict vdu-id-ref -> number of instances to add
    :param vdu_delete: dict vdu-id-ref -> number of instances to remove
    :param mark_delete: when True, mark vdur status DELETING instead of pulling
        them from the database
    :raises LcmException: scaling out with no existing vdur nor vdur-template
    """
    db_vdu_push_list = []
    template_vdur = []
    db_update = {"_admin.modified": time()}
    if vdu_create:
        for vdu_id, vdu_count in vdu_create.items():
            # clone the newest existing vdur of this vdu as the template
            vdur = next(
                (
                    vdur
                    for vdur in reversed(db_vnfr["vdur"])
                    if vdur["vdu-id-ref"] == vdu_id
                ),
                None,
            )
            if not vdur:
                # Read the template saved in the db:
                self.logger.debug(
                    "No vdur in the database. Using the vdur-template to scale"
                )
                vdur_template = db_vnfr.get("vdur-template")
                if not vdur_template:
                    raise LcmException(
                        "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                            vdu_id
                        )
                    )
                vdur = vdur_template[0]
                # Delete a template from the database after using it
                self.db.set_one(
                    "vnfrs",
                    {"_id": db_vnfr["_id"]},
                    None,
                    pull={"vdur-template": {"_id": vdur["_id"]}},
                )
            for count in range(vdu_count):
                vdur_copy = deepcopy(vdur)
                vdur_copy["status"] = "BUILD"
                vdur_copy["status-detailed"] = None
                vdur_copy["ip-address"] = None
                vdur_copy["_id"] = str(uuid4())
                vdur_copy["count-index"] += count + 1
                vdur_copy["id"] = "{}-{}".format(
                    vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                )
                vdur_copy.pop("vim_info", None)
                for iface in vdur_copy["interfaces"]:
                    if iface.get("fixed-ip"):
                        iface["ip-address"] = self.increment_ip_mac(
                            iface["ip-address"], count + 1
                        )
                    else:
                        iface.pop("ip-address", None)
                    if iface.get("fixed-mac"):
                        iface["mac-address"] = self.increment_ip_mac(
                            iface["mac-address"], count + 1
                        )
                    else:
                        iface.pop("mac-address", None)
                    if db_vnfr["vdur"]:
                        iface.pop(
                            "mgmt_vnf", None
                        )  # only first vdu can be managment of vnf
                db_vdu_push_list.append(vdur_copy)
                # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
    if vdu_delete:
        if len(db_vnfr["vdur"]) == 1:
            # The scale will move to 0 instances
            self.logger.debug(
                "Scaling to 0 !, creating the template with the last vdur"
            )
            template_vdur = [db_vnfr["vdur"][0]]
        for vdu_id, vdu_count in vdu_delete.items():
            if mark_delete:
                indexes_to_delete = [
                    iv[0]
                    for iv in enumerate(db_vnfr["vdur"])
                    if iv[1]["vdu-id-ref"] == vdu_id
                ]
                db_update.update(
                    {
                        "vdur.{}.status".format(i): "DELETING"
                        for i in indexes_to_delete[-vdu_count:]
                    }
                )
            else:
                # it must be deleted one by one because common.db does not allow otherwise
                vdus_to_delete = [
                    v
                    for v in reversed(db_vnfr["vdur"])
                    if v["vdu-id-ref"] == vdu_id
                ]
                for vdu in vdus_to_delete[:vdu_count]:
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur": {"_id": vdu["_id"]}},
                    )
    db_push = {}
    if db_vdu_push_list:
        db_push["vdur"] = db_vdu_push_list
    if template_vdur:
        db_push["vdur-template"] = template_vdur
    if not db_push:
        db_push = None
    db_vnfr["vdur-template"] = template_vdur
    self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
    # modify passed dictionary db_vnfr
    db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
    db_vnfr["vdur"] = db_vnfr_["vdur"]
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        for net_RO in get_iterable(nsr_desc_RO, "nets"):
            if vld["id"] != net_RO.get("ns_net_osm_id"):
                continue
            vld["vim-id"] = net_RO.get("vim_net_id")
            vld["name"] = net_RO.get("vim_name")
            vld["status"] = net_RO.get("status")
            vld["status-detailed"] = net_RO.get("error_msg")
            ns_update_nsr["vld.{}".format(vld_index)] = vld
            break
        else:
            # no RO net matched this vld
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr (and its vdur without a status) as ERROR in the database.

    Best-effort: database errors are logged, not raised.

    :param db_vnfrs: dict member-vnf-index -> vnfr content (also modified)
    :param error_text: detail stored in status-detailed, when provided
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                if "status" not in vdur:
                    vdur["status"] = "ERROR"
                    vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                    if error_text:
                        vdur["status-detailed"] = str(error_text)
                        vnfr_update[
                            "vdur.{}.status-detailed".format(vdu_index)
                        ] = str(error_text)
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
    """
    Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
    :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vnf_index, db_vnfr in db_vnfrs.items():
        for vnf_RO in nsr_desc_RO["vnfs"]:
            if vnf_RO["member_vnf_index"] != vnf_index:
                continue
            vnfr_update = {}
            if vnf_RO.get("ip_address"):
                db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                    "ip_address"
                ].split(";")[0]
            elif not db_vnfr.get("ip-address"):
                if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                    raise LcmExceptionNoMgmtIP(
                        "ns member_vnf_index '{}' has no IP address".format(
                            vnf_index
                        )
                    )

            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                vdur_RO_count_index = 0
                if vdur.get("pdu-type"):
                    continue
                for vdur_RO in get_iterable(vnf_RO, "vms"):
                    if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                        continue
                    if vdur["count-index"] != vdur_RO_count_index:
                        vdur_RO_count_index += 1
                        continue
                    vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                    if vdur_RO.get("ip_address"):
                        vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                    else:
                        vdur["ip-address"] = None
                    vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                    vdur["name"] = vdur_RO.get("vim_name")
                    vdur["status"] = vdur_RO.get("status")
                    vdur["status-detailed"] = vdur_RO.get("error_msg")
                    for ifacer in get_iterable(vdur, "interfaces"):
                        for interface_RO in get_iterable(vdur_RO, "interfaces"):
                            if ifacer["name"] == interface_RO.get("internal_name"):
                                ifacer["ip-address"] = interface_RO.get(
                                    "ip_address"
                                )
                                ifacer["mac-address"] = interface_RO.get(
                                    "mac_address"
                                )
                                break
                        else:
                            raise LcmException(
                                "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                "from VIM info".format(
                                    vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                )
                            )
                    vnfr_update["vdur.{}".format(vdu_index)] = vdur
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                        "VIM info".format(
                            vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                        )
                    )

            for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                for net_RO in get_iterable(nsr_desc_RO, "nets"):
                    if vld["id"] != net_RO.get("vnf_net_osm_id"):
                        continue
                    vld["vim-id"] = net_RO.get("vim_net_id")
                    vld["name"] = net_RO.get("vim_name")
                    vld["status"] = net_RO.get("status")
                    vld["status-detailed"] = net_RO.get("error_msg")
                    vnfr_update["vld.{}".format(vld_index)] = vld
                    break
                else:
                    raise LcmException(
                        "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]
                        )
                    )

            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
            break
        else:
            raise LcmException(
                "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                    vnf_index
                )
            )
781 def _get_ns_config_info(self
, nsr_id
):
783 Generates a mapping between vnf,vdu elements and the N2VC id
784 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
785 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
786 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
787 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
789 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
790 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
792 ns_config_info
= {"osm-config-mapping": mapping
}
793 for vca
in vca_deployed_list
:
794 if not vca
["member-vnf-index"]:
796 if not vca
["vdu_id"]:
797 mapping
[vca
["member-vnf-index"]] = vca
["application"]
801 vca
["member-vnf-index"], vca
["vdu_id"], vca
["vdu_count_index"]
803 ] = vca
["application"]
804 return ns_config_info
806 async def _instantiate_ng_ro(
822 def get_vim_account(vim_account_id
):
824 if vim_account_id
in db_vims
:
825 return db_vims
[vim_account_id
]
826 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
827 db_vims
[vim_account_id
] = db_vim
830 # modify target_vld info with instantiation parameters
831 def parse_vld_instantiation_params(
832 target_vim
, target_vld
, vld_params
, target_sdn
834 if vld_params
.get("ip-profile"):
835 target_vld
["vim_info"][target_vim
]["ip_profile"] = vld_to_ro_ip_profile(
836 vld_params
["ip-profile"]
838 if vld_params
.get("provider-network"):
839 target_vld
["vim_info"][target_vim
]["provider_network"] = vld_params
[
842 if "sdn-ports" in vld_params
["provider-network"] and target_sdn
:
843 target_vld
["vim_info"][target_sdn
]["sdn-ports"] = vld_params
[
847 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
848 # if wim_account_id is specified in vld_params, validate if it is feasible.
849 wim_account_id
, db_wim
= select_feasible_wim_account(
850 db_nsr
, db_vnfrs
, target_vld
, vld_params
, self
.logger
854 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
855 self
.logger
.info("WIM selected: {:s}".format(str(wim_account_id
)))
856 # update vld_params with correct WIM account Id
857 vld_params
["wimAccountId"] = wim_account_id
859 target_wim
= "wim:{}".format(wim_account_id
)
860 target_wim_attrs
= get_target_wim_attrs(nsr_id
, target_vld
, vld_params
)
861 sdn_ports
= get_sdn_ports(vld_params
, db_wim
)
862 if len(sdn_ports
) > 0:
863 target_vld
["vim_info"][target_wim
] = target_wim_attrs
864 target_vld
["vim_info"][target_wim
]["sdn-ports"] = sdn_ports
867 "Target VLD with WIM data: {:s}".format(str(target_vld
))
870 for param
in ("vim-network-name", "vim-network-id"):
871 if vld_params
.get(param
):
872 if isinstance(vld_params
[param
], dict):
873 for vim
, vim_net
in vld_params
[param
].items():
874 other_target_vim
= "vim:" + vim
876 target_vld
["vim_info"],
877 (other_target_vim
, param
.replace("-", "_")),
880 else: # isinstance str
881 target_vld
["vim_info"][target_vim
][
882 param
.replace("-", "_")
883 ] = vld_params
[param
]
884 if vld_params
.get("common_id"):
885 target_vld
["common_id"] = vld_params
.get("common_id")
887 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
888 def update_ns_vld_target(target
, ns_params
):
889 for vnf_params
in ns_params
.get("vnf", ()):
890 if vnf_params
.get("vimAccountId"):
894 for vnfr
in db_vnfrs
.values()
895 if vnf_params
["member-vnf-index"]
896 == vnfr
["member-vnf-index-ref"]
900 vdur
= next((vdur
for vdur
in target_vnf
.get("vdur", ())), None)
903 for a_index
, a_vld
in enumerate(target
["ns"]["vld"]):
904 target_vld
= find_in_list(
905 get_iterable(vdur
, "interfaces"),
906 lambda iface
: iface
.get("ns-vld-id") == a_vld
["name"],
909 vld_params
= find_in_list(
910 get_iterable(ns_params
, "vld"),
911 lambda v_vld
: v_vld
["name"] in (a_vld
["name"], a_vld
["id"]),
914 if vnf_params
.get("vimAccountId") not in a_vld
.get(
917 target_vim_network_list
= [
918 v
for _
, v
in a_vld
.get("vim_info").items()
920 target_vim_network_name
= next(
922 item
.get("vim_network_name", "")
923 for item
in target_vim_network_list
928 target
["ns"]["vld"][a_index
].get("vim_info").update(
930 "vim:{}".format(vnf_params
["vimAccountId"]): {
931 "vim_network_name": target_vim_network_name
,
937 for param
in ("vim-network-name", "vim-network-id"):
938 if vld_params
.get(param
) and isinstance(
939 vld_params
[param
], dict
941 for vim
, vim_net
in vld_params
[
944 other_target_vim
= "vim:" + vim
946 target
["ns"]["vld"][a_index
].get(
951 param
.replace("-", "_"),
956 nslcmop_id
= db_nslcmop
["_id"]
958 "name": db_nsr
["name"],
961 "image": deepcopy(db_nsr
["image"]),
962 "flavor": deepcopy(db_nsr
["flavor"]),
963 "action_id": nslcmop_id
,
964 "cloud_init_content": {},
966 for image
in target
["image"]:
967 image
["vim_info"] = {}
968 for flavor
in target
["flavor"]:
969 flavor
["vim_info"] = {}
970 if db_nsr
.get("affinity-or-anti-affinity-group"):
971 target
["affinity-or-anti-affinity-group"] = deepcopy(
972 db_nsr
["affinity-or-anti-affinity-group"]
974 for affinity_or_anti_affinity_group
in target
[
975 "affinity-or-anti-affinity-group"
977 affinity_or_anti_affinity_group
["vim_info"] = {}
979 if db_nslcmop
.get("lcmOperationType") != "instantiate":
980 # get parameters of instantiation:
981 db_nslcmop_instantiate
= self
.db
.get_list(
984 "nsInstanceId": db_nslcmop
["nsInstanceId"],
985 "lcmOperationType": "instantiate",
988 ns_params
= db_nslcmop_instantiate
.get("operationParams")
990 ns_params
= db_nslcmop
.get("operationParams")
991 ssh_keys_instantiation
= ns_params
.get("ssh_keys") or []
992 ssh_keys_all
= ssh_keys_instantiation
+ (n2vc_key_list
or [])
995 for vld_index
, vld
in enumerate(db_nsr
.get("vld")):
996 target_vim
= "vim:{}".format(ns_params
["vimAccountId"])
1000 "mgmt-network": vld
.get("mgmt-network", False),
1001 "type": vld
.get("type"),
1004 "vim_network_name": vld
.get("vim-network-name"),
1005 "vim_account_id": ns_params
["vimAccountId"],
1009 # check if this network needs SDN assist
1010 if vld
.get("pci-interfaces"):
1011 db_vim
= get_vim_account(ns_params
["vimAccountId"])
1012 if vim_config
:= db_vim
.get("config"):
1013 if sdnc_id
:= vim_config
.get("sdn-controller"):
1014 sdn_vld
= "nsrs:{}:vld.{}".format(nsr_id
, vld
["id"])
1015 target_sdn
= "sdn:{}".format(sdnc_id
)
1016 target_vld
["vim_info"][target_sdn
] = {
1018 "target_vim": target_vim
,
1020 "type": vld
.get("type"),
1023 nsd_vnf_profiles
= get_vnf_profiles(nsd
)
1024 for nsd_vnf_profile
in nsd_vnf_profiles
:
1025 for cp
in nsd_vnf_profile
["virtual-link-connectivity"]:
1026 if cp
["virtual-link-profile-id"] == vld
["id"]:
1028 "member_vnf:{}.{}".format(
1029 cp
["constituent-cpd-id"][0][
1030 "constituent-base-element-id"
1032 cp
["constituent-cpd-id"][0]["constituent-cpd-id"],
1034 ] = "nsrs:{}:vld.{}".format(nsr_id
, vld_index
)
1036 # check at nsd descriptor, if there is an ip-profile
1038 nsd_vlp
= find_in_list(
1039 get_virtual_link_profiles(nsd
),
1040 lambda a_link_profile
: a_link_profile
["virtual-link-desc-id"]
1045 and nsd_vlp
.get("virtual-link-protocol-data")
1046 and nsd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
1048 vld_params
["ip-profile"] = nsd_vlp
["virtual-link-protocol-data"][
1052 # update vld_params with instantiation params
1053 vld_instantiation_params
= find_in_list(
1054 get_iterable(ns_params
, "vld"),
1055 lambda a_vld
: a_vld
["name"] in (vld
["name"], vld
["id"]),
1057 if vld_instantiation_params
:
1058 vld_params
.update(vld_instantiation_params
)
1059 parse_vld_instantiation_params(target_vim
, target_vld
, vld_params
, None)
1060 target
["ns"]["vld"].append(target_vld
)
1061 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1062 update_ns_vld_target(target
, ns_params
)
1064 for vnfr
in db_vnfrs
.values():
1065 vnfd
= find_in_list(
1066 db_vnfds
, lambda db_vnf
: db_vnf
["id"] == vnfr
["vnfd-ref"]
1068 vnf_params
= find_in_list(
1069 get_iterable(ns_params
, "vnf"),
1070 lambda a_vnf
: a_vnf
["member-vnf-index"] == vnfr
["member-vnf-index-ref"],
1072 target_vnf
= deepcopy(vnfr
)
1073 target_vim
= "vim:{}".format(vnfr
["vim-account-id"])
1074 for vld
in target_vnf
.get("vld", ()):
1075 # check if connected to a ns.vld, to fill target'
1076 vnf_cp
= find_in_list(
1077 vnfd
.get("int-virtual-link-desc", ()),
1078 lambda cpd
: cpd
.get("id") == vld
["id"],
1081 ns_cp
= "member_vnf:{}.{}".format(
1082 vnfr
["member-vnf-index-ref"], vnf_cp
["id"]
1084 if cp2target
.get(ns_cp
):
1085 vld
["target"] = cp2target
[ns_cp
]
1088 target_vim
: {"vim_network_name": vld
.get("vim-network-name")}
1090 # check if this network needs SDN assist
1092 if vld
.get("pci-interfaces"):
1093 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1094 sdnc_id
= db_vim
["config"].get("sdn-controller")
1096 sdn_vld
= "vnfrs:{}:vld.{}".format(target_vnf
["_id"], vld
["id"])
1097 target_sdn
= "sdn:{}".format(sdnc_id
)
1098 vld
["vim_info"][target_sdn
] = {
1100 "target_vim": target_vim
,
1102 "type": vld
.get("type"),
1105 # check at vnfd descriptor, if there is an ip-profile
1107 vnfd_vlp
= find_in_list(
1108 get_virtual_link_profiles(vnfd
),
1109 lambda a_link_profile
: a_link_profile
["id"] == vld
["id"],
1113 and vnfd_vlp
.get("virtual-link-protocol-data")
1114 and vnfd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
1116 vld_params
["ip-profile"] = vnfd_vlp
["virtual-link-protocol-data"][
1119 # update vld_params with instantiation params
1121 vld_instantiation_params
= find_in_list(
1122 get_iterable(vnf_params
, "internal-vld"),
1123 lambda i_vld
: i_vld
["name"] == vld
["id"],
1125 if vld_instantiation_params
:
1126 vld_params
.update(vld_instantiation_params
)
1127 parse_vld_instantiation_params(target_vim
, vld
, vld_params
, target_sdn
)
1130 for vdur
in target_vnf
.get("vdur", ()):
1131 if vdur
.get("status") == "DELETING" or vdur
.get("pdu-type"):
1132 continue # This vdu must not be created
1133 vdur
["vim_info"] = {"vim_account_id": vnfr
["vim-account-id"]}
1135 self
.logger
.debug("NS > ssh_keys > {}".format(ssh_keys_all
))
1138 vdu_configuration
= get_configuration(vnfd
, vdur
["vdu-id-ref"])
1139 vnf_configuration
= get_configuration(vnfd
, vnfd
["id"])
1142 and vdu_configuration
.get("config-access")
1143 and vdu_configuration
.get("config-access").get("ssh-access")
1145 vdur
["ssh-keys"] = ssh_keys_all
1146 vdur
["ssh-access-required"] = vdu_configuration
[
1148 ]["ssh-access"]["required"]
1151 and vnf_configuration
.get("config-access")
1152 and vnf_configuration
.get("config-access").get("ssh-access")
1153 and any(iface
.get("mgmt-vnf") for iface
in vdur
["interfaces"])
1155 vdur
["ssh-keys"] = ssh_keys_all
1156 vdur
["ssh-access-required"] = vnf_configuration
[
1158 ]["ssh-access"]["required"]
1159 elif ssh_keys_instantiation
and find_in_list(
1160 vdur
["interfaces"], lambda iface
: iface
.get("mgmt-vnf")
1162 vdur
["ssh-keys"] = ssh_keys_instantiation
1164 self
.logger
.debug("NS > vdur > {}".format(vdur
))
1166 vdud
= get_vdu(vnfd
, vdur
["vdu-id-ref"])
1168 if vdud
.get("cloud-init-file"):
1169 vdur
["cloud-init"] = "{}:file:{}".format(
1170 vnfd
["_id"], vdud
.get("cloud-init-file")
1172 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1173 if vdur
["cloud-init"] not in target
["cloud_init_content"]:
1174 base_folder
= vnfd
["_admin"]["storage"]
1175 if base_folder
["pkg-dir"]:
1176 cloud_init_file
= "{}/{}/cloud_init/{}".format(
1177 base_folder
["folder"],
1178 base_folder
["pkg-dir"],
1179 vdud
.get("cloud-init-file"),
1182 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
1183 base_folder
["folder"],
1184 vdud
.get("cloud-init-file"),
1186 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
1187 target
["cloud_init_content"][
1190 elif vdud
.get("cloud-init"):
1191 vdur
["cloud-init"] = "{}:vdu:{}".format(
1192 vnfd
["_id"], get_vdu_index(vnfd
, vdur
["vdu-id-ref"])
1194 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1195 target
["cloud_init_content"][vdur
["cloud-init"]] = vdud
[
1198 vdur
["additionalParams"] = vdur
.get("additionalParams") or {}
1199 deploy_params_vdu
= self
._format
_additional
_params
(
1200 vdur
.get("additionalParams") or {}
1202 deploy_params_vdu
["OSM"] = get_osm_params(
1203 vnfr
, vdur
["vdu-id-ref"], vdur
["count-index"]
1205 vdur
["additionalParams"] = deploy_params_vdu
1208 ns_flavor
= target
["flavor"][int(vdur
["ns-flavor-id"])]
1209 if target_vim
not in ns_flavor
["vim_info"]:
1210 ns_flavor
["vim_info"][target_vim
] = {}
1213 # in case alternative images are provided we must check if they should be applied
1214 # for the vim_type, modify the vim_type taking into account
1215 ns_image_id
= int(vdur
["ns-image-id"])
1216 if vdur
.get("alt-image-ids"):
1217 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1218 vim_type
= db_vim
["vim_type"]
1219 for alt_image_id
in vdur
.get("alt-image-ids"):
1220 ns_alt_image
= target
["image"][int(alt_image_id
)]
1221 if vim_type
== ns_alt_image
.get("vim-type"):
1222 # must use alternative image
1224 "use alternative image id: {}".format(alt_image_id
)
1226 ns_image_id
= alt_image_id
1227 vdur
["ns-image-id"] = ns_image_id
1229 ns_image
= target
["image"][int(ns_image_id
)]
1230 if target_vim
not in ns_image
["vim_info"]:
1231 ns_image
["vim_info"][target_vim
] = {}
1234 if vdur
.get("affinity-or-anti-affinity-group-id"):
1235 for ags_id
in vdur
["affinity-or-anti-affinity-group-id"]:
1236 ns_ags
= target
["affinity-or-anti-affinity-group"][int(ags_id
)]
1237 if target_vim
not in ns_ags
["vim_info"]:
1238 ns_ags
["vim_info"][target_vim
] = {}
1240 vdur
["vim_info"] = {target_vim
: {}}
1241 # instantiation parameters
1243 vdu_instantiation_params
= find_in_list(
1244 get_iterable(vnf_params
, "vdu"),
1245 lambda i_vdu
: i_vdu
["id"] == vdud
["id"],
1247 if vdu_instantiation_params
:
1248 # Parse the vdu_volumes from the instantiation params
1249 vdu_volumes
= get_volumes_from_instantiation_params(
1250 vdu_instantiation_params
, vdud
1252 vdur
["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1253 vdur
["additionalParams"]["OSM"][
1255 ] = vdu_instantiation_params
.get("vim-flavor-id")
1256 vdur_list
.append(vdur
)
1257 target_vnf
["vdur"] = vdur_list
1258 target
["vnf"].append(target_vnf
)
1260 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
1261 desc
= await self
.RO
.deploy(nsr_id
, target
)
1262 self
.logger
.debug("RO return > {}".format(desc
))
1263 action_id
= desc
["action_id"]
1264 await self
._wait
_ng
_ro
(
1271 operation
="instantiation",
1276 "_admin.deployed.RO.operational-status": "running",
1277 "detailed-status": " ".join(stage
),
1279 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1280 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1281 self
._write
_op
_status
(nslcmop_id
, stage
)
1283 logging_text
+ "ns deployed at RO. RO_id={}".format(action_id
)
1287 async def _wait_ng_ro(
1297 detailed_status_old
= None
1299 start_time
= start_time
or time()
1300 while time() <= start_time
+ timeout
:
1301 desc_status
= await self
.op_status_map
[operation
](nsr_id
, action_id
)
1302 self
.logger
.debug("Wait NG RO > {}".format(desc_status
))
1303 if desc_status
["status"] == "FAILED":
1304 raise NgRoException(desc_status
["details"])
1305 elif desc_status
["status"] == "BUILD":
1307 stage
[2] = "VIM: ({})".format(desc_status
["details"])
1308 elif desc_status
["status"] == "DONE":
1310 stage
[2] = "Deployed at VIM"
1313 assert False, "ROclient.check_ns_status returns unknown {}".format(
1314 desc_status
["status"]
1316 if stage
and nslcmop_id
and stage
[2] != detailed_status_old
:
1317 detailed_status_old
= stage
[2]
1318 db_nsr_update
["detailed-status"] = " ".join(stage
)
1319 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1320 self
._write
_op
_status
(nslcmop_id
, stage
)
1321 await asyncio
.sleep(15, loop
=self
.loop
)
1322 else: # timeout_ns_deploy
1323 raise NgRoException("Timeout waiting ns to deploy")
1325 async def _terminate_ng_ro(
1326 self
, logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
1331 start_deploy
= time()
1338 "action_id": nslcmop_id
,
1340 desc
= await self
.RO
.deploy(nsr_id
, target
)
1341 action_id
= desc
["action_id"]
1342 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETING"
1345 + "ns terminate action at RO. action_id={}".format(action_id
)
1349 delete_timeout
= 20 * 60 # 20 minutes
1350 await self
._wait
_ng
_ro
(
1357 operation
="termination",
1359 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1361 await self
.RO
.delete(nsr_id
)
1362 except NgRoException
as e
:
1363 if e
.http_code
== 404: # not found
1364 db_nsr_update
["_admin.deployed.RO.nsr_id"] = None
1365 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1367 logging_text
+ "RO_action_id={} already deleted".format(action_id
)
1369 elif e
.http_code
== 409: # conflict
1370 failed_detail
.append("delete conflict: {}".format(e
))
1373 + "RO_action_id={} delete conflict: {}".format(action_id
, e
)
1376 failed_detail
.append("delete error: {}".format(e
))
1379 + "RO_action_id={} delete error: {}".format(action_id
, e
)
1381 except Exception as e
:
1382 failed_detail
.append("delete error: {}".format(e
))
1384 logging_text
+ "RO_action_id={} delete error: {}".format(action_id
, e
)
1388 stage
[2] = "Error deleting from VIM"
1390 stage
[2] = "Deleted from VIM"
1391 db_nsr_update
["detailed-status"] = " ".join(stage
)
1392 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1393 self
._write
_op
_status
(nslcmop_id
, stage
)
1396 raise LcmException("; ".join(failed_detail
))
1399 async def instantiate_RO(
1413 :param logging_text: preffix text to use at logging
1414 :param nsr_id: nsr identity
1415 :param nsd: database content of ns descriptor
1416 :param db_nsr: database content of ns record
1417 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1419 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1420 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1421 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1422 :return: None or exception
1425 start_deploy
= time()
1426 ns_params
= db_nslcmop
.get("operationParams")
1427 if ns_params
and ns_params
.get("timeout_ns_deploy"):
1428 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
1430 timeout_ns_deploy
= self
.timeout
.ns_deploy
1432 # Check for and optionally request placement optimization. Database will be updated if placement activated
1433 stage
[2] = "Waiting for Placement."
1434 if await self
._do
_placement
(logging_text
, db_nslcmop
, db_vnfrs
):
1435 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1436 for vnfr
in db_vnfrs
.values():
1437 if ns_params
["vimAccountId"] == vnfr
["vim-account-id"]:
1440 ns_params
["vimAccountId"] == vnfr
["vim-account-id"]
1442 return await self
._instantiate
_ng
_ro
(
1455 except Exception as e
:
1456 stage
[2] = "ERROR deploying at VIM"
1457 self
.set_vnfr_at_error(db_vnfrs
, str(e
))
1459 "Error deploying at VIM {}".format(e
),
1460 exc_info
=not isinstance(
1463 ROclient
.ROClientException
,
1472 async def wait_kdu_up(self
, logging_text
, nsr_id
, vnfr_id
, kdu_name
):
1474 Wait for kdu to be up, get ip address
1475 :param logging_text: prefix use for logging
1479 :return: IP address, K8s services
1482 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1485 while nb_tries
< 360:
1486 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1490 for x
in get_iterable(db_vnfr
, "kdur")
1491 if x
.get("kdu-name") == kdu_name
1497 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id
, kdu_name
)
1499 if kdur
.get("status"):
1500 if kdur
["status"] in ("READY", "ENABLED"):
1501 return kdur
.get("ip-address"), kdur
.get("services")
1504 "target KDU={} is in error state".format(kdu_name
)
1507 await asyncio
.sleep(10, loop
=self
.loop
)
1509 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name
))
1511 async def wait_vm_up_insert_key_ro(
1512 self
, logging_text
, nsr_id
, vnfr_id
, vdu_id
, vdu_index
, pub_key
=None, user
=None
1515 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1516 :param logging_text: prefix use for logging
1521 :param pub_key: public ssh key to inject, None to skip
1522 :param user: user to apply the public ssh key
1526 self
.logger
.debug(logging_text
+ "Starting wait_vm_up_insert_key_ro")
1528 target_vdu_id
= None
1533 if ro_retries
>= 360: # 1 hour
1535 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id
)
1538 await asyncio
.sleep(10, loop
=self
.loop
)
1541 if not target_vdu_id
:
1542 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1544 if not vdu_id
: # for the VNF case
1545 if db_vnfr
.get("status") == "ERROR":
1547 "Cannot inject ssh-key because target VNF is in error state"
1549 ip_address
= db_vnfr
.get("ip-address")
1555 for x
in get_iterable(db_vnfr
, "vdur")
1556 if x
.get("ip-address") == ip_address
1564 for x
in get_iterable(db_vnfr
, "vdur")
1565 if x
.get("vdu-id-ref") == vdu_id
1566 and x
.get("count-index") == vdu_index
1572 not vdur
and len(db_vnfr
.get("vdur", ())) == 1
1573 ): # If only one, this should be the target vdu
1574 vdur
= db_vnfr
["vdur"][0]
1577 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1578 vnfr_id
, vdu_id
, vdu_index
1581 # New generation RO stores information at "vim_info"
1584 if vdur
.get("vim_info"):
1586 t
for t
in vdur
["vim_info"]
1587 ) # there should be only one key
1588 ng_ro_status
= vdur
["vim_info"][target_vim
].get("vim_status")
1590 vdur
.get("pdu-type")
1591 or vdur
.get("status") == "ACTIVE"
1592 or ng_ro_status
== "ACTIVE"
1594 ip_address
= vdur
.get("ip-address")
1597 target_vdu_id
= vdur
["vdu-id-ref"]
1598 elif vdur
.get("status") == "ERROR" or ng_ro_status
== "ERROR":
1600 "Cannot inject ssh-key because target VM is in error state"
1603 if not target_vdu_id
:
1606 # inject public key into machine
1607 if pub_key
and user
:
1608 self
.logger
.debug(logging_text
+ "Inserting RO key")
1609 self
.logger
.debug("SSH > PubKey > {}".format(pub_key
))
1610 if vdur
.get("pdu-type"):
1611 self
.logger
.error(logging_text
+ "Cannot inject ssh-ky to a PDU")
1616 "action": "inject_ssh_key",
1620 "vnf": [{"_id": vnfr_id
, "vdur": [{"id": vdur
["id"]}]}],
1622 desc
= await self
.RO
.deploy(nsr_id
, target
)
1623 action_id
= desc
["action_id"]
1624 await self
._wait
_ng
_ro
(
1625 nsr_id
, action_id
, timeout
=600, operation
="instantiation"
1628 except NgRoException
as e
:
1630 "Reaching max tries injecting key. Error: {}".format(e
)
1637 async def _wait_dependent_n2vc(self
, nsr_id
, vca_deployed_list
, vca_index
):
1639 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1641 my_vca
= vca_deployed_list
[vca_index
]
1642 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1643 # vdu or kdu: no dependencies
1647 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
1648 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1649 configuration_status_list
= db_nsr
["configurationStatus"]
1650 for index
, vca_deployed
in enumerate(configuration_status_list
):
1651 if index
== vca_index
:
1654 if not my_vca
.get("member-vnf-index") or (
1655 vca_deployed
.get("member-vnf-index")
1656 == my_vca
.get("member-vnf-index")
1658 internal_status
= configuration_status_list
[index
].get("status")
1659 if internal_status
== "READY":
1661 elif internal_status
== "BROKEN":
1663 "Configuration aborted because dependent charm/s has failed"
1668 # no dependencies, return
1670 await asyncio
.sleep(10)
1673 raise LcmException("Configuration aborted because dependent charm/s timeout")
1675 def get_vca_id(self
, db_vnfr
: dict, db_nsr
: dict):
1678 vca_id
= deep_get(db_vnfr
, ("vca-id",))
1680 vim_account_id
= deep_get(db_nsr
, ("instantiate_params", "vimAccountId"))
1681 vca_id
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("vca")
1684 async def instantiate_N2VC(
1702 ee_config_descriptor
,
1704 nsr_id
= db_nsr
["_id"]
1705 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
1706 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1707 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
1708 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
1710 "collection": "nsrs",
1711 "filter": {"_id": nsr_id
},
1712 "path": db_update_entry
,
1717 element_under_configuration
= nsr_id
1721 vnfr_id
= db_vnfr
["_id"]
1722 osm_config
["osm"]["vnf_id"] = vnfr_id
1724 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
1726 if vca_type
== "native_charm":
1729 index_number
= vdu_index
or 0
1732 element_type
= "VNF"
1733 element_under_configuration
= vnfr_id
1734 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
1736 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
1737 element_type
= "VDU"
1738 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
1739 osm_config
["osm"]["vdu_id"] = vdu_id
1741 namespace
+= ".{}".format(kdu_name
)
1742 element_type
= "KDU"
1743 element_under_configuration
= kdu_name
1744 osm_config
["osm"]["kdu_name"] = kdu_name
1747 if base_folder
["pkg-dir"]:
1748 artifact_path
= "{}/{}/{}/{}".format(
1749 base_folder
["folder"],
1750 base_folder
["pkg-dir"],
1753 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1758 artifact_path
= "{}/Scripts/{}/{}/".format(
1759 base_folder
["folder"],
1762 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1767 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
1769 # get initial_config_primitive_list that applies to this element
1770 initial_config_primitive_list
= config_descriptor
.get(
1771 "initial-config-primitive"
1775 "Initial config primitive list > {}".format(
1776 initial_config_primitive_list
1780 # add config if not present for NS charm
1781 ee_descriptor_id
= ee_config_descriptor
.get("id")
1782 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
1783 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
1784 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
1788 "Initial config primitive list #2 > {}".format(
1789 initial_config_primitive_list
1792 # n2vc_redesign STEP 3.1
1793 # find old ee_id if exists
1794 ee_id
= vca_deployed
.get("ee_id")
1796 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
1797 # create or register execution environment in VCA
1798 if vca_type
in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1799 self
._write
_configuration
_status
(
1801 vca_index
=vca_index
,
1803 element_under_configuration
=element_under_configuration
,
1804 element_type
=element_type
,
1807 step
= "create execution environment"
1808 self
.logger
.debug(logging_text
+ step
)
1812 if vca_type
== "k8s_proxy_charm":
1813 ee_id
= await self
.vca_map
[vca_type
].install_k8s_proxy_charm(
1814 charm_name
=artifact_path
[artifact_path
.rfind("/") + 1 :],
1815 namespace
=namespace
,
1816 artifact_path
=artifact_path
,
1820 elif vca_type
== "helm" or vca_type
== "helm-v3":
1821 ee_id
, credentials
= await self
.vca_map
[
1823 ].create_execution_environment(
1824 namespace
=namespace
,
1828 artifact_path
=artifact_path
,
1829 chart_model
=vca_name
,
1833 ee_id
, credentials
= await self
.vca_map
[
1835 ].create_execution_environment(
1836 namespace
=namespace
,
1842 elif vca_type
== "native_charm":
1843 step
= "Waiting to VM being up and getting IP address"
1844 self
.logger
.debug(logging_text
+ step
)
1845 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1854 credentials
= {"hostname": rw_mgmt_ip
}
1856 username
= deep_get(
1857 config_descriptor
, ("config-access", "ssh-access", "default-user")
1859 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1860 # merged. Meanwhile let's get username from initial-config-primitive
1861 if not username
and initial_config_primitive_list
:
1862 for config_primitive
in initial_config_primitive_list
:
1863 for param
in config_primitive
.get("parameter", ()):
1864 if param
["name"] == "ssh-username":
1865 username
= param
["value"]
1869 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1870 "'config-access.ssh-access.default-user'"
1872 credentials
["username"] = username
1873 # n2vc_redesign STEP 3.2
1875 self
._write
_configuration
_status
(
1877 vca_index
=vca_index
,
1878 status
="REGISTERING",
1879 element_under_configuration
=element_under_configuration
,
1880 element_type
=element_type
,
1883 step
= "register execution environment {}".format(credentials
)
1884 self
.logger
.debug(logging_text
+ step
)
1885 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
1886 credentials
=credentials
,
1887 namespace
=namespace
,
1892 # for compatibility with MON/POL modules, the need model and application name at database
1893 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1894 ee_id_parts
= ee_id
.split(".")
1895 db_nsr_update
= {db_update_entry
+ "ee_id": ee_id
}
1896 if len(ee_id_parts
) >= 2:
1897 model_name
= ee_id_parts
[0]
1898 application_name
= ee_id_parts
[1]
1899 db_nsr_update
[db_update_entry
+ "model"] = model_name
1900 db_nsr_update
[db_update_entry
+ "application"] = application_name
1902 # n2vc_redesign STEP 3.3
1903 step
= "Install configuration Software"
1905 self
._write
_configuration
_status
(
1907 vca_index
=vca_index
,
1908 status
="INSTALLING SW",
1909 element_under_configuration
=element_under_configuration
,
1910 element_type
=element_type
,
1911 other_update
=db_nsr_update
,
1914 # TODO check if already done
1915 self
.logger
.debug(logging_text
+ step
)
1917 if vca_type
== "native_charm":
1918 config_primitive
= next(
1919 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
1922 if config_primitive
:
1923 config
= self
._map
_primitive
_params
(
1924 config_primitive
, {}, deploy_params
1927 if vca_type
== "lxc_proxy_charm":
1928 if element_type
== "NS":
1929 num_units
= db_nsr
.get("config-units") or 1
1930 elif element_type
== "VNF":
1931 num_units
= db_vnfr
.get("config-units") or 1
1932 elif element_type
== "VDU":
1933 for v
in db_vnfr
["vdur"]:
1934 if vdu_id
== v
["vdu-id-ref"]:
1935 num_units
= v
.get("config-units") or 1
1937 if vca_type
!= "k8s_proxy_charm":
1938 await self
.vca_map
[vca_type
].install_configuration_sw(
1940 artifact_path
=artifact_path
,
1943 num_units
=num_units
,
1948 # write in db flag of configuration_sw already installed
1950 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
1953 # add relations for this VCA (wait for other peers related with this VCA)
1954 is_relation_added
= await self
._add
_vca
_relations
(
1955 logging_text
=logging_text
,
1958 vca_index
=vca_index
,
1961 if not is_relation_added
:
1962 raise LcmException("Relations could not be added to VCA.")
1964 # if SSH access is required, then get execution environment SSH public
1965 # if native charm we have waited already to VM be UP
1966 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1969 # self.logger.debug("get ssh key block")
1971 config_descriptor
, ("config-access", "ssh-access", "required")
1973 # self.logger.debug("ssh key needed")
1974 # Needed to inject a ssh key
1977 ("config-access", "ssh-access", "default-user"),
1979 step
= "Install configuration Software, getting public ssh key"
1980 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
1981 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
1984 step
= "Insert public key into VM user={} ssh_key={}".format(
1988 # self.logger.debug("no need to get ssh key")
1989 step
= "Waiting to VM being up and getting IP address"
1990 self
.logger
.debug(logging_text
+ step
)
1992 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1995 # n2vc_redesign STEP 5.1
1996 # wait for RO (ip-address) Insert pub_key into VM
1999 rw_mgmt_ip
, services
= await self
.wait_kdu_up(
2000 logging_text
, nsr_id
, vnfr_id
, kdu_name
2002 vnfd
= self
.db
.get_one(
2004 {"_id": f
'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2006 kdu
= get_kdu(vnfd
, kdu_name
)
2008 service
["name"] for service
in get_kdu_services(kdu
)
2010 exposed_services
= []
2011 for service
in services
:
2012 if any(s
in service
["name"] for s
in kdu_services
):
2013 exposed_services
.append(service
)
2014 await self
.vca_map
[vca_type
].exec_primitive(
2016 primitive_name
="config",
2018 "osm-config": json
.dumps(
2020 k8s
={"services": exposed_services
}
2027 # This verification is needed in order to avoid trying to add a public key
2028 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2029 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2030 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2032 elif db_vnfr
.get("vdur"):
2033 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
2043 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
2045 # store rw_mgmt_ip in deploy params for later replacement
2046 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
2048 # n2vc_redesign STEP 6 Execute initial config primitive
2049 step
= "execute initial config primitive"
2051 # wait for dependent primitives execution (NS -> VNF -> VDU)
2052 if initial_config_primitive_list
:
2053 await self
._wait
_dependent
_n
2vc
(nsr_id
, vca_deployed_list
, vca_index
)
2055 # stage, in function of element type: vdu, kdu, vnf or ns
2056 my_vca
= vca_deployed_list
[vca_index
]
2057 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
2059 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
2060 elif my_vca
.get("member-vnf-index"):
2062 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
2065 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
2067 self
._write
_configuration
_status
(
2068 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
2071 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2073 check_if_terminated_needed
= True
2074 for initial_config_primitive
in initial_config_primitive_list
:
2075 # adding information on the vca_deployed if it is a NS execution environment
2076 if not vca_deployed
["member-vnf-index"]:
2077 deploy_params
["ns_config_info"] = json
.dumps(
2078 self
._get
_ns
_config
_info
(nsr_id
)
2080 # TODO check if already done
2081 primitive_params_
= self
._map
_primitive
_params
(
2082 initial_config_primitive
, {}, deploy_params
2085 step
= "execute primitive '{}' params '{}'".format(
2086 initial_config_primitive
["name"], primitive_params_
2088 self
.logger
.debug(logging_text
+ step
)
2089 await self
.vca_map
[vca_type
].exec_primitive(
2091 primitive_name
=initial_config_primitive
["name"],
2092 params_dict
=primitive_params_
,
2097 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2098 if check_if_terminated_needed
:
2099 if config_descriptor
.get("terminate-config-primitive"):
2101 "nsrs", nsr_id
, {db_update_entry
+ "needed_terminate": True}
2103 check_if_terminated_needed
= False
2105 # TODO register in database that primitive is done
2107 # STEP 7 Configure metrics
2108 if vca_type
== "helm" or vca_type
== "helm-v3":
2109 # TODO: review for those cases where the helm chart is a reference and
2110 # is not part of the NF package
2111 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
2113 artifact_path
=artifact_path
,
2114 ee_config_descriptor
=ee_config_descriptor
,
2117 target_ip
=rw_mgmt_ip
,
2118 element_type
=element_type
,
2119 vnf_member_index
=db_vnfr
.get("member-vnf-index-ref", ""),
2121 vdu_index
=vdu_index
,
2123 kdu_index
=kdu_index
,
2129 {db_update_entry
+ "prometheus_jobs": prometheus_jobs
},
2132 for job
in prometheus_jobs
:
2135 {"job_name": job
["job_name"]},
2138 fail_on_empty
=False,
2141 step
= "instantiated at VCA"
2142 self
.logger
.debug(logging_text
+ step
)
2144 self
._write
_configuration
_status
(
2145 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
2148 except Exception as e
: # TODO not use Exception but N2VC exception
2149 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2151 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
2154 "Exception while {} : {}".format(step
, e
), exc_info
=True
2156 self
._write
_configuration
_status
(
2157 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
2159 raise LcmException("{}. {}".format(step
, e
)) from e
2161 def _write_ns_status(
2165 current_operation
: str,
2166 current_operation_id
: str,
2167 error_description
: str = None,
2168 error_detail
: str = None,
2169 other_update
: dict = None,
2172 Update db_nsr fields.
2175 :param current_operation:
2176 :param current_operation_id:
2177 :param error_description:
2178 :param error_detail:
2179 :param other_update: Other required changes at database if provided, will be cleared
2183 db_dict
= other_update
or {}
2186 ] = current_operation_id
# for backward compatibility
2187 db_dict
["_admin.current-operation"] = current_operation_id
2188 db_dict
["_admin.operation-type"] = (
2189 current_operation
if current_operation
!= "IDLE" else None
2191 db_dict
["currentOperation"] = current_operation
2192 db_dict
["currentOperationID"] = current_operation_id
2193 db_dict
["errorDescription"] = error_description
2194 db_dict
["errorDetail"] = error_detail
2197 db_dict
["nsState"] = ns_state
2198 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2199 except DbException
as e
:
2200 self
.logger
.warn("Error writing NS status, ns={}: {}".format(nsr_id
, e
))
2202 def _write_op_status(
2206 error_message
: str = None,
2207 queuePosition
: int = 0,
2208 operation_state
: str = None,
2209 other_update
: dict = None,
2212 db_dict
= other_update
or {}
2213 db_dict
["queuePosition"] = queuePosition
2214 if isinstance(stage
, list):
2215 db_dict
["stage"] = stage
[0]
2216 db_dict
["detailed-status"] = " ".join(stage
)
2217 elif stage
is not None:
2218 db_dict
["stage"] = str(stage
)
2220 if error_message
is not None:
2221 db_dict
["errorMessage"] = error_message
2222 if operation_state
is not None:
2223 db_dict
["operationState"] = operation_state
2224 db_dict
["statusEnteredTime"] = time()
2225 self
.update_db_2("nslcmops", op_id
, db_dict
)
2226 except DbException
as e
:
2228 "Error writing OPERATION status for op_id: {} -> {}".format(op_id
, e
)
2231 def _write_all_config_status(self
, db_nsr
: dict, status
: str):
2233 nsr_id
= db_nsr
["_id"]
2234 # configurationStatus
2235 config_status
= db_nsr
.get("configurationStatus")
2238 "configurationStatus.{}.status".format(index
): status
2239 for index
, v
in enumerate(config_status
)
2243 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2245 except DbException
as e
:
2247 "Error writing all configuration status, ns={}: {}".format(nsr_id
, e
)
2250 def _write_configuration_status(
2255 element_under_configuration
: str = None,
2256 element_type
: str = None,
2257 other_update
: dict = None,
2259 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2260 # .format(vca_index, status))
2263 db_path
= "configurationStatus.{}.".format(vca_index
)
2264 db_dict
= other_update
or {}
2266 db_dict
[db_path
+ "status"] = status
2267 if element_under_configuration
:
2269 db_path
+ "elementUnderConfiguration"
2270 ] = element_under_configuration
2272 db_dict
[db_path
+ "elementType"] = element_type
2273 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2274 except DbException
as e
:
2276 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2277 status
, nsr_id
, vca_index
, e
2281 async def _do_placement(self
, logging_text
, db_nslcmop
, db_vnfrs
):
2283 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2284 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2285 Database is used because the result can be obtained from a different LCM worker in case of HA.
2286 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2287 :param db_nslcmop: database content of nslcmop
2288 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2289 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2290 computed 'vim-account-id'
2293 nslcmop_id
= db_nslcmop
["_id"]
2294 placement_engine
= deep_get(db_nslcmop
, ("operationParams", "placement-engine"))
2295 if placement_engine
== "PLA":
2297 logging_text
+ "Invoke and wait for placement optimization"
2299 await self
.msg
.aiowrite(
2300 "pla", "get_placement", {"nslcmopId": nslcmop_id
}, loop
=self
.loop
2302 db_poll_interval
= 5
2303 wait
= db_poll_interval
* 10
2305 while not pla_result
and wait
>= 0:
2306 await asyncio
.sleep(db_poll_interval
)
2307 wait
-= db_poll_interval
2308 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2309 pla_result
= deep_get(db_nslcmop
, ("_admin", "pla"))
2313 "Placement timeout for nslcmopId={}".format(nslcmop_id
)
2316 for pla_vnf
in pla_result
["vnf"]:
2317 vnfr
= db_vnfrs
.get(pla_vnf
["member-vnf-index"])
2318 if not pla_vnf
.get("vimAccountId") or not vnfr
:
2323 {"_id": vnfr
["_id"]},
2324 {"vim-account-id": pla_vnf
["vimAccountId"]},
2327 vnfr
["vim-account-id"] = pla_vnf
["vimAccountId"]
2330 def _gather_vnfr_healing_alerts(self
, vnfr
, vnfd
):
2332 nsr_id
= vnfr
["nsr-id-ref"]
2333 df
= vnfd
.get("df", [{}])[0]
2334 # Checking for auto-healing configuration
2335 if "healing-aspect" in df
:
2336 healing_aspects
= df
["healing-aspect"]
2337 for healing
in healing_aspects
:
2338 for healing_policy
in healing
.get("healing-policy", ()):
2339 vdu_id
= healing_policy
["vdu-id"]
2341 (vdur
for vdur
in vnfr
["vdur"] if vdu_id
== vdur
["vdu-id-ref"]),
2346 metric_name
= "vm_status"
2347 vdu_name
= vdur
.get("name")
2348 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2350 name
= f
"healing_{uuid}"
2351 action
= healing_policy
2352 # action_on_recovery = healing.get("action-on-recovery")
2353 # cooldown_time = healing.get("cooldown-time")
2354 # day1 = healing.get("day1")
2358 "metric": metric_name
,
2361 "vnf_member_index": vnf_member_index
,
2362 "vdu_name": vdu_name
,
2364 "alarm_status": "ok",
2365 "action_type": "healing",
2368 alerts
.append(alert
)
2371 def _gather_vnfr_scaling_alerts(self
, vnfr
, vnfd
):
2373 nsr_id
= vnfr
["nsr-id-ref"]
2374 df
= vnfd
.get("df", [{}])[0]
2375 # Checking for auto-scaling configuration
2376 if "scaling-aspect" in df
:
2377 rel_operation_types
= {
2385 scaling_aspects
= df
["scaling-aspect"]
2386 all_vnfd_monitoring_params
= {}
2387 for ivld
in vnfd
.get("int-virtual-link-desc", ()):
2388 for mp
in ivld
.get("monitoring-parameters", ()):
2389 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2390 for vdu
in vnfd
.get("vdu", ()):
2391 for mp
in vdu
.get("monitoring-parameter", ()):
2392 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2393 for df
in vnfd
.get("df", ()):
2394 for mp
in df
.get("monitoring-parameter", ()):
2395 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2396 for scaling_aspect
in scaling_aspects
:
2397 scaling_group_name
= scaling_aspect
.get("name", "")
2398 # Get monitored VDUs
2399 all_monitored_vdus
= set()
2400 for delta
in scaling_aspect
.get("aspect-delta-details", {}).get(
2403 for vdu_delta
in delta
.get("vdu-delta", ()):
2404 all_monitored_vdus
.add(vdu_delta
.get("id"))
2405 monitored_vdurs
= list(
2407 lambda vdur
: vdur
["vdu-id-ref"] in all_monitored_vdus
,
2411 if not monitored_vdurs
:
2413 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2416 for scaling_policy
in scaling_aspect
.get("scaling-policy", ()):
2417 if scaling_policy
["scaling-type"] != "automatic":
2419 threshold_time
= scaling_policy
.get("threshold-time", "1")
2420 cooldown_time
= scaling_policy
.get("cooldown-time", "0")
2421 for scaling_criteria
in scaling_policy
["scaling-criteria"]:
2422 monitoring_param_ref
= scaling_criteria
.get(
2423 "vnf-monitoring-param-ref"
2425 vnf_monitoring_param
= all_vnfd_monitoring_params
[
2426 monitoring_param_ref
2428 for vdur
in monitored_vdurs
:
2429 vdu_id
= vdur
["vdu-id-ref"]
2430 metric_name
= vnf_monitoring_param
.get("performance-metric")
2431 metric_name
= f
"osm_{metric_name}"
2432 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2433 scalein_threshold
= scaling_criteria
.get(
2434 "scale-in-threshold"
2436 scaleout_threshold
= scaling_criteria
.get(
2437 "scale-out-threshold"
2439 # Looking for min/max-number-of-instances
2440 instances_min_number
= 1
2441 instances_max_number
= 1
2442 vdu_profile
= df
["vdu-profile"]
2445 item
for item
in vdu_profile
if item
["id"] == vdu_id
2447 instances_min_number
= profile
.get(
2448 "min-number-of-instances", 1
2450 instances_max_number
= profile
.get(
2451 "max-number-of-instances", 1
2454 if scalein_threshold
:
2456 name
= f
"scalein_{uuid}"
2457 operation
= scaling_criteria
[
2458 "scale-in-relational-operation"
2460 rel_operator
= rel_operation_types
.get(operation
, "<=")
2461 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2462 expression
= f
"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2465 "vnf_member_index": vnf_member_index
,
2471 "for": str(threshold_time
) + "m",
2474 action
= scaling_policy
2476 "scaling-group": scaling_group_name
,
2477 "cooldown-time": cooldown_time
,
2482 "metric": metric_name
,
2485 "vnf_member_index": vnf_member_index
,
2488 "alarm_status": "ok",
2489 "action_type": "scale_in",
2491 "prometheus_config": prom_cfg
,
2493 alerts
.append(alert
)
2495 if scaleout_threshold
:
2497 name
= f
"scaleout_{uuid}"
2498 operation
= scaling_criteria
[
2499 "scale-out-relational-operation"
2501 rel_operator
= rel_operation_types
.get(operation
, "<=")
2502 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2503 expression
= f
"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2506 "vnf_member_index": vnf_member_index
,
2512 "for": str(threshold_time
) + "m",
2515 action
= scaling_policy
2517 "scaling-group": scaling_group_name
,
2518 "cooldown-time": cooldown_time
,
2523 "metric": metric_name
,
2526 "vnf_member_index": vnf_member_index
,
2529 "alarm_status": "ok",
2530 "action_type": "scale_out",
2532 "prometheus_config": prom_cfg
,
2534 alerts
.append(alert
)
2537 def update_nsrs_with_pla_result(self
, params
):
2539 nslcmop_id
= deep_get(params
, ("placement", "nslcmopId"))
2541 "nslcmops", nslcmop_id
, {"_admin.pla": params
.get("placement")}
2543 except Exception as e
:
2544 self
.logger
.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id
, e
))
2546 async def instantiate(self
, nsr_id
, nslcmop_id
):
2549 :param nsr_id: ns instance to deploy
2550 :param nslcmop_id: operation to run
2554 # Try to lock HA task here
2555 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
2556 if not task_is_locked_by_me
:
2558 "instantiate() task is not locked by me, ns={}".format(nsr_id
)
2562 logging_text
= "Task ns={} instantiate={} ".format(nsr_id
, nslcmop_id
)
2563 self
.logger
.debug(logging_text
+ "Enter")
2565 # get all needed from database
2567 # database nsrs record
2570 # database nslcmops record
2573 # update operation on nsrs
2575 # update operation on nslcmops
2576 db_nslcmop_update
= {}
2578 timeout_ns_deploy
= self
.timeout
.ns_deploy
2580 nslcmop_operation_state
= None
2581 db_vnfrs
= {} # vnf's info indexed by member-index
2583 tasks_dict_info
= {} # from task to info text
2587 "Stage 1/5: preparation of the environment.",
2588 "Waiting for previous operations to terminate.",
2591 # ^ stage, step, VIM progress
2593 # wait for any previous tasks in process
2594 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
2596 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2597 stage
[1] = "Reading from database."
2598 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2599 db_nsr_update
["detailed-status"] = "creating"
2600 db_nsr_update
["operational-status"] = "init"
2601 self
._write
_ns
_status
(
2603 ns_state
="BUILDING",
2604 current_operation
="INSTANTIATING",
2605 current_operation_id
=nslcmop_id
,
2606 other_update
=db_nsr_update
,
2608 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
2610 # read from db: operation
2611 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
2612 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2613 if db_nslcmop
["operationParams"].get("additionalParamsForVnf"):
2614 db_nslcmop
["operationParams"]["additionalParamsForVnf"] = json
.loads(
2615 db_nslcmop
["operationParams"]["additionalParamsForVnf"]
2617 ns_params
= db_nslcmop
.get("operationParams")
2618 if ns_params
and ns_params
.get("timeout_ns_deploy"):
2619 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
2622 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
2623 self
.logger
.debug(logging_text
+ stage
[1])
2624 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
2625 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
2626 self
.logger
.debug(logging_text
+ stage
[1])
2627 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
2628 self
.fs
.sync(db_nsr
["nsd-id"])
2630 # nsr_name = db_nsr["name"] # TODO short-name??
2632 # read from db: vnf's of this ns
2633 stage
[1] = "Getting vnfrs from db."
2634 self
.logger
.debug(logging_text
+ stage
[1])
2635 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
2637 # read from db: vnfd's for every vnf
2638 db_vnfds
= [] # every vnfd data
2640 # for each vnf in ns, read vnfd
2641 for vnfr
in db_vnfrs_list
:
2642 if vnfr
.get("kdur"):
2644 for kdur
in vnfr
["kdur"]:
2645 if kdur
.get("additionalParams"):
2646 kdur
["additionalParams"] = json
.loads(
2647 kdur
["additionalParams"]
2649 kdur_list
.append(kdur
)
2650 vnfr
["kdur"] = kdur_list
2652 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
2653 vnfd_id
= vnfr
["vnfd-id"]
2654 vnfd_ref
= vnfr
["vnfd-ref"]
2655 self
.fs
.sync(vnfd_id
)
2657 # if we haven't this vnfd, read it from db
2658 if vnfd_id
not in db_vnfds
:
2660 stage
[1] = "Getting vnfd={} id='{}' from db.".format(
2663 self
.logger
.debug(logging_text
+ stage
[1])
2664 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
2667 db_vnfds
.append(vnfd
)
2669 # Get or generates the _admin.deployed.VCA list
2670 vca_deployed_list
= None
2671 if db_nsr
["_admin"].get("deployed"):
2672 vca_deployed_list
= db_nsr
["_admin"]["deployed"].get("VCA")
2673 if vca_deployed_list
is None:
2674 vca_deployed_list
= []
2675 configuration_status_list
= []
2676 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2677 db_nsr_update
["configurationStatus"] = configuration_status_list
2678 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2679 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2680 elif isinstance(vca_deployed_list
, dict):
2681 # maintain backward compatibility. Change a dict to list at database
2682 vca_deployed_list
= list(vca_deployed_list
.values())
2683 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2684 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2687 deep_get(db_nsr
, ("_admin", "deployed", "RO", "vnfd")), list
2689 populate_dict(db_nsr
, ("_admin", "deployed", "RO", "vnfd"), [])
2690 db_nsr_update
["_admin.deployed.RO.vnfd"] = []
2692 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2693 db_nsr_update
["_admin.nsState"] = "INSTANTIATED"
2694 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2696 "vnfrs", {"nsr-id-ref": nsr_id
}, {"_admin.nsState": "INSTANTIATED"}
2699 # n2vc_redesign STEP 2 Deploy Network Scenario
2700 stage
[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2701 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2703 stage
[1] = "Deploying KDUs."
2704 # self.logger.debug(logging_text + "Before deploy_kdus")
2705 # Call to deploy_kdus in case exists the "vdu:kdu" param
2706 await self
.deploy_kdus(
2707 logging_text
=logging_text
,
2709 nslcmop_id
=nslcmop_id
,
2712 task_instantiation_info
=tasks_dict_info
,
2715 stage
[1] = "Getting VCA public key."
2716 # n2vc_redesign STEP 1 Get VCA public ssh-key
2717 # feature 1429. Add n2vc public key to needed VMs
2718 n2vc_key
= self
.n2vc
.get_public_key()
2719 n2vc_key_list
= [n2vc_key
]
2720 if self
.vca_config
.public_key
:
2721 n2vc_key_list
.append(self
.vca_config
.public_key
)
2723 stage
[1] = "Deploying NS at VIM."
2724 task_ro
= asyncio
.ensure_future(
2725 self
.instantiate_RO(
2726 logging_text
=logging_text
,
2730 db_nslcmop
=db_nslcmop
,
2733 n2vc_key_list
=n2vc_key_list
,
2737 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "instantiate_RO", task_ro
)
2738 tasks_dict_info
[task_ro
] = "Deploying at VIM"
2740 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2741 stage
[1] = "Deploying Execution Environments."
2742 self
.logger
.debug(logging_text
+ stage
[1])
2744 # create namespace and certificate if any helm based EE is present in the NS
2745 if check_helm_ee_in_ns(db_vnfds
):
2746 # TODO: create EE namespace
2747 # create TLS certificates
2748 await self
.vca_map
["helm-v3"].create_tls_certificate(
2749 secret_name
="ee-tls-{}".format(nsr_id
),
2752 usage
="server auth",
2755 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
2756 for vnf_profile
in get_vnf_profiles(nsd
):
2757 vnfd_id
= vnf_profile
["vnfd-id"]
2758 vnfd
= find_in_list(db_vnfds
, lambda a_vnf
: a_vnf
["id"] == vnfd_id
)
2759 member_vnf_index
= str(vnf_profile
["id"])
2760 db_vnfr
= db_vnfrs
[member_vnf_index
]
2761 base_folder
= vnfd
["_admin"]["storage"]
2768 # Get additional parameters
2769 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
2770 if db_vnfr
.get("additionalParamsForVnf"):
2771 deploy_params
.update(
2772 parse_yaml_strings(db_vnfr
["additionalParamsForVnf"].copy())
2775 descriptor_config
= get_configuration(vnfd
, vnfd
["id"])
2776 if descriptor_config
:
2778 logging_text
=logging_text
2779 + "member_vnf_index={} ".format(member_vnf_index
),
2782 nslcmop_id
=nslcmop_id
,
2788 member_vnf_index
=member_vnf_index
,
2789 vdu_index
=vdu_index
,
2790 kdu_index
=kdu_index
,
2792 deploy_params
=deploy_params
,
2793 descriptor_config
=descriptor_config
,
2794 base_folder
=base_folder
,
2795 task_instantiation_info
=tasks_dict_info
,
2799 # Deploy charms for each VDU that supports one.
2800 for vdud
in get_vdu_list(vnfd
):
2802 descriptor_config
= get_configuration(vnfd
, vdu_id
)
2803 vdur
= find_in_list(
2804 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
2807 if vdur
.get("additionalParams"):
2808 deploy_params_vdu
= parse_yaml_strings(vdur
["additionalParams"])
2810 deploy_params_vdu
= deploy_params
2811 deploy_params_vdu
["OSM"] = get_osm_params(
2812 db_vnfr
, vdu_id
, vdu_count_index
=0
2814 vdud_count
= get_number_of_instances(vnfd
, vdu_id
)
2816 self
.logger
.debug("VDUD > {}".format(vdud
))
2818 "Descriptor config > {}".format(descriptor_config
)
2820 if descriptor_config
:
2824 for vdu_index
in range(vdud_count
):
2825 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2827 logging_text
=logging_text
2828 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2829 member_vnf_index
, vdu_id
, vdu_index
2833 nslcmop_id
=nslcmop_id
,
2839 kdu_index
=kdu_index
,
2840 member_vnf_index
=member_vnf_index
,
2841 vdu_index
=vdu_index
,
2843 deploy_params
=deploy_params_vdu
,
2844 descriptor_config
=descriptor_config
,
2845 base_folder
=base_folder
,
2846 task_instantiation_info
=tasks_dict_info
,
2849 for kdud
in get_kdu_list(vnfd
):
2850 kdu_name
= kdud
["name"]
2851 descriptor_config
= get_configuration(vnfd
, kdu_name
)
2852 if descriptor_config
:
2856 kdu_index
, kdur
= next(
2858 for x
in enumerate(db_vnfr
["kdur"])
2859 if x
[1]["kdu-name"] == kdu_name
2861 deploy_params_kdu
= {"OSM": get_osm_params(db_vnfr
)}
2862 if kdur
.get("additionalParams"):
2863 deploy_params_kdu
.update(
2864 parse_yaml_strings(kdur
["additionalParams"].copy())
2868 logging_text
=logging_text
,
2871 nslcmop_id
=nslcmop_id
,
2877 member_vnf_index
=member_vnf_index
,
2878 vdu_index
=vdu_index
,
2879 kdu_index
=kdu_index
,
2881 deploy_params
=deploy_params_kdu
,
2882 descriptor_config
=descriptor_config
,
2883 base_folder
=base_folder
,
2884 task_instantiation_info
=tasks_dict_info
,
2888 # Check if this NS has a charm configuration
2889 descriptor_config
= nsd
.get("ns-configuration")
2890 if descriptor_config
and descriptor_config
.get("juju"):
2893 member_vnf_index
= None
2900 # Get additional parameters
2901 deploy_params
= {"OSM": {"vim_account_id": ns_params
["vimAccountId"]}}
2902 if db_nsr
.get("additionalParamsForNs"):
2903 deploy_params
.update(
2904 parse_yaml_strings(db_nsr
["additionalParamsForNs"].copy())
2906 base_folder
= nsd
["_admin"]["storage"]
2908 logging_text
=logging_text
,
2911 nslcmop_id
=nslcmop_id
,
2917 member_vnf_index
=member_vnf_index
,
2918 vdu_index
=vdu_index
,
2919 kdu_index
=kdu_index
,
2921 deploy_params
=deploy_params
,
2922 descriptor_config
=descriptor_config
,
2923 base_folder
=base_folder
,
2924 task_instantiation_info
=tasks_dict_info
,
2928 # rest of staff will be done at finally
2931 ROclient
.ROClientException
,
2937 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
)
2940 except asyncio
.CancelledError
:
2942 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
2944 exc
= "Operation was cancelled"
2945 except Exception as e
:
2946 exc
= traceback
.format_exc()
2947 self
.logger
.critical(
2948 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
2953 error_list
.append(str(exc
))
2955 # wait for pending tasks
2957 stage
[1] = "Waiting for instantiate pending tasks."
2958 self
.logger
.debug(logging_text
+ stage
[1])
2959 error_list
+= await self
._wait
_for
_tasks
(
2967 stage
[1] = stage
[2] = ""
2968 except asyncio
.CancelledError
:
2969 error_list
.append("Cancelled")
2970 # TODO cancel all tasks
2971 except Exception as exc
:
2972 error_list
.append(str(exc
))
2974 # update operation-status
2975 db_nsr_update
["operational-status"] = "running"
2976 # let's begin with VCA 'configured' status (later we can change it)
2977 db_nsr_update
["config-status"] = "configured"
2978 for task
, task_name
in tasks_dict_info
.items():
2979 if not task
.done() or task
.cancelled() or task
.exception():
2980 if task_name
.startswith(self
.task_name_deploy_vca
):
2981 # A N2VC task is pending
2982 db_nsr_update
["config-status"] = "failed"
2984 # RO or KDU task is pending
2985 db_nsr_update
["operational-status"] = "failed"
2987 # update status at database
2989 error_detail
= ". ".join(error_list
)
2990 self
.logger
.error(logging_text
+ error_detail
)
2991 error_description_nslcmop
= "{} Detail: {}".format(
2992 stage
[0], error_detail
2994 error_description_nsr
= "Operation: INSTANTIATING.{}, {}".format(
2995 nslcmop_id
, stage
[0]
2998 db_nsr_update
["detailed-status"] = (
2999 error_description_nsr
+ " Detail: " + error_detail
3001 db_nslcmop_update
["detailed-status"] = error_detail
3002 nslcmop_operation_state
= "FAILED"
3006 error_description_nsr
= error_description_nslcmop
= None
3008 db_nsr_update
["detailed-status"] = "Done"
3009 db_nslcmop_update
["detailed-status"] = "Done"
3010 nslcmop_operation_state
= "COMPLETED"
3011 # Gather auto-healing and auto-scaling alerts for each vnfr
3014 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
3016 (sub
for sub
in db_vnfds
if sub
["_id"] == vnfr
["vnfd-id"]), None
3018 healing_alerts
= self
._gather
_vnfr
_healing
_alerts
(vnfr
, vnfd
)
3019 for alert
in healing_alerts
:
3020 self
.logger
.info(f
"Storing healing alert in MongoDB: {alert}")
3021 self
.db
.create("alerts", alert
)
3023 scaling_alerts
= self
._gather
_vnfr
_scaling
_alerts
(vnfr
, vnfd
)
3024 for alert
in scaling_alerts
:
3025 self
.logger
.info(f
"Storing scaling alert in MongoDB: {alert}")
3026 self
.db
.create("alerts", alert
)
3029 self
._write
_ns
_status
(
3032 current_operation
="IDLE",
3033 current_operation_id
=None,
3034 error_description
=error_description_nsr
,
3035 error_detail
=error_detail
,
3036 other_update
=db_nsr_update
,
3038 self
._write
_op
_status
(
3041 error_message
=error_description_nslcmop
,
3042 operation_state
=nslcmop_operation_state
,
3043 other_update
=db_nslcmop_update
,
3046 if nslcmop_operation_state
:
3048 await self
.msg
.aiowrite(
3053 "nslcmop_id": nslcmop_id
,
3054 "operationState": nslcmop_operation_state
,
3058 except Exception as e
:
3060 logging_text
+ "kafka_write notification Exception {}".format(e
)
3063 self
.logger
.debug(logging_text
+ "Exit")
3064 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_instantiate")
3066 def _get_vnfd(self
, vnfd_id
: str, projects_read
: str, cached_vnfds
: Dict
[str, Any
]):
3067 if vnfd_id
not in cached_vnfds
:
3068 cached_vnfds
[vnfd_id
] = self
.db
.get_one(
3069 "vnfds", {"id": vnfd_id
, "_admin.projects_read": projects_read
}
3071 return cached_vnfds
[vnfd_id
]
3073 def _get_vnfr(self
, nsr_id
: str, vnf_profile_id
: str, cached_vnfrs
: Dict
[str, Any
]):
3074 if vnf_profile_id
not in cached_vnfrs
:
3075 cached_vnfrs
[vnf_profile_id
] = self
.db
.get_one(
3078 "member-vnf-index-ref": vnf_profile_id
,
3079 "nsr-id-ref": nsr_id
,
3082 return cached_vnfrs
[vnf_profile_id
]
3084 def _is_deployed_vca_in_relation(
3085 self
, vca
: DeployedVCA
, relation
: Relation
3088 for endpoint
in (relation
.provider
, relation
.requirer
):
3089 if endpoint
["kdu-resource-profile-id"]:
3092 vca
.vnf_profile_id
== endpoint
.vnf_profile_id
3093 and vca
.vdu_profile_id
== endpoint
.vdu_profile_id
3094 and vca
.execution_environment_ref
== endpoint
.execution_environment_ref
3100 def _update_ee_relation_data_with_implicit_data(
3101 self
, nsr_id
, nsd
, ee_relation_data
, cached_vnfds
, vnf_profile_id
: str = None
3103 ee_relation_data
= safe_get_ee_relation(
3104 nsr_id
, ee_relation_data
, vnf_profile_id
=vnf_profile_id
3106 ee_relation_level
= EELevel
.get_level(ee_relation_data
)
3107 if (ee_relation_level
in (EELevel
.VNF
, EELevel
.VDU
)) and not ee_relation_data
[
3108 "execution-environment-ref"
3110 vnf_profile
= get_vnf_profile(nsd
, ee_relation_data
["vnf-profile-id"])
3111 vnfd_id
= vnf_profile
["vnfd-id"]
3112 project
= nsd
["_admin"]["projects_read"][0]
3113 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3116 if ee_relation_level
== EELevel
.VNF
3117 else ee_relation_data
["vdu-profile-id"]
3119 ee
= get_juju_ee_ref(db_vnfd
, entity_id
)
3122 f
"not execution environments found for ee_relation {ee_relation_data}"
3124 ee_relation_data
["execution-environment-ref"] = ee
["id"]
3125 return ee_relation_data
3127 def _get_ns_relations(
3130 nsd
: Dict
[str, Any
],
3132 cached_vnfds
: Dict
[str, Any
],
3133 ) -> List
[Relation
]:
3135 db_ns_relations
= get_ns_configuration_relation_list(nsd
)
3136 for r
in db_ns_relations
:
3137 provider_dict
= None
3138 requirer_dict
= None
3139 if all(key
in r
for key
in ("provider", "requirer")):
3140 provider_dict
= r
["provider"]
3141 requirer_dict
= r
["requirer"]
3142 elif "entities" in r
:
3143 provider_id
= r
["entities"][0]["id"]
3146 "endpoint": r
["entities"][0]["endpoint"],
3148 if provider_id
!= nsd
["id"]:
3149 provider_dict
["vnf-profile-id"] = provider_id
3150 requirer_id
= r
["entities"][1]["id"]
3153 "endpoint": r
["entities"][1]["endpoint"],
3155 if requirer_id
!= nsd
["id"]:
3156 requirer_dict
["vnf-profile-id"] = requirer_id
3159 "provider/requirer or entities must be included in the relation."
3161 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3162 nsr_id
, nsd
, provider_dict
, cached_vnfds
3164 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3165 nsr_id
, nsd
, requirer_dict
, cached_vnfds
3167 provider
= EERelation(relation_provider
)
3168 requirer
= EERelation(relation_requirer
)
3169 relation
= Relation(r
["name"], provider
, requirer
)
3170 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3172 relations
.append(relation
)
3175 def _get_vnf_relations(
3178 nsd
: Dict
[str, Any
],
3180 cached_vnfds
: Dict
[str, Any
],
3181 ) -> List
[Relation
]:
3183 if vca
.target_element
== "ns":
3184 self
.logger
.debug("VCA is a NS charm, not a VNF.")
3186 vnf_profile
= get_vnf_profile(nsd
, vca
.vnf_profile_id
)
3187 vnf_profile_id
= vnf_profile
["id"]
3188 vnfd_id
= vnf_profile
["vnfd-id"]
3189 project
= nsd
["_admin"]["projects_read"][0]
3190 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3191 db_vnf_relations
= get_relation_list(db_vnfd
, vnfd_id
)
3192 for r
in db_vnf_relations
:
3193 provider_dict
= None
3194 requirer_dict
= None
3195 if all(key
in r
for key
in ("provider", "requirer")):
3196 provider_dict
= r
["provider"]
3197 requirer_dict
= r
["requirer"]
3198 elif "entities" in r
:
3199 provider_id
= r
["entities"][0]["id"]
3202 "vnf-profile-id": vnf_profile_id
,
3203 "endpoint": r
["entities"][0]["endpoint"],
3205 if provider_id
!= vnfd_id
:
3206 provider_dict
["vdu-profile-id"] = provider_id
3207 requirer_id
= r
["entities"][1]["id"]
3210 "vnf-profile-id": vnf_profile_id
,
3211 "endpoint": r
["entities"][1]["endpoint"],
3213 if requirer_id
!= vnfd_id
:
3214 requirer_dict
["vdu-profile-id"] = requirer_id
3217 "provider/requirer or entities must be included in the relation."
3219 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3220 nsr_id
, nsd
, provider_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3222 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3223 nsr_id
, nsd
, requirer_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3225 provider
= EERelation(relation_provider
)
3226 requirer
= EERelation(relation_requirer
)
3227 relation
= Relation(r
["name"], provider
, requirer
)
3228 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3230 relations
.append(relation
)
3233 def _get_kdu_resource_data(
3235 ee_relation
: EERelation
,
3236 db_nsr
: Dict
[str, Any
],
3237 cached_vnfds
: Dict
[str, Any
],
3238 ) -> DeployedK8sResource
:
3239 nsd
= get_nsd(db_nsr
)
3240 vnf_profiles
= get_vnf_profiles(nsd
)
3241 vnfd_id
= find_in_list(
3243 lambda vnf_profile
: vnf_profile
["id"] == ee_relation
.vnf_profile_id
,
3245 project
= nsd
["_admin"]["projects_read"][0]
3246 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3247 kdu_resource_profile
= get_kdu_resource_profile(
3248 db_vnfd
, ee_relation
.kdu_resource_profile_id
3250 kdu_name
= kdu_resource_profile
["kdu-name"]
3251 deployed_kdu
, _
= get_deployed_kdu(
3252 db_nsr
.get("_admin", ()).get("deployed", ()),
3254 ee_relation
.vnf_profile_id
,
3256 deployed_kdu
.update({"resource-name": kdu_resource_profile
["resource-name"]})
3259 def _get_deployed_component(
3261 ee_relation
: EERelation
,
3262 db_nsr
: Dict
[str, Any
],
3263 cached_vnfds
: Dict
[str, Any
],
3264 ) -> DeployedComponent
:
3265 nsr_id
= db_nsr
["_id"]
3266 deployed_component
= None
3267 ee_level
= EELevel
.get_level(ee_relation
)
3268 if ee_level
== EELevel
.NS
:
3269 vca
= get_deployed_vca(db_nsr
, {"vdu_id": None, "member-vnf-index": None})
3271 deployed_component
= DeployedVCA(nsr_id
, vca
)
3272 elif ee_level
== EELevel
.VNF
:
3273 vca
= get_deployed_vca(
3277 "member-vnf-index": ee_relation
.vnf_profile_id
,
3278 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3282 deployed_component
= DeployedVCA(nsr_id
, vca
)
3283 elif ee_level
== EELevel
.VDU
:
3284 vca
= get_deployed_vca(
3287 "vdu_id": ee_relation
.vdu_profile_id
,
3288 "member-vnf-index": ee_relation
.vnf_profile_id
,
3289 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3293 deployed_component
= DeployedVCA(nsr_id
, vca
)
3294 elif ee_level
== EELevel
.KDU
:
3295 kdu_resource_data
= self
._get
_kdu
_resource
_data
(
3296 ee_relation
, db_nsr
, cached_vnfds
3298 if kdu_resource_data
:
3299 deployed_component
= DeployedK8sResource(kdu_resource_data
)
3300 return deployed_component
3302 async def _add_relation(
3306 db_nsr
: Dict
[str, Any
],
3307 cached_vnfds
: Dict
[str, Any
],
3308 cached_vnfrs
: Dict
[str, Any
],
3310 deployed_provider
= self
._get
_deployed
_component
(
3311 relation
.provider
, db_nsr
, cached_vnfds
3313 deployed_requirer
= self
._get
_deployed
_component
(
3314 relation
.requirer
, db_nsr
, cached_vnfds
3318 and deployed_requirer
3319 and deployed_provider
.config_sw_installed
3320 and deployed_requirer
.config_sw_installed
3322 provider_db_vnfr
= (
3324 relation
.provider
.nsr_id
,
3325 relation
.provider
.vnf_profile_id
,
3328 if relation
.provider
.vnf_profile_id
3331 requirer_db_vnfr
= (
3333 relation
.requirer
.nsr_id
,
3334 relation
.requirer
.vnf_profile_id
,
3337 if relation
.requirer
.vnf_profile_id
3340 provider_vca_id
= self
.get_vca_id(provider_db_vnfr
, db_nsr
)
3341 requirer_vca_id
= self
.get_vca_id(requirer_db_vnfr
, db_nsr
)
3342 provider_relation_endpoint
= RelationEndpoint(
3343 deployed_provider
.ee_id
,
3345 relation
.provider
.endpoint
,
3347 requirer_relation_endpoint
= RelationEndpoint(
3348 deployed_requirer
.ee_id
,
3350 relation
.requirer
.endpoint
,
3353 await self
.vca_map
[vca_type
].add_relation(
3354 provider
=provider_relation_endpoint
,
3355 requirer
=requirer_relation_endpoint
,
3357 except N2VCException
as exception
:
3358 self
.logger
.error(exception
)
3359 raise LcmException(exception
)
3363 async def _add_vca_relations(
3369 timeout
: int = 3600,
3372 # 1. find all relations for this VCA
3373 # 2. wait for other peers related
3377 # STEP 1: find all relations for this VCA
3380 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3381 nsd
= get_nsd(db_nsr
)
3384 deployed_vca_dict
= get_deployed_vca_list(db_nsr
)[vca_index
]
3385 my_vca
= DeployedVCA(nsr_id
, deployed_vca_dict
)
3390 relations
.extend(self
._get
_ns
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3391 relations
.extend(self
._get
_vnf
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3393 # if no relations, terminate
3395 self
.logger
.debug(logging_text
+ " No relations")
3398 self
.logger
.debug(logging_text
+ " adding relations {}".format(relations
))
3405 if now
- start
>= timeout
:
3406 self
.logger
.error(logging_text
+ " : timeout adding relations")
3409 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3410 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3412 # for each relation, find the VCA's related
3413 for relation
in relations
.copy():
3414 added
= await self
._add
_relation
(
3422 relations
.remove(relation
)
3425 self
.logger
.debug("Relations added")
3427 await asyncio
.sleep(5.0)
3431 except Exception as e
:
3432 self
.logger
.warn(logging_text
+ " ERROR adding relations: {}".format(e
))
3435 async def _install_kdu(
3443 k8s_instance_info
: dict,
3444 k8params
: dict = None,
3449 k8sclustertype
= k8s_instance_info
["k8scluster-type"]
3452 "collection": "nsrs",
3453 "filter": {"_id": nsr_id
},
3454 "path": nsr_db_path
,
3457 if k8s_instance_info
.get("kdu-deployment-name"):
3458 kdu_instance
= k8s_instance_info
.get("kdu-deployment-name")
3460 kdu_instance
= self
.k8scluster_map
[
3462 ].generate_kdu_instance_name(
3463 db_dict
=db_dict_install
,
3464 kdu_model
=k8s_instance_info
["kdu-model"],
3465 kdu_name
=k8s_instance_info
["kdu-name"],
3468 # Update the nsrs table with the kdu-instance value
3472 _desc
={nsr_db_path
+ ".kdu-instance": kdu_instance
},
3475 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3476 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3477 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3478 # namespace, this first verification could be removed, and the next step would be done for any kind
3480 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3481 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3482 if k8sclustertype
in ("juju", "juju-bundle"):
3483 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3484 # that the user passed a namespace which he wants its KDU to be deployed in)
3490 "_admin.projects_write": k8s_instance_info
["namespace"],
3491 "_admin.projects_read": k8s_instance_info
["namespace"],
3497 f
"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3502 _desc
={f
"{nsr_db_path}.namespace": kdu_instance
},
3504 k8s_instance_info
["namespace"] = kdu_instance
3506 await self
.k8scluster_map
[k8sclustertype
].install(
3507 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3508 kdu_model
=k8s_instance_info
["kdu-model"],
3511 db_dict
=db_dict_install
,
3513 kdu_name
=k8s_instance_info
["kdu-name"],
3514 namespace
=k8s_instance_info
["namespace"],
3515 kdu_instance
=kdu_instance
,
3519 # Obtain services to obtain management service ip
3520 services
= await self
.k8scluster_map
[k8sclustertype
].get_services(
3521 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3522 kdu_instance
=kdu_instance
,
3523 namespace
=k8s_instance_info
["namespace"],
3526 # Obtain management service info (if exists)
3527 vnfr_update_dict
= {}
3528 kdu_config
= get_configuration(vnfd
, kdud
["name"])
3530 target_ee_list
= kdu_config
.get("execution-environment-list", [])
3535 vnfr_update_dict
["kdur.{}.services".format(kdu_index
)] = services
3538 for service
in kdud
.get("service", [])
3539 if service
.get("mgmt-service")
3541 for mgmt_service
in mgmt_services
:
3542 for service
in services
:
3543 if service
["name"].startswith(mgmt_service
["name"]):
3544 # Mgmt service found, Obtain service ip
3545 ip
= service
.get("external_ip", service
.get("cluster_ip"))
3546 if isinstance(ip
, list) and len(ip
) == 1:
3550 "kdur.{}.ip-address".format(kdu_index
)
3553 # Check if must update also mgmt ip at the vnf
3554 service_external_cp
= mgmt_service
.get(
3555 "external-connection-point-ref"
3557 if service_external_cp
:
3559 deep_get(vnfd
, ("mgmt-interface", "cp"))
3560 == service_external_cp
3562 vnfr_update_dict
["ip-address"] = ip
3567 "external-connection-point-ref", ""
3569 == service_external_cp
,
3572 "kdur.{}.ip-address".format(kdu_index
)
3577 "Mgmt service name: {} not found".format(
3578 mgmt_service
["name"]
3582 vnfr_update_dict
["kdur.{}.status".format(kdu_index
)] = "READY"
3583 self
.update_db_2("vnfrs", vnfr_data
.get("_id"), vnfr_update_dict
)
3585 kdu_config
= get_configuration(vnfd
, k8s_instance_info
["kdu-name"])
3588 and kdu_config
.get("initial-config-primitive")
3589 and get_juju_ee_ref(vnfd
, k8s_instance_info
["kdu-name"]) is None
3591 initial_config_primitive_list
= kdu_config
.get(
3592 "initial-config-primitive"
3594 initial_config_primitive_list
.sort(key
=lambda val
: int(val
["seq"]))
3596 for initial_config_primitive
in initial_config_primitive_list
:
3597 primitive_params_
= self
._map
_primitive
_params
(
3598 initial_config_primitive
, {}, {}
3601 await asyncio
.wait_for(
3602 self
.k8scluster_map
[k8sclustertype
].exec_primitive(
3603 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3604 kdu_instance
=kdu_instance
,
3605 primitive_name
=initial_config_primitive
["name"],
3606 params
=primitive_params_
,
3607 db_dict
=db_dict_install
,
3613 except Exception as e
:
3614 # Prepare update db with error and raise exception
3617 "nsrs", nsr_id
, {nsr_db_path
+ ".detailed-status": str(e
)}
3621 vnfr_data
.get("_id"),
3622 {"kdur.{}.status".format(kdu_index
): "ERROR"},
3625 # ignore to keep original exception
3627 # reraise original error
3632 async def deploy_kdus(
3639 task_instantiation_info
,
3641 # Launch kdus if present in the descriptor
3643 k8scluster_id_2_uuic
= {
3644 "helm-chart-v3": {},
3649 async def _get_cluster_id(cluster_id
, cluster_type
):
3650 nonlocal k8scluster_id_2_uuic
3651 if cluster_id
in k8scluster_id_2_uuic
[cluster_type
]:
3652 return k8scluster_id_2_uuic
[cluster_type
][cluster_id
]
3654 # check if K8scluster is creating and wait look if previous tasks in process
3655 task_name
, task_dependency
= self
.lcm_tasks
.lookfor_related(
3656 "k8scluster", cluster_id
3659 text
= "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3660 task_name
, cluster_id
3662 self
.logger
.debug(logging_text
+ text
)
3663 await asyncio
.wait(task_dependency
, timeout
=3600)
3665 db_k8scluster
= self
.db
.get_one(
3666 "k8sclusters", {"_id": cluster_id
}, fail_on_empty
=False
3668 if not db_k8scluster
:
3669 raise LcmException("K8s cluster {} cannot be found".format(cluster_id
))
3671 k8s_id
= deep_get(db_k8scluster
, ("_admin", cluster_type
, "id"))
3673 if cluster_type
== "helm-chart-v3":
3675 # backward compatibility for existing clusters that have not been initialized for helm v3
3676 k8s_credentials
= yaml
.safe_dump(
3677 db_k8scluster
.get("credentials")
3679 k8s_id
, uninstall_sw
= await self
.k8sclusterhelm3
.init_env(
3680 k8s_credentials
, reuse_cluster_uuid
=cluster_id
3682 db_k8scluster_update
= {}
3683 db_k8scluster_update
["_admin.helm-chart-v3.error_msg"] = None
3684 db_k8scluster_update
["_admin.helm-chart-v3.id"] = k8s_id
3685 db_k8scluster_update
[
3686 "_admin.helm-chart-v3.created"
3688 db_k8scluster_update
[
3689 "_admin.helm-chart-v3.operationalState"
3692 "k8sclusters", cluster_id
, db_k8scluster_update
3694 except Exception as e
:
3697 + "error initializing helm-v3 cluster: {}".format(str(e
))
3700 "K8s cluster '{}' has not been initialized for '{}'".format(
3701 cluster_id
, cluster_type
3706 "K8s cluster '{}' has not been initialized for '{}'".format(
3707 cluster_id
, cluster_type
3710 k8scluster_id_2_uuic
[cluster_type
][cluster_id
] = k8s_id
3713 logging_text
+= "Deploy kdus: "
3716 db_nsr_update
= {"_admin.deployed.K8s": []}
3717 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3720 updated_cluster_list
= []
3721 updated_v3_cluster_list
= []
3723 for vnfr_data
in db_vnfrs
.values():
3724 vca_id
= self
.get_vca_id(vnfr_data
, {})
3725 for kdu_index
, kdur
in enumerate(get_iterable(vnfr_data
, "kdur")):
3726 # Step 0: Prepare and set parameters
3727 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
3728 vnfd_id
= vnfr_data
.get("vnfd-id")
3729 vnfd_with_id
= find_in_list(
3730 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3734 for kdud
in vnfd_with_id
["kdu"]
3735 if kdud
["name"] == kdur
["kdu-name"]
3737 namespace
= kdur
.get("k8s-namespace")
3738 kdu_deployment_name
= kdur
.get("kdu-deployment-name")
3739 if kdur
.get("helm-chart"):
3740 kdumodel
= kdur
["helm-chart"]
3741 # Default version: helm3, if helm-version is v2 assign v2
3742 k8sclustertype
= "helm-chart-v3"
3743 self
.logger
.debug("kdur: {}".format(kdur
))
3745 kdur
.get("helm-version")
3746 and kdur
.get("helm-version") == "v2"
3748 k8sclustertype
= "helm-chart"
3749 elif kdur
.get("juju-bundle"):
3750 kdumodel
= kdur
["juju-bundle"]
3751 k8sclustertype
= "juju-bundle"
3754 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3755 "juju-bundle. Maybe an old NBI version is running".format(
3756 vnfr_data
["member-vnf-index-ref"], kdur
["kdu-name"]
3759 # check if kdumodel is a file and exists
3761 vnfd_with_id
= find_in_list(
3762 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3764 storage
= deep_get(vnfd_with_id
, ("_admin", "storage"))
3765 if storage
: # may be not present if vnfd has not artifacts
3766 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3767 if storage
["pkg-dir"]:
3768 filename
= "{}/{}/{}s/{}".format(
3775 filename
= "{}/Scripts/{}s/{}".format(
3780 if self
.fs
.file_exists(
3781 filename
, mode
="file"
3782 ) or self
.fs
.file_exists(filename
, mode
="dir"):
3783 kdumodel
= self
.fs
.path
+ filename
3784 except (asyncio
.TimeoutError
, asyncio
.CancelledError
):
3786 except Exception: # it is not a file
3789 k8s_cluster_id
= kdur
["k8s-cluster"]["id"]
3790 step
= "Synchronize repos for k8s cluster '{}'".format(
3793 cluster_uuid
= await _get_cluster_id(k8s_cluster_id
, k8sclustertype
)
3797 k8sclustertype
== "helm-chart"
3798 and cluster_uuid
not in updated_cluster_list
3800 k8sclustertype
== "helm-chart-v3"
3801 and cluster_uuid
not in updated_v3_cluster_list
3803 del_repo_list
, added_repo_dict
= await asyncio
.ensure_future(
3804 self
.k8scluster_map
[k8sclustertype
].synchronize_repos(
3805 cluster_uuid
=cluster_uuid
3808 if del_repo_list
or added_repo_dict
:
3809 if k8sclustertype
== "helm-chart":
3811 "_admin.helm_charts_added." + item
: None
3812 for item
in del_repo_list
3815 "_admin.helm_charts_added." + item
: name
3816 for item
, name
in added_repo_dict
.items()
3818 updated_cluster_list
.append(cluster_uuid
)
3819 elif k8sclustertype
== "helm-chart-v3":
3821 "_admin.helm_charts_v3_added." + item
: None
3822 for item
in del_repo_list
3825 "_admin.helm_charts_v3_added." + item
: name
3826 for item
, name
in added_repo_dict
.items()
3828 updated_v3_cluster_list
.append(cluster_uuid
)
3830 logging_text
+ "repos synchronized on k8s cluster "
3831 "'{}' to_delete: {}, to_add: {}".format(
3832 k8s_cluster_id
, del_repo_list
, added_repo_dict
3837 {"_id": k8s_cluster_id
},
3843 step
= "Instantiating KDU {}.{} in k8s cluster {}".format(
3844 vnfr_data
["member-vnf-index-ref"],
3848 k8s_instance_info
= {
3849 "kdu-instance": None,
3850 "k8scluster-uuid": cluster_uuid
,
3851 "k8scluster-type": k8sclustertype
,
3852 "member-vnf-index": vnfr_data
["member-vnf-index-ref"],
3853 "kdu-name": kdur
["kdu-name"],
3854 "kdu-model": kdumodel
,
3855 "namespace": namespace
,
3856 "kdu-deployment-name": kdu_deployment_name
,
3858 db_path
= "_admin.deployed.K8s.{}".format(index
)
3859 db_nsr_update
[db_path
] = k8s_instance_info
3860 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3861 vnfd_with_id
= find_in_list(
3862 db_vnfds
, lambda vnf
: vnf
["_id"] == vnfd_id
3864 task
= asyncio
.ensure_future(
3873 k8params
=desc_params
,
3878 self
.lcm_tasks
.register(
3882 "instantiate_KDU-{}".format(index
),
3885 task_instantiation_info
[task
] = "Deploying KDU {}".format(
3891 except (LcmException
, asyncio
.CancelledError
):
3893 except Exception as e
:
3894 msg
= "Exception {} while {}: {}".format(type(e
).__name
__, step
, e
)
3895 if isinstance(e
, (N2VCException
, DbException
)):
3896 self
.logger
.error(logging_text
+ msg
)
3898 self
.logger
.critical(logging_text
+ msg
, exc_info
=True)
3899 raise LcmException(msg
)
3902 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3922 task_instantiation_info
,
3925 # launch instantiate_N2VC in a asyncio task and register task object
3926 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
3927 # if not found, create one entry and update database
3928 # fill db_nsr._admin.deployed.VCA.<index>
3931 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
3935 get_charm_name
= False
3936 if "execution-environment-list" in descriptor_config
:
3937 ee_list
= descriptor_config
.get("execution-environment-list", [])
3938 elif "juju" in descriptor_config
:
3939 ee_list
= [descriptor_config
] # ns charms
3940 if "execution-environment-list" not in descriptor_config
:
3941 # charm name is only required for ns charms
3942 get_charm_name
= True
3943 else: # other types as script are not supported
3946 for ee_item
in ee_list
:
3949 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3950 ee_item
.get("juju"), ee_item
.get("helm-chart")
3953 ee_descriptor_id
= ee_item
.get("id")
3954 if ee_item
.get("juju"):
3955 vca_name
= ee_item
["juju"].get("charm")
3957 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
3960 if ee_item
["juju"].get("charm") is not None
3963 if ee_item
["juju"].get("cloud") == "k8s":
3964 vca_type
= "k8s_proxy_charm"
3965 elif ee_item
["juju"].get("proxy") is False:
3966 vca_type
= "native_charm"
3967 elif ee_item
.get("helm-chart"):
3968 vca_name
= ee_item
["helm-chart"]
3969 if ee_item
.get("helm-version") and ee_item
.get("helm-version") == "v2":
3972 vca_type
= "helm-v3"
3975 logging_text
+ "skipping non juju neither charm configuration"
3980 for vca_index
, vca_deployed
in enumerate(
3981 db_nsr
["_admin"]["deployed"]["VCA"]
3983 if not vca_deployed
:
3986 vca_deployed
.get("member-vnf-index") == member_vnf_index
3987 and vca_deployed
.get("vdu_id") == vdu_id
3988 and vca_deployed
.get("kdu_name") == kdu_name
3989 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
3990 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
3994 # not found, create one.
3996 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
3999 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
4001 target
+= "/kdu/{}".format(kdu_name
)
4003 "target_element": target
,
4004 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4005 "member-vnf-index": member_vnf_index
,
4007 "kdu_name": kdu_name
,
4008 "vdu_count_index": vdu_index
,
4009 "operational-status": "init", # TODO revise
4010 "detailed-status": "", # TODO revise
4011 "step": "initial-deploy", # TODO revise
4013 "vdu_name": vdu_name
,
4015 "ee_descriptor_id": ee_descriptor_id
,
4016 "charm_name": charm_name
,
4020 # create VCA and configurationStatus in db
4022 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
4023 "configurationStatus.{}".format(vca_index
): dict(),
4025 self
.update_db_2("nsrs", nsr_id
, db_dict
)
4027 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
4029 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
4030 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
4031 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
4034 task_n2vc
= asyncio
.ensure_future(
4035 self
.instantiate_N2VC(
4036 logging_text
=logging_text
,
4037 vca_index
=vca_index
,
4043 vdu_index
=vdu_index
,
4044 kdu_index
=kdu_index
,
4045 deploy_params
=deploy_params
,
4046 config_descriptor
=descriptor_config
,
4047 base_folder
=base_folder
,
4048 nslcmop_id
=nslcmop_id
,
4052 ee_config_descriptor
=ee_item
,
4055 self
.lcm_tasks
.register(
4059 "instantiate_N2VC-{}".format(vca_index
),
4062 task_instantiation_info
[
4064 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
4065 member_vnf_index
or "", vdu_id
or ""
4069 def _create_nslcmop(nsr_id
, operation
, params
):
4071 Creates a ns-lcm-opp content to be stored at database.
4072 :param nsr_id: internal id of the instance
4073 :param operation: instantiate, terminate, scale, action, ...
4074 :param params: user parameters for the operation
4075 :return: dictionary following SOL005 format
4077 # Raise exception if invalid arguments
4078 if not (nsr_id
and operation
and params
):
4080 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
4087 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
4088 "operationState": "PROCESSING",
4089 "statusEnteredTime": now
,
4090 "nsInstanceId": nsr_id
,
4091 "lcmOperationType": operation
,
4093 "isAutomaticInvocation": False,
4094 "operationParams": params
,
4095 "isCancelPending": False,
4097 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id
,
4098 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id
,
4103 def _format_additional_params(self
, params
):
4104 params
= params
or {}
4105 for key
, value
in params
.items():
4106 if str(value
).startswith("!!yaml "):
4107 params
[key
] = yaml
.safe_load(value
[7:])
4110 def _get_terminate_primitive_params(self
, seq
, vnf_index
):
4111 primitive
= seq
.get("name")
4112 primitive_params
= {}
4114 "member_vnf_index": vnf_index
,
4115 "primitive": primitive
,
4116 "primitive_params": primitive_params
,
4119 return self
._map
_primitive
_params
(seq
, params
, desc_params
)
4123 def _retry_or_skip_suboperation(self
, db_nslcmop
, op_index
):
4124 op
= deep_get(db_nslcmop
, ("_admin", "operations"), [])[op_index
]
4125 if op
.get("operationState") == "COMPLETED":
4126 # b. Skip sub-operation
4127 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4128 return self
.SUBOPERATION_STATUS_SKIP
4130 # c. retry executing sub-operation
4131 # The sub-operation exists, and operationState != 'COMPLETED'
4132 # Update operationState = 'PROCESSING' to indicate a retry.
4133 operationState
= "PROCESSING"
4134 detailed_status
= "In progress"
4135 self
._update
_suboperation
_status
(
4136 db_nslcmop
, op_index
, operationState
, detailed_status
4138 # Return the sub-operation index
4139 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4140 # with arguments extracted from the sub-operation
4143 # Find a sub-operation where all keys in a matching dictionary must match
4144 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4145 def _find_suboperation(self
, db_nslcmop
, match
):
4146 if db_nslcmop
and match
:
4147 op_list
= db_nslcmop
.get("_admin", {}).get("operations", [])
4148 for i
, op
in enumerate(op_list
):
4149 if all(op
.get(k
) == match
[k
] for k
in match
):
4151 return self
.SUBOPERATION_STATUS_NOT_FOUND
4153 # Update status for a sub-operation given its index
4154 def _update_suboperation_status(
4155 self
, db_nslcmop
, op_index
, operationState
, detailed_status
4157 # Update DB for HA tasks
4158 q_filter
= {"_id": db_nslcmop
["_id"]}
4160 "_admin.operations.{}.operationState".format(op_index
): operationState
,
4161 "_admin.operations.{}.detailed-status".format(op_index
): detailed_status
,
4164 "nslcmops", q_filter
=q_filter
, update_dict
=update_dict
, fail_on_empty
=False
4167 # Add sub-operation, return the index of the added sub-operation
4168 # Optionally, set operationState, detailed-status, and operationType
4169 # Status and type are currently set for 'scale' sub-operations:
4170 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4171 # 'detailed-status' : status message
4172 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4173 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4174 def _add_suboperation(
4182 mapped_primitive_params
,
4183 operationState
=None,
4184 detailed_status
=None,
4187 RO_scaling_info
=None,
4190 return self
.SUBOPERATION_STATUS_NOT_FOUND
4191 # Get the "_admin.operations" list, if it exists
4192 db_nslcmop_admin
= db_nslcmop
.get("_admin", {})
4193 op_list
= db_nslcmop_admin
.get("operations")
4194 # Create or append to the "_admin.operations" list
4196 "member_vnf_index": vnf_index
,
4198 "vdu_count_index": vdu_count_index
,
4199 "primitive": primitive
,
4200 "primitive_params": mapped_primitive_params
,
4203 new_op
["operationState"] = operationState
4205 new_op
["detailed-status"] = detailed_status
4207 new_op
["lcmOperationType"] = operationType
4209 new_op
["RO_nsr_id"] = RO_nsr_id
4211 new_op
["RO_scaling_info"] = RO_scaling_info
4213 # No existing operations, create key 'operations' with current operation as first list element
4214 db_nslcmop_admin
.update({"operations": [new_op
]})
4215 op_list
= db_nslcmop_admin
.get("operations")
4217 # Existing operations, append operation to list
4218 op_list
.append(new_op
)
4220 db_nslcmop_update
= {"_admin.operations": op_list
}
4221 self
.update_db_2("nslcmops", db_nslcmop
["_id"], db_nslcmop_update
)
4222 op_index
= len(op_list
) - 1
4225 # Helper methods for scale() sub-operations
4227 # pre-scale/post-scale:
4228 # Check for 3 different cases:
4229 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4230 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4231 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4232 def _check_or_add_scale_suboperation(
4236 vnf_config_primitive
,
4240 RO_scaling_info
=None,
4242 # Find this sub-operation
4243 if RO_nsr_id
and RO_scaling_info
:
4244 operationType
= "SCALE-RO"
4246 "member_vnf_index": vnf_index
,
4247 "RO_nsr_id": RO_nsr_id
,
4248 "RO_scaling_info": RO_scaling_info
,
4252 "member_vnf_index": vnf_index
,
4253 "primitive": vnf_config_primitive
,
4254 "primitive_params": primitive_params
,
4255 "lcmOperationType": operationType
,
4257 op_index
= self
._find
_suboperation
(db_nslcmop
, match
)
4258 if op_index
== self
.SUBOPERATION_STATUS_NOT_FOUND
:
4259 # a. New sub-operation
4260 # The sub-operation does not exist, add it.
4261 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4262 # The following parameters are set to None for all kind of scaling:
4264 vdu_count_index
= None
4266 if RO_nsr_id
and RO_scaling_info
:
4267 vnf_config_primitive
= None
4268 primitive_params
= None
4271 RO_scaling_info
= None
4272 # Initial status for sub-operation
4273 operationState
= "PROCESSING"
4274 detailed_status
= "In progress"
4275 # Add sub-operation for pre/post-scaling (zero or more operations)
4276 self
._add
_suboperation
(
4282 vnf_config_primitive
,
4290 return self
.SUBOPERATION_STATUS_NEW
4292 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4293 # or op_index (operationState != 'COMPLETED')
4294 return self
._retry
_or
_skip
_suboperation
(db_nslcmop
, op_index
)
4296 # Function to return execution_environment id
4298 def _get_ee_id(self
, vnf_index
, vdu_id
, vca_deployed_list
):
4299 # TODO vdu_index_count
4300 for vca
in vca_deployed_list
:
4301 if vca
["member-vnf-index"] == vnf_index
and vca
["vdu_id"] == vdu_id
:
4304 async def destroy_N2VC(
4312 exec_primitives
=True,
4317 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4318 :param logging_text:
4320 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4321 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4322 :param vca_index: index in the database _admin.deployed.VCA
4323 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4324 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4325 not executed properly
4326 :param scaling_in: True destroys the application, False destroys the model
4327 :return: None or exception
4332 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4333 vca_index
, vca_deployed
, config_descriptor
, destroy_ee
4337 vca_type
= vca_deployed
.get("type", "lxc_proxy_charm")
4339 # execute terminate_primitives
4341 terminate_primitives
= get_ee_sorted_terminate_config_primitive_list(
4342 config_descriptor
.get("terminate-config-primitive"),
4343 vca_deployed
.get("ee_descriptor_id"),
4345 vdu_id
= vca_deployed
.get("vdu_id")
4346 vdu_count_index
= vca_deployed
.get("vdu_count_index")
4347 vdu_name
= vca_deployed
.get("vdu_name")
4348 vnf_index
= vca_deployed
.get("member-vnf-index")
4349 if terminate_primitives
and vca_deployed
.get("needed_terminate"):
4350 for seq
in terminate_primitives
:
4351 # For each sequence in list, get primitive and call _ns_execute_primitive()
4352 step
= "Calling terminate action for vnf_member_index={} primitive={}".format(
4353 vnf_index
, seq
.get("name")
4355 self
.logger
.debug(logging_text
+ step
)
4356 # Create the primitive for each sequence, i.e. "primitive": "touch"
4357 primitive
= seq
.get("name")
4358 mapped_primitive_params
= self
._get
_terminate
_primitive
_params
(
4363 self
._add
_suboperation
(
4370 mapped_primitive_params
,
4372 # Sub-operations: Call _ns_execute_primitive() instead of action()
4374 result
, result_detail
= await self
._ns
_execute
_primitive
(
4375 vca_deployed
["ee_id"],
4377 mapped_primitive_params
,
4381 except LcmException
:
4382 # this happens when VCA is not deployed. In this case it is not needed to terminate
4384 result_ok
= ["COMPLETED", "PARTIALLY_COMPLETED"]
4385 if result
not in result_ok
:
4387 "terminate_primitive {} for vnf_member_index={} fails with "
4388 "error {}".format(seq
.get("name"), vnf_index
, result_detail
)
4390 # set that this VCA do not need terminated
4391 db_update_entry
= "_admin.deployed.VCA.{}.needed_terminate".format(
4395 "nsrs", db_nslcmop
["nsInstanceId"], {db_update_entry
: False}
4398 # Delete Prometheus Jobs if any
4399 # This uses NSR_ID, so it will destroy any jobs under this index
4400 self
.db
.del_list("prometheus_jobs", {"nsr_id": db_nslcmop
["nsInstanceId"]})
4403 await self
.vca_map
[vca_type
].delete_execution_environment(
4404 vca_deployed
["ee_id"],
4405 scaling_in
=scaling_in
,
4410 async def _delete_all_N2VC(self
, db_nsr
: dict, vca_id
: str = None):
4411 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="TERMINATING")
4412 namespace
= "." + db_nsr
["_id"]
4414 await self
.n2vc
.delete_namespace(
4415 namespace
=namespace
,
4416 total_timeout
=self
.timeout
.charm_delete
,
4419 except N2VCNotFound
: # already deleted. Skip
4421 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="DELETED")
4423 async def terminate(self
, nsr_id
, nslcmop_id
):
4424 # Try to lock HA task here
4425 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
4426 if not task_is_locked_by_me
:
4429 logging_text
= "Task ns={} terminate={} ".format(nsr_id
, nslcmop_id
)
4430 self
.logger
.debug(logging_text
+ "Enter")
4431 timeout_ns_terminate
= self
.timeout
.ns_terminate
4434 operation_params
= None
4436 error_list
= [] # annotates all failed error messages
4437 db_nslcmop_update
= {}
4438 autoremove
= False # autoremove after terminated
4439 tasks_dict_info
= {}
4442 "Stage 1/3: Preparing task.",
4443 "Waiting for previous operations to terminate.",
4446 # ^ contains [stage, step, VIM-status]
4448 # wait for any previous tasks in process
4449 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
4451 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
4452 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
4453 operation_params
= db_nslcmop
.get("operationParams") or {}
4454 if operation_params
.get("timeout_ns_terminate"):
4455 timeout_ns_terminate
= operation_params
["timeout_ns_terminate"]
4456 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
4457 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
4459 db_nsr_update
["operational-status"] = "terminating"
4460 db_nsr_update
["config-status"] = "terminating"
4461 self
._write
_ns
_status
(
4463 ns_state
="TERMINATING",
4464 current_operation
="TERMINATING",
4465 current_operation_id
=nslcmop_id
,
4466 other_update
=db_nsr_update
,
4468 self
._write
_op
_status
(op_id
=nslcmop_id
, queuePosition
=0, stage
=stage
)
4469 nsr_deployed
= deepcopy(db_nsr
["_admin"].get("deployed")) or {}
4470 if db_nsr
["_admin"]["nsState"] == "NOT_INSTANTIATED":
4473 stage
[1] = "Getting vnf descriptors from db."
4474 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
4476 db_vnfr
["member-vnf-index-ref"]: db_vnfr
for db_vnfr
in db_vnfrs_list
4478 db_vnfds_from_id
= {}
4479 db_vnfds_from_member_index
= {}
4481 for vnfr
in db_vnfrs_list
:
4482 vnfd_id
= vnfr
["vnfd-id"]
4483 if vnfd_id
not in db_vnfds_from_id
:
4484 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
4485 db_vnfds_from_id
[vnfd_id
] = vnfd
4486 db_vnfds_from_member_index
[
4487 vnfr
["member-vnf-index-ref"]
4488 ] = db_vnfds_from_id
[vnfd_id
]
4490 # Destroy individual execution environments when there are terminating primitives.
4491 # Rest of EE will be deleted at once
4492 # TODO - check before calling _destroy_N2VC
4493 # if not operation_params.get("skip_terminate_primitives"):#
4494 # or not vca.get("needed_terminate"):
4495 stage
[0] = "Stage 2/3 execute terminating primitives."
4496 self
.logger
.debug(logging_text
+ stage
[0])
4497 stage
[1] = "Looking execution environment that needs terminate."
4498 self
.logger
.debug(logging_text
+ stage
[1])
4500 for vca_index
, vca
in enumerate(get_iterable(nsr_deployed
, "VCA")):
4501 config_descriptor
= None
4502 vca_member_vnf_index
= vca
.get("member-vnf-index")
4503 vca_id
= self
.get_vca_id(
4504 db_vnfrs_dict
.get(vca_member_vnf_index
)
4505 if vca_member_vnf_index
4509 if not vca
or not vca
.get("ee_id"):
4511 if not vca
.get("member-vnf-index"):
4513 config_descriptor
= db_nsr
.get("ns-configuration")
4514 elif vca
.get("vdu_id"):
4515 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4516 config_descriptor
= get_configuration(db_vnfd
, vca
.get("vdu_id"))
4517 elif vca
.get("kdu_name"):
4518 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4519 config_descriptor
= get_configuration(db_vnfd
, vca
.get("kdu_name"))
4521 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4522 config_descriptor
= get_configuration(db_vnfd
, db_vnfd
["id"])
4523 vca_type
= vca
.get("type")
4524 exec_terminate_primitives
= not operation_params
.get(
4525 "skip_terminate_primitives"
4526 ) and vca
.get("needed_terminate")
4527 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4528 # pending native charms
4530 True if vca_type
in ("helm", "helm-v3", "native_charm") else False
4532 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4533 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4534 task
= asyncio
.ensure_future(
4542 exec_terminate_primitives
,
4546 tasks_dict_info
[task
] = "Terminating VCA {}".format(vca
.get("ee_id"))
4548 # wait for pending tasks of terminate primitives
4552 + "Waiting for tasks {}".format(list(tasks_dict_info
.keys()))
4554 error_list
= await self
._wait
_for
_tasks
(
4557 min(self
.timeout
.charm_delete
, timeout_ns_terminate
),
4561 tasks_dict_info
.clear()
4563 return # raise LcmException("; ".join(error_list))
4565 # remove All execution environments at once
4566 stage
[0] = "Stage 3/3 delete all."
4568 if nsr_deployed
.get("VCA"):
4569 stage
[1] = "Deleting all execution environments."
4570 self
.logger
.debug(logging_text
+ stage
[1])
4571 vca_id
= self
.get_vca_id({}, db_nsr
)
4572 task_delete_ee
= asyncio
.ensure_future(
4574 self
._delete
_all
_N
2VC
(db_nsr
=db_nsr
, vca_id
=vca_id
),
4575 timeout
=self
.timeout
.charm_delete
,
4578 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4579 tasks_dict_info
[task_delete_ee
] = "Terminating all VCA"
4581 # Delete Namespace and Certificates if necessary
4582 if check_helm_ee_in_ns(list(db_vnfds_from_member_index
.values())):
4583 await self
.vca_map
["helm-v3"].delete_tls_certificate(
4584 certificate_name
=db_nslcmop
["nsInstanceId"],
4586 # TODO: Delete namespace
4588 # Delete from k8scluster
4589 stage
[1] = "Deleting KDUs."
4590 self
.logger
.debug(logging_text
+ stage
[1])
4591 # print(nsr_deployed)
4592 for kdu
in get_iterable(nsr_deployed
, "K8s"):
4593 if not kdu
or not kdu
.get("kdu-instance"):
4595 kdu_instance
= kdu
.get("kdu-instance")
4596 if kdu
.get("k8scluster-type") in self
.k8scluster_map
:
4597 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4598 vca_id
= self
.get_vca_id({}, db_nsr
)
4599 task_delete_kdu_instance
= asyncio
.ensure_future(
4600 self
.k8scluster_map
[kdu
["k8scluster-type"]].uninstall(
4601 cluster_uuid
=kdu
.get("k8scluster-uuid"),
4602 kdu_instance
=kdu_instance
,
4604 namespace
=kdu
.get("namespace"),
4610 + "Unknown k8s deployment type {}".format(
4611 kdu
.get("k8scluster-type")
4616 task_delete_kdu_instance
4617 ] = "Terminating KDU '{}'".format(kdu
.get("kdu-name"))
4620 stage
[1] = "Deleting ns from VIM."
4621 if self
.ro_config
.ng
:
4622 task_delete_ro
= asyncio
.ensure_future(
4623 self
._terminate
_ng
_ro
(
4624 logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
4627 tasks_dict_info
[task_delete_ro
] = "Removing deployment from VIM"
4629 # rest of staff will be done at finally
4632 ROclient
.ROClientException
,
4637 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
4639 except asyncio
.CancelledError
:
4641 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
4643 exc
= "Operation was cancelled"
4644 except Exception as e
:
4645 exc
= traceback
.format_exc()
4646 self
.logger
.critical(
4647 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
4652 error_list
.append(str(exc
))
4654 # wait for pending tasks
4656 stage
[1] = "Waiting for terminate pending tasks."
4657 self
.logger
.debug(logging_text
+ stage
[1])
4658 error_list
+= await self
._wait
_for
_tasks
(
4661 timeout_ns_terminate
,
4665 stage
[1] = stage
[2] = ""
4666 except asyncio
.CancelledError
:
4667 error_list
.append("Cancelled")
4668 # TODO cancell all tasks
4669 except Exception as exc
:
4670 error_list
.append(str(exc
))
4671 # update status at database
4673 error_detail
= "; ".join(error_list
)
4674 # self.logger.error(logging_text + error_detail)
4675 error_description_nslcmop
= "{} Detail: {}".format(
4676 stage
[0], error_detail
4678 error_description_nsr
= "Operation: TERMINATING.{}, {}.".format(
4679 nslcmop_id
, stage
[0]
4682 db_nsr_update
["operational-status"] = "failed"
4683 db_nsr_update
["detailed-status"] = (
4684 error_description_nsr
+ " Detail: " + error_detail
4686 db_nslcmop_update
["detailed-status"] = error_detail
4687 nslcmop_operation_state
= "FAILED"
4691 error_description_nsr
= error_description_nslcmop
= None
4692 ns_state
= "NOT_INSTANTIATED"
4693 db_nsr_update
["operational-status"] = "terminated"
4694 db_nsr_update
["detailed-status"] = "Done"
4695 db_nsr_update
["_admin.nsState"] = "NOT_INSTANTIATED"
4696 db_nslcmop_update
["detailed-status"] = "Done"
4697 nslcmop_operation_state
= "COMPLETED"
4700 self
._write
_ns
_status
(
4703 current_operation
="IDLE",
4704 current_operation_id
=None,
4705 error_description
=error_description_nsr
,
4706 error_detail
=error_detail
,
4707 other_update
=db_nsr_update
,
4709 self
._write
_op
_status
(
4712 error_message
=error_description_nslcmop
,
4713 operation_state
=nslcmop_operation_state
,
4714 other_update
=db_nslcmop_update
,
4716 if ns_state
== "NOT_INSTANTIATED":
4720 {"nsr-id-ref": nsr_id
},
4721 {"_admin.nsState": "NOT_INSTANTIATED"},
4723 except DbException
as e
:
4726 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4730 if operation_params
:
4731 autoremove
= operation_params
.get("autoremove", False)
4732 if nslcmop_operation_state
:
4734 await self
.msg
.aiowrite(
4739 "nslcmop_id": nslcmop_id
,
4740 "operationState": nslcmop_operation_state
,
4741 "autoremove": autoremove
,
4745 except Exception as e
:
4747 logging_text
+ "kafka_write notification Exception {}".format(e
)
4749 self
.logger
.debug(f
"Deleting alerts: ns_id={nsr_id}")
4750 self
.db
.del_list("alerts", {"tags.ns_id": nsr_id
})
4752 self
.logger
.debug(logging_text
+ "Exit")
4753 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_terminate")
4755 async def _wait_for_tasks(
4756 self
, logging_text
, created_tasks_info
, timeout
, stage
, nslcmop_id
, nsr_id
=None
4759 error_detail_list
= []
4761 pending_tasks
= list(created_tasks_info
.keys())
4762 num_tasks
= len(pending_tasks
)
4764 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4765 self
._write
_op
_status
(nslcmop_id
, stage
)
4766 while pending_tasks
:
4768 _timeout
= timeout
+ time_start
- time()
4769 done
, pending_tasks
= await asyncio
.wait(
4770 pending_tasks
, timeout
=_timeout
, return_when
=asyncio
.FIRST_COMPLETED
4772 num_done
+= len(done
)
4773 if not done
: # Timeout
4774 for task
in pending_tasks
:
4775 new_error
= created_tasks_info
[task
] + ": Timeout"
4776 error_detail_list
.append(new_error
)
4777 error_list
.append(new_error
)
4780 if task
.cancelled():
4783 exc
= task
.exception()
4785 if isinstance(exc
, asyncio
.TimeoutError
):
4787 new_error
= created_tasks_info
[task
] + ": {}".format(exc
)
4788 error_list
.append(created_tasks_info
[task
])
4789 error_detail_list
.append(new_error
)
4796 ROclient
.ROClientException
,
4802 self
.logger
.error(logging_text
+ new_error
)
4804 exc_traceback
= "".join(
4805 traceback
.format_exception(None, exc
, exc
.__traceback
__)
4809 + created_tasks_info
[task
]
4815 logging_text
+ created_tasks_info
[task
] + ": Done"
4817 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4819 stage
[1] += " Errors: " + ". ".join(error_detail_list
) + "."
4820 if nsr_id
: # update also nsr
4825 "errorDescription": "Error at: " + ", ".join(error_list
),
4826 "errorDetail": ". ".join(error_detail_list
),
4829 self
._write
_op
_status
(nslcmop_id
, stage
)
4830 return error_detail_list
4833 def _map_primitive_params(primitive_desc
, params
, instantiation_params
):
4835 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4836 The default-value is used. If it is between < > it look for a value at instantiation_params
4837 :param primitive_desc: portion of VNFD/NSD that describes primitive
4838 :param params: Params provided by user
4839 :param instantiation_params: Instantiation params provided by user
4840 :return: a dictionary with the calculated params
4842 calculated_params
= {}
4843 for parameter
in primitive_desc
.get("parameter", ()):
4844 param_name
= parameter
["name"]
4845 if param_name
in params
:
4846 calculated_params
[param_name
] = params
[param_name
]
4847 elif "default-value" in parameter
or "value" in parameter
:
4848 if "value" in parameter
:
4849 calculated_params
[param_name
] = parameter
["value"]
4851 calculated_params
[param_name
] = parameter
["default-value"]
4853 isinstance(calculated_params
[param_name
], str)
4854 and calculated_params
[param_name
].startswith("<")
4855 and calculated_params
[param_name
].endswith(">")
4857 if calculated_params
[param_name
][1:-1] in instantiation_params
:
4858 calculated_params
[param_name
] = instantiation_params
[
4859 calculated_params
[param_name
][1:-1]
4863 "Parameter {} needed to execute primitive {} not provided".format(
4864 calculated_params
[param_name
], primitive_desc
["name"]
4869 "Parameter {} needed to execute primitive {} not provided".format(
4870 param_name
, primitive_desc
["name"]
4874 if isinstance(calculated_params
[param_name
], (dict, list, tuple)):
4875 calculated_params
[param_name
] = yaml
.safe_dump(
4876 calculated_params
[param_name
], default_flow_style
=True, width
=256
4878 elif isinstance(calculated_params
[param_name
], str) and calculated_params
[
4880 ].startswith("!!yaml "):
4881 calculated_params
[param_name
] = calculated_params
[param_name
][7:]
4882 if parameter
.get("data-type") == "INTEGER":
4884 calculated_params
[param_name
] = int(calculated_params
[param_name
])
4885 except ValueError: # error converting string to int
4887 "Parameter {} of primitive {} must be integer".format(
4888 param_name
, primitive_desc
["name"]
4891 elif parameter
.get("data-type") == "BOOLEAN":
4892 calculated_params
[param_name
] = not (
4893 (str(calculated_params
[param_name
])).lower() == "false"
4896 # add always ns_config_info if primitive name is config
4897 if primitive_desc
["name"] == "config":
4898 if "ns_config_info" in instantiation_params
:
4899 calculated_params
["ns_config_info"] = instantiation_params
[
4902 return calculated_params
4904 def _look_for_deployed_vca(
4911 ee_descriptor_id
=None,
4913 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4914 for vca
in deployed_vca
:
4917 if member_vnf_index
!= vca
["member-vnf-index"] or vdu_id
!= vca
["vdu_id"]:
4920 vdu_count_index
is not None
4921 and vdu_count_index
!= vca
["vdu_count_index"]
4924 if kdu_name
and kdu_name
!= vca
["kdu_name"]:
4926 if ee_descriptor_id
and ee_descriptor_id
!= vca
["ee_descriptor_id"]:
4930 # vca_deployed not found
4932 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4933 " is not deployed".format(
4942 ee_id
= vca
.get("ee_id")
4944 "type", "lxc_proxy_charm"
4945 ) # default value for backward compatibility - proxy charm
4948 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4949 "execution environment".format(
4950 member_vnf_index
, vdu_id
, kdu_name
, vdu_count_index
4953 return ee_id
, vca_type
4955 async def _ns_execute_primitive(
4961 retries_interval
=30,
4968 if primitive
== "config":
4969 primitive_params
= {"params": primitive_params
}
4971 vca_type
= vca_type
or "lxc_proxy_charm"
4975 output
= await asyncio
.wait_for(
4976 self
.vca_map
[vca_type
].exec_primitive(
4978 primitive_name
=primitive
,
4979 params_dict
=primitive_params
,
4980 progress_timeout
=self
.timeout
.progress_primitive
,
4981 total_timeout
=self
.timeout
.primitive
,
4986 timeout
=timeout
or self
.timeout
.primitive
,
4990 except asyncio
.CancelledError
:
4992 except Exception as e
:
4996 "Error executing action {} on {} -> {}".format(
5001 await asyncio
.sleep(retries_interval
, loop
=self
.loop
)
5003 if isinstance(e
, asyncio
.TimeoutError
):
5005 message
="Timed out waiting for action to complete"
5007 return "FAILED", getattr(e
, "message", repr(e
))
5009 return "COMPLETED", output
5011 except (LcmException
, asyncio
.CancelledError
):
5013 except Exception as e
:
5014 return "FAIL", "Error executing action {}: {}".format(primitive
, e
)
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Updating the vca_status with latest juju information in nsrs record
    :param: nsr_id: Id of the nsr
    :param: nslcmop_id: Id of the nslcmop
    :return: None
    """
    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_id = self.get_vca_id({}, db_nsr)
    if db_nsr["_admin"]["deployed"]["K8s"]:
        # KDU-based deployment: refresh each K8s record
        for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
            cluster_uuid, kdu_instance, cluster_type = (
                k8s["k8scluster-uuid"],
                k8s["kdu-instance"],
                k8s["k8scluster-type"],
            )
            await self._on_update_k8s_db(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                filter={"_id": nsr_id},
                vca_id=vca_id,
                cluster_type=cluster_type,
            )
    else:
        # charm-based deployment: refresh each VCA record
        for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
            table, filter = "nsrs", {"_id": nsr_id}
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            await self._on_update_n2vc_db(table, filter, path, {})

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5050 async def action(self
, nsr_id
, nslcmop_id
):
5051 # Try to lock HA task here
5052 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5053 if not task_is_locked_by_me
:
5056 logging_text
= "Task ns={} action={} ".format(nsr_id
, nslcmop_id
)
5057 self
.logger
.debug(logging_text
+ "Enter")
5058 # get all needed from database
5062 db_nslcmop_update
= {}
5063 nslcmop_operation_state
= None
5064 error_description_nslcmop
= None
5068 # wait for any previous tasks in process
5069 step
= "Waiting for previous operations to terminate"
5070 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5072 self
._write
_ns
_status
(
5075 current_operation
="RUNNING ACTION",
5076 current_operation_id
=nslcmop_id
,
5079 step
= "Getting information from database"
5080 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5081 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5082 if db_nslcmop
["operationParams"].get("primitive_params"):
5083 db_nslcmop
["operationParams"]["primitive_params"] = json
.loads(
5084 db_nslcmop
["operationParams"]["primitive_params"]
5087 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5088 vnf_index
= db_nslcmop
["operationParams"].get("member_vnf_index")
5089 vdu_id
= db_nslcmop
["operationParams"].get("vdu_id")
5090 kdu_name
= db_nslcmop
["operationParams"].get("kdu_name")
5091 vdu_count_index
= db_nslcmop
["operationParams"].get("vdu_count_index")
5092 primitive
= db_nslcmop
["operationParams"]["primitive"]
5093 primitive_params
= db_nslcmop
["operationParams"]["primitive_params"]
5094 timeout_ns_action
= db_nslcmop
["operationParams"].get(
5095 "timeout_ns_action", self
.timeout
.primitive
5099 step
= "Getting vnfr from database"
5100 db_vnfr
= self
.db
.get_one(
5101 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
5103 if db_vnfr
.get("kdur"):
5105 for kdur
in db_vnfr
["kdur"]:
5106 if kdur
.get("additionalParams"):
5107 kdur
["additionalParams"] = json
.loads(
5108 kdur
["additionalParams"]
5110 kdur_list
.append(kdur
)
5111 db_vnfr
["kdur"] = kdur_list
5112 step
= "Getting vnfd from database"
5113 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
5115 # Sync filesystem before running a primitive
5116 self
.fs
.sync(db_vnfr
["vnfd-id"])
5118 step
= "Getting nsd from database"
5119 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
5121 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5122 # for backward compatibility
5123 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
5124 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
5125 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
5126 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5128 # look for primitive
5129 config_primitive_desc
= descriptor_configuration
= None
5131 descriptor_configuration
= get_configuration(db_vnfd
, vdu_id
)
5133 descriptor_configuration
= get_configuration(db_vnfd
, kdu_name
)
5135 descriptor_configuration
= get_configuration(db_vnfd
, db_vnfd
["id"])
5137 descriptor_configuration
= db_nsd
.get("ns-configuration")
5139 if descriptor_configuration
and descriptor_configuration
.get(
5142 for config_primitive
in descriptor_configuration
["config-primitive"]:
5143 if config_primitive
["name"] == primitive
:
5144 config_primitive_desc
= config_primitive
5147 if not config_primitive_desc
:
5148 if not (kdu_name
and primitive
in ("upgrade", "rollback", "status")):
5150 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5154 primitive_name
= primitive
5155 ee_descriptor_id
= None
5157 primitive_name
= config_primitive_desc
.get(
5158 "execution-environment-primitive", primitive
5160 ee_descriptor_id
= config_primitive_desc
.get(
5161 "execution-environment-ref"
5167 (x
for x
in db_vnfr
["vdur"] if x
["vdu-id-ref"] == vdu_id
), None
5169 desc_params
= parse_yaml_strings(vdur
.get("additionalParams"))
5172 (x
for x
in db_vnfr
["kdur"] if x
["kdu-name"] == kdu_name
), None
5174 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
5176 desc_params
= parse_yaml_strings(
5177 db_vnfr
.get("additionalParamsForVnf")
5180 desc_params
= parse_yaml_strings(db_nsr
.get("additionalParamsForNs"))
5181 if kdu_name
and get_configuration(db_vnfd
, kdu_name
):
5182 kdu_configuration
= get_configuration(db_vnfd
, kdu_name
)
5184 for primitive
in kdu_configuration
.get("initial-config-primitive", []):
5185 actions
.add(primitive
["name"])
5186 for primitive
in kdu_configuration
.get("config-primitive", []):
5187 actions
.add(primitive
["name"])
5189 nsr_deployed
["K8s"],
5190 lambda kdu
: kdu_name
== kdu
["kdu-name"]
5191 and kdu
["member-vnf-index"] == vnf_index
,
5195 if primitive_name
in actions
5196 and kdu
["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
5200 # TODO check if ns is in a proper status
5202 primitive_name
in ("upgrade", "rollback", "status") or kdu_action
5204 # kdur and desc_params already set from before
5205 if primitive_params
:
5206 desc_params
.update(primitive_params
)
5207 # TODO Check if we will need something at vnf level
5208 for index
, kdu
in enumerate(get_iterable(nsr_deployed
, "K8s")):
5210 kdu_name
== kdu
["kdu-name"]
5211 and kdu
["member-vnf-index"] == vnf_index
5216 "KDU '{}' for vnf '{}' not deployed".format(kdu_name
, vnf_index
)
5219 if kdu
.get("k8scluster-type") not in self
.k8scluster_map
:
5220 msg
= "unknown k8scluster-type '{}'".format(
5221 kdu
.get("k8scluster-type")
5223 raise LcmException(msg
)
5226 "collection": "nsrs",
5227 "filter": {"_id": nsr_id
},
5228 "path": "_admin.deployed.K8s.{}".format(index
),
5232 + "Exec k8s {} on {}.{}".format(primitive_name
, vnf_index
, kdu_name
)
5234 step
= "Executing kdu {}".format(primitive_name
)
5235 if primitive_name
== "upgrade":
5236 if desc_params
.get("kdu_model"):
5237 kdu_model
= desc_params
.get("kdu_model")
5238 del desc_params
["kdu_model"]
5240 kdu_model
= kdu
.get("kdu-model")
5241 if kdu_model
.count("/") < 2: # helm chart is not embedded
5242 parts
= kdu_model
.split(sep
=":")
5244 kdu_model
= parts
[0]
5245 if desc_params
.get("kdu_atomic_upgrade"):
5246 atomic_upgrade
= desc_params
.get(
5247 "kdu_atomic_upgrade"
5248 ).lower() in ("yes", "true", "1")
5249 del desc_params
["kdu_atomic_upgrade"]
5251 atomic_upgrade
= True
5253 detailed_status
= await asyncio
.wait_for(
5254 self
.k8scluster_map
[kdu
["k8scluster-type"]].upgrade(
5255 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5256 kdu_instance
=kdu
.get("kdu-instance"),
5257 atomic
=atomic_upgrade
,
5258 kdu_model
=kdu_model
,
5261 timeout
=timeout_ns_action
,
5263 timeout
=timeout_ns_action
+ 10,
5266 logging_text
+ " Upgrade of kdu {} done".format(detailed_status
)
5268 elif primitive_name
== "rollback":
5269 detailed_status
= await asyncio
.wait_for(
5270 self
.k8scluster_map
[kdu
["k8scluster-type"]].rollback(
5271 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5272 kdu_instance
=kdu
.get("kdu-instance"),
5275 timeout
=timeout_ns_action
,
5277 elif primitive_name
== "status":
5278 detailed_status
= await asyncio
.wait_for(
5279 self
.k8scluster_map
[kdu
["k8scluster-type"]].status_kdu(
5280 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5281 kdu_instance
=kdu
.get("kdu-instance"),
5284 timeout
=timeout_ns_action
,
5287 kdu_instance
= kdu
.get("kdu-instance") or "{}-{}".format(
5288 kdu
["kdu-name"], nsr_id
5290 params
= self
._map
_primitive
_params
(
5291 config_primitive_desc
, primitive_params
, desc_params
5294 detailed_status
= await asyncio
.wait_for(
5295 self
.k8scluster_map
[kdu
["k8scluster-type"]].exec_primitive(
5296 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5297 kdu_instance
=kdu_instance
,
5298 primitive_name
=primitive_name
,
5301 timeout
=timeout_ns_action
,
5304 timeout
=timeout_ns_action
,
5308 nslcmop_operation_state
= "COMPLETED"
5310 detailed_status
= ""
5311 nslcmop_operation_state
= "FAILED"
5313 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
5314 nsr_deployed
["VCA"],
5315 member_vnf_index
=vnf_index
,
5317 vdu_count_index
=vdu_count_index
,
5318 ee_descriptor_id
=ee_descriptor_id
,
5320 for vca_index
, vca_deployed
in enumerate(
5321 db_nsr
["_admin"]["deployed"]["VCA"]
5323 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5325 "collection": "nsrs",
5326 "filter": {"_id": nsr_id
},
5327 "path": "_admin.deployed.VCA.{}.".format(vca_index
),
5331 nslcmop_operation_state
,
5333 ) = await self
._ns
_execute
_primitive
(
5335 primitive
=primitive_name
,
5336 primitive_params
=self
._map
_primitive
_params
(
5337 config_primitive_desc
, primitive_params
, desc_params
5339 timeout
=timeout_ns_action
,
5345 db_nslcmop_update
["detailed-status"] = detailed_status
5346 error_description_nslcmop
= (
5347 detailed_status
if nslcmop_operation_state
== "FAILED" else ""
5351 + "Done with result {} {}".format(
5352 nslcmop_operation_state
, detailed_status
5355 return # database update is called inside finally
5357 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
5358 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
5360 except asyncio
.CancelledError
:
5362 logging_text
+ "Cancelled Exception while '{}'".format(step
)
5364 exc
= "Operation was cancelled"
5365 except asyncio
.TimeoutError
:
5366 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
5368 except Exception as e
:
5369 exc
= traceback
.format_exc()
5370 self
.logger
.critical(
5371 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
5380 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
5381 nslcmop_operation_state
= "FAILED"
5383 self
._write
_ns
_status
(
5387 ], # TODO check if degraded. For the moment use previous status
5388 current_operation
="IDLE",
5389 current_operation_id
=None,
5390 # error_description=error_description_nsr,
5391 # error_detail=error_detail,
5392 other_update
=db_nsr_update
,
5395 self
._write
_op
_status
(
5398 error_message
=error_description_nslcmop
,
5399 operation_state
=nslcmop_operation_state
,
5400 other_update
=db_nslcmop_update
,
5403 if nslcmop_operation_state
:
5405 await self
.msg
.aiowrite(
5410 "nslcmop_id": nslcmop_id
,
5411 "operationState": nslcmop_operation_state
,
5415 except Exception as e
:
5417 logging_text
+ "kafka_write notification Exception {}".format(e
)
5419 self
.logger
.debug(logging_text
+ "Exit")
5420 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_action")
5421 return nslcmop_operation_state
, detailed_status
async def terminate_vdus(
    self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
):
    """This method terminates VDUs

    Args:
        db_vnfr: VNF instance record
        member_vnf_index: VNF index to identify the VDUs to be removed
        db_nsr: NS instance record
        update_db_nslcmops: Nslcmop update record
        stage: list of stage texts; stage[2] is updated here
        logging_text: log prefix
    """
    vca_scaling_info = []
    scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
    scaling_info["scaling_direction"] = "IN"
    scaling_info["vdu-delete"] = {}
    scaling_info["kdu-delete"] = {}
    db_vdur = db_vnfr.get("vdur")
    vdur_list = copy(db_vdur)
    # NOTE(review): count_index initialization reconstructed (line lost in
    # extraction); assumes a single instance per VDU — verify.
    count_index = 0
    for index, vdu in enumerate(vdur_list):
        vca_scaling_info.append(
            {
                "osm_vdu_id": vdu["vdu-id-ref"],
                "member-vnf-index": member_vnf_index,
                "type": "delete",
                "vdu_index": count_index,
            }
        )
        scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
        scaling_info["vdu"].append(
            {
                "name": vdu.get("name") or vdu.get("vdu-name"),
                "vdu_id": vdu["vdu-id-ref"],
                "interface": [],
            }
        )
        for interface in vdu["interfaces"]:
            scaling_info["vdu"][index]["interface"].append(
                {
                    "name": interface["name"],
                    "ip_address": interface["ip-address"],
                    "mac_address": interface.get("mac-address"),
                }
            )
    self.logger.info("NS update scaling info{}".format(scaling_info))
    stage[2] = "Terminating VDUs"
    if scaling_info.get("vdu-delete"):
        # scale_process = "RO"
        if self.ro_config.ng:
            # NOTE(review): argument order reconstructed (lines lost in
            # extraction) — verify against _scale_ng_ro's signature.
            await self._scale_ng_ro(
                logging_text,
                db_nsr,
                update_db_nslcmops,
                db_vnfr,
                scaling_info,
                stage,
            )
async def remove_vnf(self, nsr_id, nslcmop_id, vnf_instance_id):
    """This method is to Remove VNF instances from NS.

    Args:
        nsr_id: NS instance id
        nslcmop_id: nslcmop id of update
        vnf_instance_id: id of the VNF instance to be removed

    Returns:
        result: (str, str) COMPLETED/FAILED, details
    """
    try:
        db_nsr_update = {}
        logging_text = "Task ns={} update ".format(nsr_id)
        check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
        self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
        # refuse to remove the last VNF of the NS
        if check_vnfr_count > 1:
            stage = ["", "", ""]
            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
            member_vnf_index = db_vnfr["member-vnf-index-ref"]
            """ db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """

            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(
                db_vnfr,
                member_vnf_index,
                db_nsr,
                update_db_nslcmops,
                stage,
                logging_text,
            )

            # drop the removed VNF from the NS constituent list (in-place)
            constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
            constituent_vnfr.remove(db_vnfr.get("_id"))
            db_nsr_update["constituent-vnfr-ref"] = db_nsr.get(
                "constituent-vnfr-ref"
            )
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            return "COMPLETED", "Done"
        else:
            step = "Terminate VNF Failed with"
            raise LcmException(
                "{} Cannot terminate the last VNF in this NS.".format(
                    vnf_instance_id
                )
            )
    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        self.logger.debug("Error removing VNF {}".format(e))
        return "FAILED", "Error removing VNF {}".format(e)
5542 async def _ns_redeploy_vnf(
5550 """This method updates and redeploys VNF instances
5553 nsr_id: NS instance id
5554 nslcmop_id: nslcmop id
5555 db_vnfd: VNF descriptor
5556 db_vnfr: VNF instance record
5557 db_nsr: NS instance record
5560 result: (str, str) COMPLETED/FAILED, details
5564 stage
= ["", "", ""]
5565 logging_text
= "Task ns={} update ".format(nsr_id
)
5566 latest_vnfd_revision
= db_vnfd
["_admin"].get("revision")
5567 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5569 # Terminate old VNF resources
5570 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5571 await self
.terminate_vdus(
5580 # old_vnfd_id = db_vnfr["vnfd-id"]
5581 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5582 new_db_vnfd
= db_vnfd
5583 # new_vnfd_ref = new_db_vnfd["id"]
5584 # new_vnfd_id = vnfd_id
5588 for cp
in new_db_vnfd
.get("ext-cpd", ()):
5590 "name": cp
.get("id"),
5591 "connection-point-id": cp
.get("int-cpd", {}).get("cpd"),
5592 "connection-point-vdu-id": cp
.get("int-cpd", {}).get("vdu-id"),
5595 new_vnfr_cp
.append(vnf_cp
)
5596 new_vdur
= update_db_nslcmops
["operationParams"]["newVdur"]
5597 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5598 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5600 "revision": latest_vnfd_revision
,
5601 "connection-point": new_vnfr_cp
,
5605 self
.update_db_2("vnfrs", db_vnfr
["_id"], new_vnfr_update
)
5606 updated_db_vnfr
= self
.db
.get_one(
5608 {"member-vnf-index-ref": member_vnf_index
, "nsr-id-ref": nsr_id
},
5611 # Instantiate new VNF resources
5612 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5613 vca_scaling_info
= []
5614 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5615 scaling_info
["scaling_direction"] = "OUT"
5616 scaling_info
["vdu-create"] = {}
5617 scaling_info
["kdu-create"] = {}
5618 vdud_instantiate_list
= db_vnfd
["vdu"]
5619 for index
, vdud
in enumerate(vdud_instantiate_list
):
5620 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(vdud
, db_vnfd
)
5622 additional_params
= (
5623 self
._get
_vdu
_additional
_params
(updated_db_vnfr
, vdud
["id"])
5626 cloud_init_list
= []
5628 # TODO Information of its own ip is not available because db_vnfr is not updated.
5629 additional_params
["OSM"] = get_osm_params(
5630 updated_db_vnfr
, vdud
["id"], 1
5632 cloud_init_list
.append(
5633 self
._parse
_cloud
_init
(
5640 vca_scaling_info
.append(
5642 "osm_vdu_id": vdud
["id"],
5643 "member-vnf-index": member_vnf_index
,
5645 "vdu_index": count_index
,
5648 scaling_info
["vdu-create"][vdud
["id"]] = count_index
5649 if self
.ro_config
.ng
:
5651 "New Resources to be deployed: {}".format(scaling_info
)
5653 await self
._scale
_ng
_ro
(
5661 return "COMPLETED", "Done"
5662 except (LcmException
, asyncio
.CancelledError
):
5664 except Exception as e
:
5665 self
.logger
.debug("Error updating VNF {}".format(e
))
5666 return "FAILED", "Error updating VNF {}".format(e
)
5668 async def _ns_charm_upgrade(
5674 timeout
: float = None,
5676 """This method upgrade charms in VNF instances
5679 ee_id: Execution environment id
5680 path: Local path to the charm
5682 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5683 timeout: (Float) Timeout for the ns update operation
5686 result: (str, str) COMPLETED/FAILED, details
5689 charm_type
= charm_type
or "lxc_proxy_charm"
5690 output
= await self
.vca_map
[charm_type
].upgrade_charm(
5694 charm_type
=charm_type
,
5695 timeout
=timeout
or self
.timeout
.ns_update
,
5699 return "COMPLETED", output
5701 except (LcmException
, asyncio
.CancelledError
):
5704 except Exception as e
:
5705 self
.logger
.debug("Error upgrading charm {}".format(path
))
5707 return "FAILED", "Error upgrading charm {}: {}".format(path
, e
)
5709 async def update(self
, nsr_id
, nslcmop_id
):
5710 """Update NS according to different update types
5712 This method performs upgrade of VNF instances then updates the revision
5713 number in VNF record
5716 nsr_id: Network service will be updated
5717 nslcmop_id: ns lcm operation id
5720 It may raise DbException, LcmException, N2VCException, K8sException
5723 # Try to lock HA task here
5724 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5725 if not task_is_locked_by_me
:
5728 logging_text
= "Task ns={} update={} ".format(nsr_id
, nslcmop_id
)
5729 self
.logger
.debug(logging_text
+ "Enter")
5731 # Set the required variables to be filled up later
5733 db_nslcmop_update
= {}
5735 nslcmop_operation_state
= None
5737 error_description_nslcmop
= ""
5739 change_type
= "updated"
5740 detailed_status
= ""
5741 member_vnf_index
= None
5744 # wait for any previous tasks in process
5745 step
= "Waiting for previous operations to terminate"
5746 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5747 self
._write
_ns
_status
(
5750 current_operation
="UPDATING",
5751 current_operation_id
=nslcmop_id
,
5754 step
= "Getting nslcmop from database"
5755 db_nslcmop
= self
.db
.get_one(
5756 "nslcmops", {"_id": nslcmop_id
}, fail_on_empty
=False
5758 update_type
= db_nslcmop
["operationParams"]["updateType"]
5760 step
= "Getting nsr from database"
5761 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5762 old_operational_status
= db_nsr
["operational-status"]
5763 db_nsr_update
["operational-status"] = "updating"
5764 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5765 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5767 if update_type
== "CHANGE_VNFPKG":
5768 # Get the input parameters given through update request
5769 vnf_instance_id
= db_nslcmop
["operationParams"][
5770 "changeVnfPackageData"
5771 ].get("vnfInstanceId")
5773 vnfd_id
= db_nslcmop
["operationParams"]["changeVnfPackageData"].get(
5776 timeout_seconds
= db_nslcmop
["operationParams"].get("timeout_ns_update")
5778 step
= "Getting vnfr from database"
5779 db_vnfr
= self
.db
.get_one(
5780 "vnfrs", {"_id": vnf_instance_id
}, fail_on_empty
=False
5783 step
= "Getting vnfds from database"
5785 latest_vnfd
= self
.db
.get_one(
5786 "vnfds", {"_id": vnfd_id
}, fail_on_empty
=False
5788 latest_vnfd_revision
= latest_vnfd
["_admin"].get("revision")
5791 current_vnf_revision
= db_vnfr
.get("revision", 1)
5792 current_vnfd
= self
.db
.get_one(
5794 {"_id": vnfd_id
+ ":" + str(current_vnf_revision
)},
5795 fail_on_empty
=False,
5797 # Charm artifact paths will be filled up later
5799 current_charm_artifact_path
,
5800 target_charm_artifact_path
,
5801 charm_artifact_paths
,
5803 ) = ([], [], [], [])
5805 step
= "Checking if revision has changed in VNFD"
5806 if current_vnf_revision
!= latest_vnfd_revision
:
5807 change_type
= "policy_updated"
5809 # There is new revision of VNFD, update operation is required
5810 current_vnfd_path
= vnfd_id
+ ":" + str(current_vnf_revision
)
5811 latest_vnfd_path
= vnfd_id
+ ":" + str(latest_vnfd_revision
)
5813 step
= "Removing the VNFD packages if they exist in the local path"
5814 shutil
.rmtree(self
.fs
.path
+ current_vnfd_path
, ignore_errors
=True)
5815 shutil
.rmtree(self
.fs
.path
+ latest_vnfd_path
, ignore_errors
=True)
5817 step
= "Get the VNFD packages from FSMongo"
5818 self
.fs
.sync(from_path
=latest_vnfd_path
)
5819 self
.fs
.sync(from_path
=current_vnfd_path
)
5822 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5824 current_base_folder
= current_vnfd
["_admin"]["storage"]
5825 latest_base_folder
= latest_vnfd
["_admin"]["storage"]
5827 for vca_index
, vca_deployed
in enumerate(
5828 get_iterable(nsr_deployed
, "VCA")
5830 vnf_index
= db_vnfr
.get("member-vnf-index-ref")
5832 # Getting charm-id and charm-type
5833 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5834 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5835 vca_type
= vca_deployed
.get("type")
5836 vdu_count_index
= vca_deployed
.get("vdu_count_index")
5839 ee_id
= vca_deployed
.get("ee_id")
5841 step
= "Getting descriptor config"
5842 if current_vnfd
.get("kdu"):
5843 search_key
= "kdu_name"
5845 search_key
= "vnfd_id"
5847 entity_id
= vca_deployed
.get(search_key
)
5849 descriptor_config
= get_configuration(
5850 current_vnfd
, entity_id
5853 if "execution-environment-list" in descriptor_config
:
5854 ee_list
= descriptor_config
.get(
5855 "execution-environment-list", []
5860 # There could be several charm used in the same VNF
5861 for ee_item
in ee_list
:
5862 if ee_item
.get("juju"):
5863 step
= "Getting charm name"
5864 charm_name
= ee_item
["juju"].get("charm")
5866 step
= "Setting Charm artifact paths"
5867 current_charm_artifact_path
.append(
5868 get_charm_artifact_path(
5869 current_base_folder
,
5872 current_vnf_revision
,
5875 target_charm_artifact_path
.append(
5876 get_charm_artifact_path(
5880 latest_vnfd_revision
,
5883 elif ee_item
.get("helm-chart"):
5884 # add chart to list and all parameters
5885 step
= "Getting helm chart name"
5886 chart_name
= ee_item
.get("helm-chart")
5888 ee_item
.get("helm-version")
5889 and ee_item
.get("helm-version") == "v2"
5893 vca_type
= "helm-v3"
5894 step
= "Setting Helm chart artifact paths"
5896 helm_artifacts
.append(
5898 "current_artifact_path": get_charm_artifact_path(
5899 current_base_folder
,
5902 current_vnf_revision
,
5904 "target_artifact_path": get_charm_artifact_path(
5908 latest_vnfd_revision
,
5911 "vca_index": vca_index
,
5912 "vdu_index": vdu_count_index
,
5916 charm_artifact_paths
= zip(
5917 current_charm_artifact_path
, target_charm_artifact_path
5920 step
= "Checking if software version has changed in VNFD"
5921 if find_software_version(current_vnfd
) != find_software_version(
5924 step
= "Checking if existing VNF has charm"
5925 for current_charm_path
, target_charm_path
in list(
5926 charm_artifact_paths
5928 if current_charm_path
:
5930 "Software version change is not supported as VNF instance {} has charm.".format(
5935 # There is no change in the charm package, then redeploy the VNF
5936 # based on new descriptor
5937 step
= "Redeploying VNF"
5938 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5939 (result
, detailed_status
) = await self
._ns
_redeploy
_vnf
(
5940 nsr_id
, nslcmop_id
, latest_vnfd
, db_vnfr
, db_nsr
5942 if result
== "FAILED":
5943 nslcmop_operation_state
= result
5944 error_description_nslcmop
= detailed_status
5945 db_nslcmop_update
["detailed-status"] = detailed_status
5948 + " step {} Done with result {} {}".format(
5949 step
, nslcmop_operation_state
, detailed_status
5954 step
= "Checking if any charm package has changed or not"
5955 for current_charm_path
, target_charm_path
in list(
5956 charm_artifact_paths
5960 and target_charm_path
5961 and self
.check_charm_hash_changed(
5962 current_charm_path
, target_charm_path
5965 step
= "Checking whether VNF uses juju bundle"
5966 if check_juju_bundle_existence(current_vnfd
):
5968 "Charm upgrade is not supported for the instance which"
5969 " uses juju-bundle: {}".format(
5970 check_juju_bundle_existence(current_vnfd
)
5974 step
= "Upgrading Charm"
5978 ) = await self
._ns
_charm
_upgrade
(
5981 charm_type
=vca_type
,
5982 path
=self
.fs
.path
+ target_charm_path
,
5983 timeout
=timeout_seconds
,
5986 if result
== "FAILED":
5987 nslcmop_operation_state
= result
5988 error_description_nslcmop
= detailed_status
5990 db_nslcmop_update
["detailed-status"] = detailed_status
5993 + " step {} Done with result {} {}".format(
5994 step
, nslcmop_operation_state
, detailed_status
5998 step
= "Updating policies"
5999 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6000 result
= "COMPLETED"
6001 detailed_status
= "Done"
6002 db_nslcmop_update
["detailed-status"] = "Done"
6005 for item
in helm_artifacts
:
6007 item
["current_artifact_path"]
6008 and item
["target_artifact_path"]
6009 and self
.check_charm_hash_changed(
6010 item
["current_artifact_path"],
6011 item
["target_artifact_path"],
6015 db_update_entry
= "_admin.deployed.VCA.{}.".format(
6018 vnfr_id
= db_vnfr
["_id"]
6019 osm_config
= {"osm": {"ns_id": nsr_id
, "vnf_id": vnfr_id
}}
6021 "collection": "nsrs",
6022 "filter": {"_id": nsr_id
},
6023 "path": db_update_entry
,
6025 vca_type
, namespace
, helm_id
= get_ee_id_parts(item
["ee_id"])
6026 await self
.vca_map
[vca_type
].upgrade_execution_environment(
6027 namespace
=namespace
,
6031 artifact_path
=item
["target_artifact_path"],
6034 vnf_id
= db_vnfr
.get("vnfd-ref")
6035 config_descriptor
= get_configuration(latest_vnfd
, vnf_id
)
6036 self
.logger
.debug("get ssh key block")
6040 ("config-access", "ssh-access", "required"),
6042 # Needed to inject a ssh key
6045 ("config-access", "ssh-access", "default-user"),
6048 "Install configuration Software, getting public ssh key"
6050 pub_key
= await self
.vca_map
[
6052 ].get_ee_ssh_public__key(
6053 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
6057 "Insert public key into VM user={} ssh_key={}".format(
6061 self
.logger
.debug(logging_text
+ step
)
6063 # wait for RO (ip-address) Insert pub_key into VM
6064 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
6074 initial_config_primitive_list
= config_descriptor
.get(
6075 "initial-config-primitive"
6077 config_primitive
= next(
6080 for p
in initial_config_primitive_list
6081 if p
["name"] == "config"
6085 if not config_primitive
:
6088 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6090 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
6091 if db_vnfr
.get("additionalParamsForVnf"):
6092 deploy_params
.update(
6094 db_vnfr
["additionalParamsForVnf"].copy()
6097 primitive_params_
= self
._map
_primitive
_params
(
6098 config_primitive
, {}, deploy_params
6101 step
= "execute primitive '{}' params '{}'".format(
6102 config_primitive
["name"], primitive_params_
6104 self
.logger
.debug(logging_text
+ step
)
6105 await self
.vca_map
[vca_type
].exec_primitive(
6107 primitive_name
=config_primitive
["name"],
6108 params_dict
=primitive_params_
,
6114 step
= "Updating policies"
6115 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6116 detailed_status
= "Done"
6117 db_nslcmop_update
["detailed-status"] = "Done"
6119 # If nslcmop_operation_state is None, so any operation is not failed.
6120 if not nslcmop_operation_state
:
6121 nslcmop_operation_state
= "COMPLETED"
6123 # If update CHANGE_VNFPKG nslcmop_operation is successful
6124 # vnf revision need to be updated
6125 vnfr_update
["revision"] = latest_vnfd_revision
6126 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
6130 + " task Done with result {} {}".format(
6131 nslcmop_operation_state
, detailed_status
6134 elif update_type
== "REMOVE_VNF":
6135 # This part is included in https://osm.etsi.org/gerrit/11876
6136 vnf_instance_id
= db_nslcmop
["operationParams"]["removeVnfInstanceId"]
6137 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
6138 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6139 step
= "Removing VNF"
6140 (result
, detailed_status
) = await self
.remove_vnf(
6141 nsr_id
, nslcmop_id
, vnf_instance_id
6143 if result
== "FAILED":
6144 nslcmop_operation_state
= result
6145 error_description_nslcmop
= detailed_status
6146 db_nslcmop_update
["detailed-status"] = detailed_status
6147 change_type
= "vnf_terminated"
6148 if not nslcmop_operation_state
:
6149 nslcmop_operation_state
= "COMPLETED"
6152 + " task Done with result {} {}".format(
6153 nslcmop_operation_state
, detailed_status
6157 elif update_type
== "OPERATE_VNF":
6158 vnf_id
= db_nslcmop
["operationParams"]["operateVnfData"][
6161 operation_type
= db_nslcmop
["operationParams"]["operateVnfData"][
6164 additional_param
= db_nslcmop
["operationParams"]["operateVnfData"][
6167 (result
, detailed_status
) = await self
.rebuild_start_stop(
6168 nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
6170 if result
== "FAILED":
6171 nslcmop_operation_state
= result
6172 error_description_nslcmop
= detailed_status
6173 db_nslcmop_update
["detailed-status"] = detailed_status
6174 if not nslcmop_operation_state
:
6175 nslcmop_operation_state
= "COMPLETED"
6178 + " task Done with result {} {}".format(
6179 nslcmop_operation_state
, detailed_status
6183 # If nslcmop_operation_state is None, so any operation is not failed.
6184 # All operations are executed in overall.
6185 if not nslcmop_operation_state
:
6186 nslcmop_operation_state
= "COMPLETED"
6187 db_nsr_update
["operational-status"] = old_operational_status
6189 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
6190 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
6192 except asyncio
.CancelledError
:
6194 logging_text
+ "Cancelled Exception while '{}'".format(step
)
6196 exc
= "Operation was cancelled"
6197 except asyncio
.TimeoutError
:
6198 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
6200 except Exception as e
:
6201 exc
= traceback
.format_exc()
6202 self
.logger
.critical(
6203 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
6212 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
6213 nslcmop_operation_state
= "FAILED"
6214 db_nsr_update
["operational-status"] = old_operational_status
6216 self
._write
_ns
_status
(
6218 ns_state
=db_nsr
["nsState"],
6219 current_operation
="IDLE",
6220 current_operation_id
=None,
6221 other_update
=db_nsr_update
,
6224 self
._write
_op
_status
(
6227 error_message
=error_description_nslcmop
,
6228 operation_state
=nslcmop_operation_state
,
6229 other_update
=db_nslcmop_update
,
6232 if nslcmop_operation_state
:
6236 "nslcmop_id": nslcmop_id
,
6237 "operationState": nslcmop_operation_state
,
6240 change_type
in ("vnf_terminated", "policy_updated")
6241 and member_vnf_index
6243 msg
.update({"vnf_member_index": member_vnf_index
})
6244 await self
.msg
.aiowrite("ns", change_type
, msg
, loop
=self
.loop
)
6245 except Exception as e
:
6247 logging_text
+ "kafka_write notification Exception {}".format(e
)
6249 self
.logger
.debug(logging_text
+ "Exit")
6250 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_update")
6251 return nslcmop_operation_state
, detailed_status
6253 async def scale(self
, nsr_id
, nslcmop_id
):
6254 # Try to lock HA task here
6255 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
6256 if not task_is_locked_by_me
:
6259 logging_text
= "Task ns={} scale={} ".format(nsr_id
, nslcmop_id
)
6260 stage
= ["", "", ""]
6261 tasks_dict_info
= {}
6262 # ^ stage, step, VIM progress
6263 self
.logger
.debug(logging_text
+ "Enter")
6264 # get all needed from database
6266 db_nslcmop_update
= {}
6269 # in case of error, indicates what part of scale was failed to put nsr at error status
6270 scale_process
= None
6271 old_operational_status
= ""
6272 old_config_status
= ""
6275 # wait for any previous tasks in process
6276 step
= "Waiting for previous operations to terminate"
6277 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
6278 self
._write
_ns
_status
(
6281 current_operation
="SCALING",
6282 current_operation_id
=nslcmop_id
,
6285 step
= "Getting nslcmop from database"
6287 step
+ " after having waited for previous tasks to be completed"
6289 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
6291 step
= "Getting nsr from database"
6292 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
6293 old_operational_status
= db_nsr
["operational-status"]
6294 old_config_status
= db_nsr
["config-status"]
6296 step
= "Parsing scaling parameters"
6297 db_nsr_update
["operational-status"] = "scaling"
6298 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6299 nsr_deployed
= db_nsr
["_admin"].get("deployed")
6301 vnf_index
= db_nslcmop
["operationParams"]["scaleVnfData"][
6303 ]["member-vnf-index"]
6304 scaling_group
= db_nslcmop
["operationParams"]["scaleVnfData"][
6306 ]["scaling-group-descriptor"]
6307 scaling_type
= db_nslcmop
["operationParams"]["scaleVnfData"]["scaleVnfType"]
6308 # for backward compatibility
6309 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
6310 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
6311 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
6312 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6314 step
= "Getting vnfr from database"
6315 db_vnfr
= self
.db
.get_one(
6316 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
6319 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
6321 step
= "Getting vnfd from database"
6322 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
6324 base_folder
= db_vnfd
["_admin"]["storage"]
6326 step
= "Getting scaling-group-descriptor"
6327 scaling_descriptor
= find_in_list(
6328 get_scaling_aspect(db_vnfd
),
6329 lambda scale_desc
: scale_desc
["name"] == scaling_group
,
6331 if not scaling_descriptor
:
6333 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6334 "at vnfd:scaling-group-descriptor".format(scaling_group
)
6337 step
= "Sending scale order to VIM"
6338 # TODO check if ns is in a proper status
6340 if not db_nsr
["_admin"].get("scaling-group"):
6345 "_admin.scaling-group": [
6346 {"name": scaling_group
, "nb-scale-op": 0}
6350 admin_scale_index
= 0
6352 for admin_scale_index
, admin_scale_info
in enumerate(
6353 db_nsr
["_admin"]["scaling-group"]
6355 if admin_scale_info
["name"] == scaling_group
:
6356 nb_scale_op
= admin_scale_info
.get("nb-scale-op", 0)
6358 else: # not found, set index one plus last element and add new entry with the name
6359 admin_scale_index
+= 1
6361 "_admin.scaling-group.{}.name".format(admin_scale_index
)
6364 vca_scaling_info
= []
6365 scaling_info
= {"scaling_group_name": scaling_group
, "vdu": [], "kdu": []}
6366 if scaling_type
== "SCALE_OUT":
6367 if "aspect-delta-details" not in scaling_descriptor
:
6369 "Aspect delta details not fount in scaling descriptor {}".format(
6370 scaling_descriptor
["name"]
6373 # count if max-instance-count is reached
6374 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6376 scaling_info
["scaling_direction"] = "OUT"
6377 scaling_info
["vdu-create"] = {}
6378 scaling_info
["kdu-create"] = {}
6379 for delta
in deltas
:
6380 for vdu_delta
in delta
.get("vdu-delta", {}):
6381 vdud
= get_vdu(db_vnfd
, vdu_delta
["id"])
6382 # vdu_index also provides the number of instance of the targeted vdu
6383 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6384 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(
6388 additional_params
= (
6389 self
._get
_vdu
_additional
_params
(db_vnfr
, vdud
["id"])
6392 cloud_init_list
= []
6394 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6395 max_instance_count
= 10
6396 if vdu_profile
and "max-number-of-instances" in vdu_profile
:
6397 max_instance_count
= vdu_profile
.get(
6398 "max-number-of-instances", 10
6401 default_instance_num
= get_number_of_instances(
6404 instances_number
= vdu_delta
.get("number-of-instances", 1)
6405 nb_scale_op
+= instances_number
6407 new_instance_count
= nb_scale_op
+ default_instance_num
6408 # Control if new count is over max and vdu count is less than max.
6409 # Then assign new instance count
6410 if new_instance_count
> max_instance_count
> vdu_count
:
6411 instances_number
= new_instance_count
- max_instance_count
6413 instances_number
= instances_number
6415 if new_instance_count
> max_instance_count
:
6417 "reached the limit of {} (max-instance-count) "
6418 "scaling-out operations for the "
6419 "scaling-group-descriptor '{}'".format(
6420 nb_scale_op
, scaling_group
6423 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6425 # TODO Information of its own ip is not available because db_vnfr is not updated.
6426 additional_params
["OSM"] = get_osm_params(
6427 db_vnfr
, vdu_delta
["id"], vdu_index
+ x
6429 cloud_init_list
.append(
6430 self
._parse
_cloud
_init
(
6437 vca_scaling_info
.append(
6439 "osm_vdu_id": vdu_delta
["id"],
6440 "member-vnf-index": vnf_index
,
6442 "vdu_index": vdu_index
+ x
,
6445 scaling_info
["vdu-create"][vdu_delta
["id"]] = instances_number
6446 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6447 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6448 kdu_name
= kdu_profile
["kdu-name"]
6449 resource_name
= kdu_profile
.get("resource-name", "")
6451 # Might have different kdus in the same delta
6452 # Should have list for each kdu
6453 if not scaling_info
["kdu-create"].get(kdu_name
, None):
6454 scaling_info
["kdu-create"][kdu_name
] = []
6456 kdur
= get_kdur(db_vnfr
, kdu_name
)
6457 if kdur
.get("helm-chart"):
6458 k8s_cluster_type
= "helm-chart-v3"
6459 self
.logger
.debug("kdur: {}".format(kdur
))
6461 kdur
.get("helm-version")
6462 and kdur
.get("helm-version") == "v2"
6464 k8s_cluster_type
= "helm-chart"
6465 elif kdur
.get("juju-bundle"):
6466 k8s_cluster_type
= "juju-bundle"
6469 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6470 "juju-bundle. Maybe an old NBI version is running".format(
6471 db_vnfr
["member-vnf-index-ref"], kdu_name
6475 max_instance_count
= 10
6476 if kdu_profile
and "max-number-of-instances" in kdu_profile
:
6477 max_instance_count
= kdu_profile
.get(
6478 "max-number-of-instances", 10
6481 nb_scale_op
+= kdu_delta
.get("number-of-instances", 1)
6482 deployed_kdu
, _
= get_deployed_kdu(
6483 nsr_deployed
, kdu_name
, vnf_index
6485 if deployed_kdu
is None:
6487 "KDU '{}' for vnf '{}' not deployed".format(
6491 kdu_instance
= deployed_kdu
.get("kdu-instance")
6492 instance_num
= await self
.k8scluster_map
[
6498 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6499 kdu_model
=deployed_kdu
.get("kdu-model"),
6501 kdu_replica_count
= instance_num
+ kdu_delta
.get(
6502 "number-of-instances", 1
6505 # Control if new count is over max and instance_num is less than max.
6506 # Then assign max instance number to kdu replica count
6507 if kdu_replica_count
> max_instance_count
> instance_num
:
6508 kdu_replica_count
= max_instance_count
6509 if kdu_replica_count
> max_instance_count
:
6511 "reached the limit of {} (max-instance-count) "
6512 "scaling-out operations for the "
6513 "scaling-group-descriptor '{}'".format(
6514 instance_num
, scaling_group
6518 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6519 vca_scaling_info
.append(
6521 "osm_kdu_id": kdu_name
,
6522 "member-vnf-index": vnf_index
,
6524 "kdu_index": instance_num
+ x
- 1,
6527 scaling_info
["kdu-create"][kdu_name
].append(
6529 "member-vnf-index": vnf_index
,
6531 "k8s-cluster-type": k8s_cluster_type
,
6532 "resource-name": resource_name
,
6533 "scale": kdu_replica_count
,
6536 elif scaling_type
== "SCALE_IN":
6537 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6539 scaling_info
["scaling_direction"] = "IN"
6540 scaling_info
["vdu-delete"] = {}
6541 scaling_info
["kdu-delete"] = {}
6543 for delta
in deltas
:
6544 for vdu_delta
in delta
.get("vdu-delta", {}):
6545 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6546 min_instance_count
= 0
6547 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6548 if vdu_profile
and "min-number-of-instances" in vdu_profile
:
6549 min_instance_count
= vdu_profile
["min-number-of-instances"]
6551 default_instance_num
= get_number_of_instances(
6552 db_vnfd
, vdu_delta
["id"]
6554 instance_num
= vdu_delta
.get("number-of-instances", 1)
6555 nb_scale_op
-= instance_num
6557 new_instance_count
= nb_scale_op
+ default_instance_num
6559 if new_instance_count
< min_instance_count
< vdu_count
:
6560 instances_number
= min_instance_count
- new_instance_count
6562 instances_number
= instance_num
6564 if new_instance_count
< min_instance_count
:
6566 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6567 "scaling-group-descriptor '{}'".format(
6568 nb_scale_op
, scaling_group
6571 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6572 vca_scaling_info
.append(
6574 "osm_vdu_id": vdu_delta
["id"],
6575 "member-vnf-index": vnf_index
,
6577 "vdu_index": vdu_index
- 1 - x
,
6580 scaling_info
["vdu-delete"][vdu_delta
["id"]] = instances_number
6581 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6582 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6583 kdu_name
= kdu_profile
["kdu-name"]
6584 resource_name
= kdu_profile
.get("resource-name", "")
6586 if not scaling_info
["kdu-delete"].get(kdu_name
, None):
6587 scaling_info
["kdu-delete"][kdu_name
] = []
6589 kdur
= get_kdur(db_vnfr
, kdu_name
)
6590 if kdur
.get("helm-chart"):
6591 k8s_cluster_type
= "helm-chart-v3"
6592 self
.logger
.debug("kdur: {}".format(kdur
))
6594 kdur
.get("helm-version")
6595 and kdur
.get("helm-version") == "v2"
6597 k8s_cluster_type
= "helm-chart"
6598 elif kdur
.get("juju-bundle"):
6599 k8s_cluster_type
= "juju-bundle"
6602 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6603 "juju-bundle. Maybe an old NBI version is running".format(
6604 db_vnfr
["member-vnf-index-ref"], kdur
["kdu-name"]
6608 min_instance_count
= 0
6609 if kdu_profile
and "min-number-of-instances" in kdu_profile
:
6610 min_instance_count
= kdu_profile
["min-number-of-instances"]
6612 nb_scale_op
-= kdu_delta
.get("number-of-instances", 1)
6613 deployed_kdu
, _
= get_deployed_kdu(
6614 nsr_deployed
, kdu_name
, vnf_index
6616 if deployed_kdu
is None:
6618 "KDU '{}' for vnf '{}' not deployed".format(
6622 kdu_instance
= deployed_kdu
.get("kdu-instance")
6623 instance_num
= await self
.k8scluster_map
[
6629 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6630 kdu_model
=deployed_kdu
.get("kdu-model"),
6632 kdu_replica_count
= instance_num
- kdu_delta
.get(
6633 "number-of-instances", 1
6636 if kdu_replica_count
< min_instance_count
< instance_num
:
6637 kdu_replica_count
= min_instance_count
6638 if kdu_replica_count
< min_instance_count
:
6640 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6641 "scaling-group-descriptor '{}'".format(
6642 instance_num
, scaling_group
6646 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6647 vca_scaling_info
.append(
6649 "osm_kdu_id": kdu_name
,
6650 "member-vnf-index": vnf_index
,
6652 "kdu_index": instance_num
- x
- 1,
6655 scaling_info
["kdu-delete"][kdu_name
].append(
6657 "member-vnf-index": vnf_index
,
6659 "k8s-cluster-type": k8s_cluster_type
,
6660 "resource-name": resource_name
,
6661 "scale": kdu_replica_count
,
6665 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6666 vdu_delete
= copy(scaling_info
.get("vdu-delete"))
6667 if scaling_info
["scaling_direction"] == "IN":
6668 for vdur
in reversed(db_vnfr
["vdur"]):
6669 if vdu_delete
.get(vdur
["vdu-id-ref"]):
6670 vdu_delete
[vdur
["vdu-id-ref"]] -= 1
6671 scaling_info
["vdu"].append(
6673 "name": vdur
.get("name") or vdur
.get("vdu-name"),
6674 "vdu_id": vdur
["vdu-id-ref"],
6678 for interface
in vdur
["interfaces"]:
6679 scaling_info
["vdu"][-1]["interface"].append(
6681 "name": interface
["name"],
6682 "ip_address": interface
["ip-address"],
6683 "mac_address": interface
.get("mac-address"),
6686 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6689 step
= "Executing pre-scale vnf-config-primitive"
6690 if scaling_descriptor
.get("scaling-config-action"):
6691 for scaling_config_action
in scaling_descriptor
[
6692 "scaling-config-action"
6695 scaling_config_action
.get("trigger") == "pre-scale-in"
6696 and scaling_type
== "SCALE_IN"
6698 scaling_config_action
.get("trigger") == "pre-scale-out"
6699 and scaling_type
== "SCALE_OUT"
6701 vnf_config_primitive
= scaling_config_action
[
6702 "vnf-config-primitive-name-ref"
6704 step
= db_nslcmop_update
[
6706 ] = "executing pre-scale scaling-config-action '{}'".format(
6707 vnf_config_primitive
6710 # look for primitive
6711 for config_primitive
in (
6712 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
6713 ).get("config-primitive", ()):
6714 if config_primitive
["name"] == vnf_config_primitive
:
6718 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6719 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6720 "primitive".format(scaling_group
, vnf_config_primitive
)
6723 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
6724 if db_vnfr
.get("additionalParamsForVnf"):
6725 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
6727 scale_process
= "VCA"
6728 db_nsr_update
["config-status"] = "configuring pre-scaling"
6729 primitive_params
= self
._map
_primitive
_params
(
6730 config_primitive
, {}, vnfr_params
6733 # Pre-scale retry check: Check if this sub-operation has been executed before
6734 op_index
= self
._check
_or
_add
_scale
_suboperation
(
6737 vnf_config_primitive
,
6741 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
6742 # Skip sub-operation
6743 result
= "COMPLETED"
6744 result_detail
= "Done"
6747 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6748 vnf_config_primitive
, result
, result_detail
6752 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
6753 # New sub-operation: Get index of this sub-operation
6755 len(db_nslcmop
.get("_admin", {}).get("operations"))
6760 + "vnf_config_primitive={} New sub-operation".format(
6761 vnf_config_primitive
6765 # retry: Get registered params for this existing sub-operation
6766 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
6769 vnf_index
= op
.get("member_vnf_index")
6770 vnf_config_primitive
= op
.get("primitive")
6771 primitive_params
= op
.get("primitive_params")
6774 + "vnf_config_primitive={} Sub-operation retry".format(
6775 vnf_config_primitive
6778 # Execute the primitive, either with new (first-time) or registered (reintent) args
6779 ee_descriptor_id
= config_primitive
.get(
6780 "execution-environment-ref"
6782 primitive_name
= config_primitive
.get(
6783 "execution-environment-primitive", vnf_config_primitive
6785 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
6786 nsr_deployed
["VCA"],
6787 member_vnf_index
=vnf_index
,
6789 vdu_count_index
=None,
6790 ee_descriptor_id
=ee_descriptor_id
,
6792 result
, result_detail
= await self
._ns
_execute
_primitive
(
6801 + "vnf_config_primitive={} Done with result {} {}".format(
6802 vnf_config_primitive
, result
, result_detail
6805 # Update operationState = COMPLETED | FAILED
6806 self
._update
_suboperation
_status
(
6807 db_nslcmop
, op_index
, result
, result_detail
6810 if result
== "FAILED":
6811 raise LcmException(result_detail
)
6812 db_nsr_update
["config-status"] = old_config_status
6813 scale_process
= None
6817 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index
)
6820 "_admin.scaling-group.{}.time".format(admin_scale_index
)
6823 # SCALE-IN VCA - BEGIN
6824 if vca_scaling_info
:
6825 step
= db_nslcmop_update
[
6827 ] = "Deleting the execution environments"
6828 scale_process
= "VCA"
6829 for vca_info
in vca_scaling_info
:
6830 if vca_info
["type"] == "delete" and not vca_info
.get("osm_kdu_id"):
6831 member_vnf_index
= str(vca_info
["member-vnf-index"])
6833 logging_text
+ "vdu info: {}".format(vca_info
)
6835 if vca_info
.get("osm_vdu_id"):
6836 vdu_id
= vca_info
["osm_vdu_id"]
6837 vdu_index
= int(vca_info
["vdu_index"])
6840 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6841 member_vnf_index
, vdu_id
, vdu_index
6843 stage
[2] = step
= "Scaling in VCA"
6844 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
6845 vca_update
= db_nsr
["_admin"]["deployed"]["VCA"]
6846 config_update
= db_nsr
["configurationStatus"]
6847 for vca_index
, vca
in enumerate(vca_update
):
6849 (vca
or vca
.get("ee_id"))
6850 and vca
["member-vnf-index"] == member_vnf_index
6851 and vca
["vdu_count_index"] == vdu_index
6853 if vca
.get("vdu_id"):
6854 config_descriptor
= get_configuration(
6855 db_vnfd
, vca
.get("vdu_id")
6857 elif vca
.get("kdu_name"):
6858 config_descriptor
= get_configuration(
6859 db_vnfd
, vca
.get("kdu_name")
6862 config_descriptor
= get_configuration(
6863 db_vnfd
, db_vnfd
["id"]
6865 operation_params
= (
6866 db_nslcmop
.get("operationParams") or {}
6868 exec_terminate_primitives
= not operation_params
.get(
6869 "skip_terminate_primitives"
6870 ) and vca
.get("needed_terminate")
6871 task
= asyncio
.ensure_future(
6880 exec_primitives
=exec_terminate_primitives
,
6884 timeout
=self
.timeout
.charm_delete
,
6887 tasks_dict_info
[task
] = "Terminating VCA {}".format(
6890 del vca_update
[vca_index
]
6891 del config_update
[vca_index
]
6892 # wait for pending tasks of terminate primitives
6896 + "Waiting for tasks {}".format(
6897 list(tasks_dict_info
.keys())
6900 error_list
= await self
._wait
_for
_tasks
(
6904 self
.timeout
.charm_delete
, self
.timeout
.ns_terminate
6909 tasks_dict_info
.clear()
6911 raise LcmException("; ".join(error_list
))
6913 db_vca_and_config_update
= {
6914 "_admin.deployed.VCA": vca_update
,
6915 "configurationStatus": config_update
,
6918 "nsrs", db_nsr
["_id"], db_vca_and_config_update
6920 scale_process
= None
6921 # SCALE-IN VCA - END
6924 if scaling_info
.get("vdu-create") or scaling_info
.get("vdu-delete"):
6925 scale_process
= "RO"
6926 if self
.ro_config
.ng
:
6927 await self
._scale
_ng
_ro
(
6928 logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, scaling_info
, stage
6930 scaling_info
.pop("vdu-create", None)
6931 scaling_info
.pop("vdu-delete", None)
6933 scale_process
= None
6937 if scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete"):
6938 scale_process
= "KDU"
6939 await self
._scale
_kdu
(
6940 logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
6942 scaling_info
.pop("kdu-create", None)
6943 scaling_info
.pop("kdu-delete", None)
6945 scale_process
= None
6949 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6951 # SCALE-UP VCA - BEGIN
6952 if vca_scaling_info
:
6953 step
= db_nslcmop_update
[
6955 ] = "Creating new execution environments"
6956 scale_process
= "VCA"
6957 for vca_info
in vca_scaling_info
:
6958 if vca_info
["type"] == "create" and not vca_info
.get("osm_kdu_id"):
6959 member_vnf_index
= str(vca_info
["member-vnf-index"])
6961 logging_text
+ "vdu info: {}".format(vca_info
)
6963 vnfd_id
= db_vnfr
["vnfd-ref"]
6964 if vca_info
.get("osm_vdu_id"):
6965 vdu_index
= int(vca_info
["vdu_index"])
6966 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6967 if db_vnfr
.get("additionalParamsForVnf"):
6968 deploy_params
.update(
6970 db_vnfr
["additionalParamsForVnf"].copy()
6973 descriptor_config
= get_configuration(
6974 db_vnfd
, db_vnfd
["id"]
6976 if descriptor_config
:
6982 logging_text
=logging_text
6983 + "member_vnf_index={} ".format(member_vnf_index
),
6986 nslcmop_id
=nslcmop_id
,
6992 kdu_index
=kdu_index
,
6993 member_vnf_index
=member_vnf_index
,
6994 vdu_index
=vdu_index
,
6996 deploy_params
=deploy_params
,
6997 descriptor_config
=descriptor_config
,
6998 base_folder
=base_folder
,
6999 task_instantiation_info
=tasks_dict_info
,
7002 vdu_id
= vca_info
["osm_vdu_id"]
7003 vdur
= find_in_list(
7004 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
7006 descriptor_config
= get_configuration(db_vnfd
, vdu_id
)
7007 if vdur
.get("additionalParams"):
7008 deploy_params_vdu
= parse_yaml_strings(
7009 vdur
["additionalParams"]
7012 deploy_params_vdu
= deploy_params
7013 deploy_params_vdu
["OSM"] = get_osm_params(
7014 db_vnfr
, vdu_id
, vdu_count_index
=vdu_index
7016 if descriptor_config
:
7022 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7023 member_vnf_index
, vdu_id
, vdu_index
7025 stage
[2] = step
= "Scaling out VCA"
7026 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7028 logging_text
=logging_text
7029 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7030 member_vnf_index
, vdu_id
, vdu_index
7034 nslcmop_id
=nslcmop_id
,
7040 member_vnf_index
=member_vnf_index
,
7041 vdu_index
=vdu_index
,
7042 kdu_index
=kdu_index
,
7044 deploy_params
=deploy_params_vdu
,
7045 descriptor_config
=descriptor_config
,
7046 base_folder
=base_folder
,
7047 task_instantiation_info
=tasks_dict_info
,
7050 # SCALE-UP VCA - END
7051 scale_process
= None
7054 # execute primitive service POST-SCALING
7055 step
= "Executing post-scale vnf-config-primitive"
7056 if scaling_descriptor
.get("scaling-config-action"):
7057 for scaling_config_action
in scaling_descriptor
[
7058 "scaling-config-action"
7061 scaling_config_action
.get("trigger") == "post-scale-in"
7062 and scaling_type
== "SCALE_IN"
7064 scaling_config_action
.get("trigger") == "post-scale-out"
7065 and scaling_type
== "SCALE_OUT"
7067 vnf_config_primitive
= scaling_config_action
[
7068 "vnf-config-primitive-name-ref"
7070 step
= db_nslcmop_update
[
7072 ] = "executing post-scale scaling-config-action '{}'".format(
7073 vnf_config_primitive
7076 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
7077 if db_vnfr
.get("additionalParamsForVnf"):
7078 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
7080 # look for primitive
7081 for config_primitive
in (
7082 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
7083 ).get("config-primitive", ()):
7084 if config_primitive
["name"] == vnf_config_primitive
:
7088 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7089 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7090 "config-primitive".format(
7091 scaling_group
, vnf_config_primitive
7094 scale_process
= "VCA"
7095 db_nsr_update
["config-status"] = "configuring post-scaling"
7096 primitive_params
= self
._map
_primitive
_params
(
7097 config_primitive
, {}, vnfr_params
7100 # Post-scale retry check: Check if this sub-operation has been executed before
7101 op_index
= self
._check
_or
_add
_scale
_suboperation
(
7104 vnf_config_primitive
,
7108 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
7109 # Skip sub-operation
7110 result
= "COMPLETED"
7111 result_detail
= "Done"
7114 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7115 vnf_config_primitive
, result
, result_detail
7119 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
7120 # New sub-operation: Get index of this sub-operation
7122 len(db_nslcmop
.get("_admin", {}).get("operations"))
7127 + "vnf_config_primitive={} New sub-operation".format(
7128 vnf_config_primitive
7132 # retry: Get registered params for this existing sub-operation
7133 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
7136 vnf_index
= op
.get("member_vnf_index")
7137 vnf_config_primitive
= op
.get("primitive")
7138 primitive_params
= op
.get("primitive_params")
7141 + "vnf_config_primitive={} Sub-operation retry".format(
7142 vnf_config_primitive
7145 # Execute the primitive, either with new (first-time) or registered (reintent) args
7146 ee_descriptor_id
= config_primitive
.get(
7147 "execution-environment-ref"
7149 primitive_name
= config_primitive
.get(
7150 "execution-environment-primitive", vnf_config_primitive
7152 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
7153 nsr_deployed
["VCA"],
7154 member_vnf_index
=vnf_index
,
7156 vdu_count_index
=None,
7157 ee_descriptor_id
=ee_descriptor_id
,
7159 result
, result_detail
= await self
._ns
_execute
_primitive
(
7168 + "vnf_config_primitive={} Done with result {} {}".format(
7169 vnf_config_primitive
, result
, result_detail
7172 # Update operationState = COMPLETED | FAILED
7173 self
._update
_suboperation
_status
(
7174 db_nslcmop
, op_index
, result
, result_detail
7177 if result
== "FAILED":
7178 raise LcmException(result_detail
)
7179 db_nsr_update
["config-status"] = old_config_status
7180 scale_process
= None
7185 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7186 db_nsr_update
["operational-status"] = (
7188 if old_operational_status
== "failed"
7189 else old_operational_status
7191 db_nsr_update
["config-status"] = old_config_status
7194 ROclient
.ROClientException
,
7199 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7201 except asyncio
.CancelledError
:
7203 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7205 exc
= "Operation was cancelled"
7206 except Exception as e
:
7207 exc
= traceback
.format_exc()
7208 self
.logger
.critical(
7209 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
7213 self
._write
_ns
_status
(
7216 current_operation
="IDLE",
7217 current_operation_id
=None,
7220 stage
[1] = "Waiting for instantiate pending tasks."
7221 self
.logger
.debug(logging_text
+ stage
[1])
7222 exc
= await self
._wait
_for
_tasks
(
7225 self
.timeout
.ns_deploy
,
7233 ] = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
7234 nslcmop_operation_state
= "FAILED"
7236 db_nsr_update
["operational-status"] = old_operational_status
7237 db_nsr_update
["config-status"] = old_config_status
7238 db_nsr_update
["detailed-status"] = ""
7240 if "VCA" in scale_process
:
7241 db_nsr_update
["config-status"] = "failed"
7242 if "RO" in scale_process
:
7243 db_nsr_update
["operational-status"] = "failed"
7246 ] = "FAILED scaling nslcmop={} {}: {}".format(
7247 nslcmop_id
, step
, exc
7250 error_description_nslcmop
= None
7251 nslcmop_operation_state
= "COMPLETED"
7252 db_nslcmop_update
["detailed-status"] = "Done"
7254 self
._write
_op
_status
(
7257 error_message
=error_description_nslcmop
,
7258 operation_state
=nslcmop_operation_state
,
7259 other_update
=db_nslcmop_update
,
7262 self
._write
_ns
_status
(
7265 current_operation
="IDLE",
7266 current_operation_id
=None,
7267 other_update
=db_nsr_update
,
7270 if nslcmop_operation_state
:
7274 "nslcmop_id": nslcmop_id
,
7275 "operationState": nslcmop_operation_state
,
7277 await self
.msg
.aiowrite("ns", "scaled", msg
, loop
=self
.loop
)
7278 except Exception as e
:
7280 logging_text
+ "kafka_write notification Exception {}".format(e
)
7282 self
.logger
.debug(logging_text
+ "Exit")
7283 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_scale")
7285 async def _scale_kdu(
7286 self
, logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7288 _scaling_info
= scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete")
7289 for kdu_name
in _scaling_info
:
7290 for kdu_scaling_info
in _scaling_info
[kdu_name
]:
7291 deployed_kdu
, index
= get_deployed_kdu(
7292 nsr_deployed
, kdu_name
, kdu_scaling_info
["member-vnf-index"]
7294 cluster_uuid
= deployed_kdu
["k8scluster-uuid"]
7295 kdu_instance
= deployed_kdu
["kdu-instance"]
7296 kdu_model
= deployed_kdu
.get("kdu-model")
7297 scale
= int(kdu_scaling_info
["scale"])
7298 k8s_cluster_type
= kdu_scaling_info
["k8s-cluster-type"]
7301 "collection": "nsrs",
7302 "filter": {"_id": nsr_id
},
7303 "path": "_admin.deployed.K8s.{}".format(index
),
7306 step
= "scaling application {}".format(
7307 kdu_scaling_info
["resource-name"]
7309 self
.logger
.debug(logging_text
+ step
)
7311 if kdu_scaling_info
["type"] == "delete":
7312 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7315 and kdu_config
.get("terminate-config-primitive")
7316 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7318 terminate_config_primitive_list
= kdu_config
.get(
7319 "terminate-config-primitive"
7321 terminate_config_primitive_list
.sort(
7322 key
=lambda val
: int(val
["seq"])
7326 terminate_config_primitive
7327 ) in terminate_config_primitive_list
:
7328 primitive_params_
= self
._map
_primitive
_params
(
7329 terminate_config_primitive
, {}, {}
7331 step
= "execute terminate config primitive"
7332 self
.logger
.debug(logging_text
+ step
)
7333 await asyncio
.wait_for(
7334 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7335 cluster_uuid
=cluster_uuid
,
7336 kdu_instance
=kdu_instance
,
7337 primitive_name
=terminate_config_primitive
["name"],
7338 params
=primitive_params_
,
7340 total_timeout
=self
.timeout
.primitive
,
7343 timeout
=self
.timeout
.primitive
7344 * self
.timeout
.primitive_outer_factor
,
7347 await asyncio
.wait_for(
7348 self
.k8scluster_map
[k8s_cluster_type
].scale(
7349 kdu_instance
=kdu_instance
,
7351 resource_name
=kdu_scaling_info
["resource-name"],
7352 total_timeout
=self
.timeout
.scale_on_error
,
7354 cluster_uuid
=cluster_uuid
,
7355 kdu_model
=kdu_model
,
7359 timeout
=self
.timeout
.scale_on_error
7360 * self
.timeout
.scale_on_error_outer_factor
,
7363 if kdu_scaling_info
["type"] == "create":
7364 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7367 and kdu_config
.get("initial-config-primitive")
7368 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7370 initial_config_primitive_list
= kdu_config
.get(
7371 "initial-config-primitive"
7373 initial_config_primitive_list
.sort(
7374 key
=lambda val
: int(val
["seq"])
7377 for initial_config_primitive
in initial_config_primitive_list
:
7378 primitive_params_
= self
._map
_primitive
_params
(
7379 initial_config_primitive
, {}, {}
7381 step
= "execute initial config primitive"
7382 self
.logger
.debug(logging_text
+ step
)
7383 await asyncio
.wait_for(
7384 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7385 cluster_uuid
=cluster_uuid
,
7386 kdu_instance
=kdu_instance
,
7387 primitive_name
=initial_config_primitive
["name"],
7388 params
=primitive_params_
,
7395 async def _scale_ng_ro(
7396 self
, logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, vdu_scaling_info
, stage
7398 nsr_id
= db_nslcmop
["nsInstanceId"]
7399 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7402 # read from db: vnfd's for every vnf
7405 # for each vnf in ns, read vnfd
7406 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
7407 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
7408 vnfd_id
= vnfr
["vnfd-id"] # vnfd uuid for this vnf
7409 # if we haven't this vnfd, read it from db
7410 if not find_in_list(db_vnfds
, lambda a_vnfd
: a_vnfd
["id"] == vnfd_id
):
7412 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7413 db_vnfds
.append(vnfd
)
7414 n2vc_key
= self
.n2vc
.get_public_key()
7415 n2vc_key_list
= [n2vc_key
]
7418 vdu_scaling_info
.get("vdu-create"),
7419 vdu_scaling_info
.get("vdu-delete"),
7422 # db_vnfr has been updated, update db_vnfrs to use it
7423 db_vnfrs
[db_vnfr
["member-vnf-index-ref"]] = db_vnfr
7424 await self
._instantiate
_ng
_ro
(
7434 start_deploy
=time(),
7435 timeout_ns_deploy
=self
.timeout
.ns_deploy
,
7437 if vdu_scaling_info
.get("vdu-delete"):
7439 db_vnfr
, None, vdu_scaling_info
["vdu-delete"], mark_delete
=False
7442 async def extract_prometheus_scrape_jobs(
7446 ee_config_descriptor
: dict,
7451 vnf_member_index
: str = "",
7453 vdu_index
: int = None,
7455 kdu_index
: int = None,
7457 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7458 This method will wait until the corresponding VDU or KDU is fully instantiated
7461 ee_id (str): Execution Environment ID
7462 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7463 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7464 vnfr_id (str): VNFR ID where this EE applies
7465 nsr_id (str): NSR ID where this EE applies
7466 target_ip (str): VDU/KDU instance IP address
7467 element_type (str): NS or VNF or VDU or KDU
7468 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7469 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7470 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7471 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7472 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7475 LcmException: When the VDU or KDU instance was not found in an hour
7478 _type_: Prometheus jobs
7480 # default the vdur and kdur names to an empty string, to avoid any later
7481 # problem with Prometheus when the element type is not VDU or KDU
7485 # look if exist a file called 'prometheus*.j2' and
7486 artifact_content
= self
.fs
.dir_ls(artifact_path
)
7490 for f
in artifact_content
7491 if f
.startswith("prometheus") and f
.endswith(".j2")
7497 with self
.fs
.file_open((artifact_path
, job_file
), "r") as f
:
7500 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7501 if element_type
in ("VDU", "KDU"):
7502 for _
in range(360):
7503 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7504 if vdu_id
and vdu_index
is not None:
7508 for x
in get_iterable(db_vnfr
, "vdur")
7510 x
.get("vdu-id-ref") == vdu_id
7511 and x
.get("count-index") == vdu_index
7516 if vdur
.get("name"):
7517 vdur_name
= vdur
.get("name")
7519 if kdu_name
and kdu_index
is not None:
7523 for x
in get_iterable(db_vnfr
, "kdur")
7525 x
.get("kdu-name") == kdu_name
7526 and x
.get("count-index") == kdu_index
7531 if kdur
.get("name"):
7532 kdur_name
= kdur
.get("name")
7535 await asyncio
.sleep(10, loop
=self
.loop
)
7537 if vdu_id
and vdu_index
is not None:
7539 f
"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7541 if kdu_name
and kdu_index
is not None:
7543 f
"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7547 _
, _
, service
= ee_id
.partition(".") # remove prefix "namespace."
7548 host_name
= "{}-{}".format(service
, ee_config_descriptor
["metric-service"])
7550 vnfr_id
= vnfr_id
.replace("-", "")
7552 "JOB_NAME": vnfr_id
,
7553 "TARGET_IP": target_ip
,
7554 "EXPORTER_POD_IP": host_name
,
7555 "EXPORTER_POD_PORT": host_port
,
7557 "VNF_MEMBER_INDEX": vnf_member_index
,
7558 "VDUR_NAME": vdur_name
,
7559 "KDUR_NAME": kdur_name
,
7560 "ELEMENT_TYPE": element_type
,
7562 job_list
= parse_job(job_data
, variables
)
7563 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7564 for job
in job_list
:
7566 not isinstance(job
.get("job_name"), str)
7567 or vnfr_id
not in job
["job_name"]
7569 job
["job_name"] = vnfr_id
+ "_" + str(randint(1, 10000))
7570 job
["nsr_id"] = nsr_id
7571 job
["vnfr_id"] = vnfr_id
7574 async def rebuild_start_stop(
7575 self
, nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
7577 logging_text
= "Task ns={} {}={} ".format(nsr_id
, operation_type
, nslcmop_id
)
7578 self
.logger
.info(logging_text
+ "Enter")
7579 stage
= ["Preparing the environment", ""]
7580 # database nsrs record
7584 # in case of error, indicates what part of scale was failed to put nsr at error status
7585 start_deploy
= time()
7587 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_id
})
7588 vim_account_id
= db_vnfr
.get("vim-account-id")
7589 vim_info_key
= "vim:" + vim_account_id
7590 vdu_id
= additional_param
["vdu_id"]
7591 vdurs
= [item
for item
in db_vnfr
["vdur"] if item
["vdu-id-ref"] == vdu_id
]
7592 vdur
= find_in_list(
7593 vdurs
, lambda vdu
: vdu
["count-index"] == additional_param
["count-index"]
7596 vdu_vim_name
= vdur
["name"]
7597 vim_vm_id
= vdur
["vim_info"][vim_info_key
]["vim_id"]
7598 target_vim
, _
= next(k_v
for k_v
in vdur
["vim_info"].items())
7600 raise LcmException("Target vdu is not found")
7601 self
.logger
.info("vdu_vim_name >> {} ".format(vdu_vim_name
))
7602 # wait for any previous tasks in process
7603 stage
[1] = "Waiting for previous operations to terminate"
7604 self
.logger
.info(stage
[1])
7605 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7607 stage
[1] = "Reading from database."
7608 self
.logger
.info(stage
[1])
7609 self
._write
_ns
_status
(
7612 current_operation
=operation_type
.upper(),
7613 current_operation_id
=nslcmop_id
,
7615 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7618 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
7619 db_nsr_update
["operational-status"] = operation_type
7620 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7624 "vim_vm_id": vim_vm_id
,
7626 "vdu_index": additional_param
["count-index"],
7627 "vdu_id": vdur
["id"],
7628 "target_vim": target_vim
,
7629 "vim_account_id": vim_account_id
,
7632 stage
[1] = "Sending rebuild request to RO... {}".format(desc
)
7633 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7634 self
.logger
.info("ro nsr id: {}".format(nsr_id
))
7635 result_dict
= await self
.RO
.operate(nsr_id
, desc
, operation_type
)
7636 self
.logger
.info("response from RO: {}".format(result_dict
))
7637 action_id
= result_dict
["action_id"]
7638 await self
._wait
_ng
_ro
(
7643 self
.timeout
.operate
,
7645 "start_stop_rebuild",
7647 return "COMPLETED", "Done"
7648 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7649 self
.logger
.error("Exit Exception {}".format(e
))
7651 except asyncio
.CancelledError
:
7652 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
7653 exc
= "Operation was cancelled"
7654 except Exception as e
:
7655 exc
= traceback
.format_exc()
7656 self
.logger
.critical(
7657 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7659 return "FAILED", "Error in operate VNF {}".format(exc
)
7661 def get_vca_cloud_and_credentials(self
, vim_account_id
: str) -> (str, str):
7663 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7665 :param: vim_account_id: VIM Account ID
7667 :return: (cloud_name, cloud_credential)
7669 config
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("config", {})
7670 return config
.get("vca_cloud"), config
.get("vca_cloud_credential")
7672 def get_vca_k8s_cloud_and_credentials(self
, vim_account_id
: str) -> (str, str):
7674 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7676 :param: vim_account_id: VIM Account ID
7678 :return: (cloud_name, cloud_credential)
7680 config
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("config", {})
7681 return config
.get("vca_k8s_cloud"), config
.get("vca_k8s_cloud_credential")
7683 async def migrate(self
, nsr_id
, nslcmop_id
):
7685 Migrate VNFs and VDUs instances in a NS
7687 :param: nsr_id: NS Instance ID
7688 :param: nslcmop_id: nslcmop ID of migrate
7691 # Try to lock HA task here
7692 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7693 if not task_is_locked_by_me
:
7695 logging_text
= "Task ns={} migrate ".format(nsr_id
)
7696 self
.logger
.debug(logging_text
+ "Enter")
7697 # get all needed from database
7699 db_nslcmop_update
= {}
7700 nslcmop_operation_state
= None
7704 # in case of error, indicates what part of scale was failed to put nsr at error status
7705 start_deploy
= time()
7708 # wait for any previous tasks in process
7709 step
= "Waiting for previous operations to terminate"
7710 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7712 self
._write
_ns
_status
(
7715 current_operation
="MIGRATING",
7716 current_operation_id
=nslcmop_id
,
7718 step
= "Getting nslcmop from database"
7720 step
+ " after having waited for previous tasks to be completed"
7722 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7723 migrate_params
= db_nslcmop
.get("operationParams")
7726 target
.update(migrate_params
)
7727 desc
= await self
.RO
.migrate(nsr_id
, target
)
7728 self
.logger
.debug("RO return > {}".format(desc
))
7729 action_id
= desc
["action_id"]
7730 await self
._wait
_ng
_ro
(
7735 self
.timeout
.migrate
,
7736 operation
="migrate",
7738 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7739 self
.logger
.error("Exit Exception {}".format(e
))
7741 except asyncio
.CancelledError
:
7742 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
7743 exc
= "Operation was cancelled"
7744 except Exception as e
:
7745 exc
= traceback
.format_exc()
7746 self
.logger
.critical(
7747 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7750 self
._write
_ns
_status
(
7753 current_operation
="IDLE",
7754 current_operation_id
=None,
7757 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
7758 nslcmop_operation_state
= "FAILED"
7760 nslcmop_operation_state
= "COMPLETED"
7761 db_nslcmop_update
["detailed-status"] = "Done"
7762 db_nsr_update
["detailed-status"] = "Done"
7764 self
._write
_op
_status
(
7768 operation_state
=nslcmop_operation_state
,
7769 other_update
=db_nslcmop_update
,
7771 if nslcmop_operation_state
:
7775 "nslcmop_id": nslcmop_id
,
7776 "operationState": nslcmop_operation_state
,
7778 await self
.msg
.aiowrite("ns", "migrated", msg
, loop
=self
.loop
)
7779 except Exception as e
:
7781 logging_text
+ "kafka_write notification Exception {}".format(e
)
7783 self
.logger
.debug(logging_text
+ "Exit")
7784 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_migrate")
7786 async def heal(self
, nsr_id
, nslcmop_id
):
7790 :param nsr_id: ns instance to heal
7791 :param nslcmop_id: operation to run
7795 # Try to lock HA task here
7796 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7797 if not task_is_locked_by_me
:
7800 logging_text
= "Task ns={} heal={} ".format(nsr_id
, nslcmop_id
)
7801 stage
= ["", "", ""]
7802 tasks_dict_info
= {}
7803 # ^ stage, step, VIM progress
7804 self
.logger
.debug(logging_text
+ "Enter")
7805 # get all needed from database
7807 db_nslcmop_update
= {}
7809 db_vnfrs
= {} # vnf's info indexed by _id
7811 old_operational_status
= ""
7812 old_config_status
= ""
7815 # wait for any previous tasks in process
7816 step
= "Waiting for previous operations to terminate"
7817 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7818 self
._write
_ns
_status
(
7821 current_operation
="HEALING",
7822 current_operation_id
=nslcmop_id
,
7825 step
= "Getting nslcmop from database"
7827 step
+ " after having waited for previous tasks to be completed"
7829 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7831 step
= "Getting nsr from database"
7832 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
7833 old_operational_status
= db_nsr
["operational-status"]
7834 old_config_status
= db_nsr
["config-status"]
7837 "_admin.deployed.RO.operational-status": "healing",
7839 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7841 step
= "Sending heal order to VIM"
7843 logging_text
=logging_text
,
7845 db_nslcmop
=db_nslcmop
,
7850 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
7851 self
.logger
.debug(logging_text
+ stage
[1])
7852 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7853 self
.fs
.sync(db_nsr
["nsd-id"])
7855 # read from db: vnfr's of this ns
7856 step
= "Getting vnfrs from db"
7857 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
7858 for vnfr
in db_vnfrs_list
:
7859 db_vnfrs
[vnfr
["_id"]] = vnfr
7860 self
.logger
.debug("ns.heal db_vnfrs={}".format(db_vnfrs
))
7862 # Check for each target VNF
7863 target_list
= db_nslcmop
.get("operationParams", {}).get("healVnfData", {})
7864 for target_vnf
in target_list
:
7865 # Find this VNF in the list from DB
7866 vnfr_id
= target_vnf
.get("vnfInstanceId", None)
7868 db_vnfr
= db_vnfrs
[vnfr_id
]
7869 vnfd_id
= db_vnfr
.get("vnfd-id")
7870 vnfd_ref
= db_vnfr
.get("vnfd-ref")
7871 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7872 base_folder
= vnfd
["_admin"]["storage"]
7877 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
7878 member_vnf_index
= db_vnfr
.get("member-vnf-index-ref")
7880 # Check each target VDU and deploy N2VC
7881 target_vdu_list
= target_vnf
.get("additionalParams", {}).get(
7884 if not target_vdu_list
:
7885 # Codigo nuevo para crear diccionario
7886 target_vdu_list
= []
7887 for existing_vdu
in db_vnfr
.get("vdur"):
7888 vdu_name
= existing_vdu
.get("vdu-name", None)
7889 vdu_index
= existing_vdu
.get("count-index", 0)
7890 vdu_run_day1
= target_vnf
.get("additionalParams", {}).get(
7893 vdu_to_be_healed
= {
7895 "count-index": vdu_index
,
7896 "run-day1": vdu_run_day1
,
7898 target_vdu_list
.append(vdu_to_be_healed
)
7899 for target_vdu
in target_vdu_list
:
7900 deploy_params_vdu
= target_vdu
7901 # Set run-day1 vnf level value if not vdu level value exists
7902 if not deploy_params_vdu
.get("run-day1") and target_vnf
[
7905 deploy_params_vdu
["run-day1"] = target_vnf
[
7908 vdu_name
= target_vdu
.get("vdu-id", None)
7909 # TODO: Get vdu_id from vdud.
7911 # For multi instance VDU count-index is mandatory
7912 # For single session VDU count-indes is 0
7913 vdu_index
= target_vdu
.get("count-index", 0)
7915 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7916 stage
[1] = "Deploying Execution Environments."
7917 self
.logger
.debug(logging_text
+ stage
[1])
7919 # VNF Level charm. Normal case when proxy charms.
7920 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
7921 descriptor_config
= get_configuration(vnfd
, vnfd_ref
)
7922 if descriptor_config
:
7923 # Continue if healed machine is management machine
7924 vnf_ip_address
= db_vnfr
.get("ip-address")
7925 target_instance
= None
7926 for instance
in db_vnfr
.get("vdur", None):
7928 instance
["vdu-name"] == vdu_name
7929 and instance
["count-index"] == vdu_index
7931 target_instance
= instance
7933 if vnf_ip_address
== target_instance
.get("ip-address"):
7935 logging_text
=logging_text
7936 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7937 member_vnf_index
, vdu_name
, vdu_index
7941 nslcmop_id
=nslcmop_id
,
7947 member_vnf_index
=member_vnf_index
,
7950 deploy_params
=deploy_params_vdu
,
7951 descriptor_config
=descriptor_config
,
7952 base_folder
=base_folder
,
7953 task_instantiation_info
=tasks_dict_info
,
7957 # VDU Level charm. Normal case with native charms.
7958 descriptor_config
= get_configuration(vnfd
, vdu_name
)
7959 if descriptor_config
:
7961 logging_text
=logging_text
7962 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
7963 member_vnf_index
, vdu_name
, vdu_index
7967 nslcmop_id
=nslcmop_id
,
7973 member_vnf_index
=member_vnf_index
,
7974 vdu_index
=vdu_index
,
7976 deploy_params
=deploy_params_vdu
,
7977 descriptor_config
=descriptor_config
,
7978 base_folder
=base_folder
,
7979 task_instantiation_info
=tasks_dict_info
,
7984 ROclient
.ROClientException
,
7989 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7991 except asyncio
.CancelledError
:
7993 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7995 exc
= "Operation was cancelled"
7996 except Exception as e
:
7997 exc
= traceback
.format_exc()
7998 self
.logger
.critical(
7999 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
8004 stage
[1] = "Waiting for healing pending tasks."
8005 self
.logger
.debug(logging_text
+ stage
[1])
8006 exc
= await self
._wait
_for
_tasks
(
8009 self
.timeout
.ns_deploy
,
8017 ] = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
8018 nslcmop_operation_state
= "FAILED"
8020 db_nsr_update
["operational-status"] = old_operational_status
8021 db_nsr_update
["config-status"] = old_config_status
8024 ] = "FAILED healing nslcmop={} {}: {}".format(nslcmop_id
, step
, exc
)
8025 for task
, task_name
in tasks_dict_info
.items():
8026 if not task
.done() or task
.cancelled() or task
.exception():
8027 if task_name
.startswith(self
.task_name_deploy_vca
):
8028 # A N2VC task is pending
8029 db_nsr_update
["config-status"] = "failed"
8031 # RO task is pending
8032 db_nsr_update
["operational-status"] = "failed"
8034 error_description_nslcmop
= None
8035 nslcmop_operation_state
= "COMPLETED"
8036 db_nslcmop_update
["detailed-status"] = "Done"
8037 db_nsr_update
["detailed-status"] = "Done"
8038 db_nsr_update
["operational-status"] = "running"
8039 db_nsr_update
["config-status"] = "configured"
8041 self
._write
_op
_status
(
8044 error_message
=error_description_nslcmop
,
8045 operation_state
=nslcmop_operation_state
,
8046 other_update
=db_nslcmop_update
,
8049 self
._write
_ns
_status
(
8052 current_operation
="IDLE",
8053 current_operation_id
=None,
8054 other_update
=db_nsr_update
,
8057 if nslcmop_operation_state
:
8061 "nslcmop_id": nslcmop_id
,
8062 "operationState": nslcmop_operation_state
,
8064 await self
.msg
.aiowrite("ns", "healed", msg
, loop
=self
.loop
)
8065 except Exception as e
:
8067 logging_text
+ "kafka_write notification Exception {}".format(e
)
8069 self
.logger
.debug(logging_text
+ "Exit")
8070 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_heal")
8081 :param logging_text: preffix text to use at logging
8082 :param nsr_id: nsr identity
8083 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8084 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8085 :return: None or exception
8088 def get_vim_account(vim_account_id
):
8090 if vim_account_id
in db_vims
:
8091 return db_vims
[vim_account_id
]
8092 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
8093 db_vims
[vim_account_id
] = db_vim
8098 ns_params
= db_nslcmop
.get("operationParams")
8099 if ns_params
and ns_params
.get("timeout_ns_heal"):
8100 timeout_ns_heal
= ns_params
["timeout_ns_heal"]
8102 timeout_ns_heal
= self
.timeout
.ns_heal
8106 nslcmop_id
= db_nslcmop
["_id"]
8108 "action_id": nslcmop_id
,
8110 self
.logger
.warning(
8111 "db_nslcmop={} and timeout_ns_heal={}".format(
8112 db_nslcmop
, timeout_ns_heal
8115 target
.update(db_nslcmop
.get("operationParams", {}))
8117 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
8118 desc
= await self
.RO
.recreate(nsr_id
, target
)
8119 self
.logger
.debug("RO return > {}".format(desc
))
8120 action_id
= desc
["action_id"]
8121 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8122 await self
._wait
_ng
_ro
(
8129 operation
="healing",
8134 "_admin.deployed.RO.operational-status": "running",
8135 "detailed-status": " ".join(stage
),
8137 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8138 self
._write
_op
_status
(nslcmop_id
, stage
)
8140 logging_text
+ "ns healed at RO. RO_id={}".format(action_id
)
8143 except Exception as e
:
8144 stage
[2] = "ERROR healing at VIM"
8145 # self.set_vnfr_at_error(db_vnfrs, str(e))
8147 "Error healing at VIM {}".format(e
),
8148 exc_info
=not isinstance(
8151 ROclient
.ROClientException
,
8177 task_instantiation_info
,
8180 # launch instantiate_N2VC in a asyncio task and register task object
8181 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8182 # if not found, create one entry and update database
8183 # fill db_nsr._admin.deployed.VCA.<index>
8186 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
8190 get_charm_name
= False
8191 if "execution-environment-list" in descriptor_config
:
8192 ee_list
= descriptor_config
.get("execution-environment-list", [])
8193 elif "juju" in descriptor_config
:
8194 ee_list
= [descriptor_config
] # ns charms
8195 if "execution-environment-list" not in descriptor_config
:
8196 # charm name is only required for ns charms
8197 get_charm_name
= True
8198 else: # other types as script are not supported
8201 for ee_item
in ee_list
:
8204 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8205 ee_item
.get("juju"), ee_item
.get("helm-chart")
8208 ee_descriptor_id
= ee_item
.get("id")
8209 if ee_item
.get("juju"):
8210 vca_name
= ee_item
["juju"].get("charm")
8212 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
8215 if ee_item
["juju"].get("charm") is not None
8218 if ee_item
["juju"].get("cloud") == "k8s":
8219 vca_type
= "k8s_proxy_charm"
8220 elif ee_item
["juju"].get("proxy") is False:
8221 vca_type
= "native_charm"
8222 elif ee_item
.get("helm-chart"):
8223 vca_name
= ee_item
["helm-chart"]
8224 if ee_item
.get("helm-version") and ee_item
.get("helm-version") == "v2":
8227 vca_type
= "helm-v3"
8230 logging_text
+ "skipping non juju neither charm configuration"
8235 for vca_index
, vca_deployed
in enumerate(
8236 db_nsr
["_admin"]["deployed"]["VCA"]
8238 if not vca_deployed
:
8241 vca_deployed
.get("member-vnf-index") == member_vnf_index
8242 and vca_deployed
.get("vdu_id") == vdu_id
8243 and vca_deployed
.get("kdu_name") == kdu_name
8244 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
8245 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
8249 # not found, create one.
8251 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
8254 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
8256 target
+= "/kdu/{}".format(kdu_name
)
8258 "target_element": target
,
8259 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8260 "member-vnf-index": member_vnf_index
,
8262 "kdu_name": kdu_name
,
8263 "vdu_count_index": vdu_index
,
8264 "operational-status": "init", # TODO revise
8265 "detailed-status": "", # TODO revise
8266 "step": "initial-deploy", # TODO revise
8268 "vdu_name": vdu_name
,
8270 "ee_descriptor_id": ee_descriptor_id
,
8271 "charm_name": charm_name
,
8275 # create VCA and configurationStatus in db
8277 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
8278 "configurationStatus.{}".format(vca_index
): dict(),
8280 self
.update_db_2("nsrs", nsr_id
, db_dict
)
8282 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
8284 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
8285 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
8286 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
8289 task_n2vc
= asyncio
.ensure_future(
8291 logging_text
=logging_text
,
8292 vca_index
=vca_index
,
8298 vdu_index
=vdu_index
,
8299 deploy_params
=deploy_params
,
8300 config_descriptor
=descriptor_config
,
8301 base_folder
=base_folder
,
8302 nslcmop_id
=nslcmop_id
,
8306 ee_config_descriptor
=ee_item
,
8309 self
.lcm_tasks
.register(
8313 "instantiate_N2VC-{}".format(vca_index
),
8316 task_instantiation_info
[
8318 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
8319 member_vnf_index
or "", vdu_id
or ""
8322 async def heal_N2VC(
# NOTE(review): this extract is mangled — statements are split across physical
# lines and many original lines are missing (the embedded original line numbers
# jump, e.g. 8323-8338 of the parameter list are absent). Only comments were
# added below; every original code byte is preserved.
#
# Purpose (from the visible code): during a heal operation, (re)attach the VCA
# execution environment (charm) for one configurable element — NS, VNF, VDU or
# KDU — then install configuration software and run the Day-1
# (initial-config-primitive) actions against it.
8339 ee_config_descriptor
,
# --- derive ids and db paths for this VCA entry from db_nsr / vca_index ---
8341 nsr_id
= db_nsr
["_id"]
8342 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
8343 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
8344 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
# osm_config is progressively enriched below with vnf_id / vdu_id / kdu_name.
8345 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
8347 "collection": "nsrs",
8348 "filter": {"_id": nsr_id
},
8349 "path": db_update_entry
,
8354 element_under_configuration
= nsr_id
8358 vnfr_id
= db_vnfr
["_id"]
8359 osm_config
["osm"]["vnf_id"] = vnfr_id
# Juju model namespace: "<nsi>.<ns>", later extended per VNF/VDU/KDU below.
8361 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
8363 if vca_type
== "native_charm":
8366 index_number
= vdu_index
or 0
# --- classify the element under configuration: VNF vs VDU vs KDU ---
8369 element_type
= "VNF"
8370 element_under_configuration
= vnfr_id
8371 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
8373 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
8374 element_type
= "VDU"
8375 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
8376 osm_config
["osm"]["vdu_id"] = vdu_id
8378 namespace
+= ".{}".format(kdu_name
)
8379 element_type
= "KDU"
8380 element_under_configuration
= kdu_name
8381 osm_config
["osm"]["kdu_name"] = kdu_name
# --- locate the charm artifact inside the package store; layout differs when
# the descriptor has a "pkg-dir" (lines between the two branches are missing
# from this extract) ---
8384 if base_folder
["pkg-dir"]:
8385 artifact_path
= "{}/{}/{}/{}".format(
8386 base_folder
["folder"],
8387 base_folder
["pkg-dir"],
8390 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8395 artifact_path
= "{}/Scripts/{}/{}/".format(
8396 base_folder
["folder"],
8399 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8404 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
8406 # get initial_config_primitive_list that applies to this element
8407 initial_config_primitive_list
= config_descriptor
.get(
8408 "initial-config-primitive"
8412 "Initial config primitive list > {}".format(
8413 initial_config_primitive_list
8417 # add config if not present for NS charm
8418 ee_descriptor_id
= ee_config_descriptor
.get("id")
8419 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
# Sort/filter the primitives for this specific execution environment.
8420 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
8421 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
8425 "Initial config primitive list #2 > {}".format(
8426 initial_config_primitive_list
8429 # n2vc_redesign STEP 3.1
8430 # find old ee_id if exists
8431 ee_id
= vca_deployed
.get("ee_id")
8433 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
8434 # create or register execution environment in VCA. Only for native charms when healing
8435 if vca_type
== "native_charm":
8436 step
= "Waiting to VM being up and getting IP address"
8437 self
.logger
.debug(logging_text
+ step
)
8438 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
# Native charms run inside the VNF's VM: build ssh credentials for it.
8447 credentials
= {"hostname": rw_mgmt_ip
}
8449 username
= deep_get(
8450 config_descriptor
, ("config-access", "ssh-access", "default-user")
8452 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
8453 # merged. Meanwhile let's get username from initial-config-primitive
8454 if not username
and initial_config_primitive_list
:
8455 for config_primitive
in initial_config_primitive_list
:
8456 for param
in config_primitive
.get("parameter", ()):
8457 if param
["name"] == "ssh-username":
8458 username
= param
["value"]
# (the raise wrapping this message is on lines missing from this extract)
8462 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8463 "'config-access.ssh-access.default-user'"
8465 credentials
["username"] = username
8467 # n2vc_redesign STEP 3.2
8468 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
# Mark this VCA as REGISTERING in the nsrs configurationStatus before the call.
8469 self
._write
_configuration
_status
(
8471 vca_index
=vca_index
,
8472 status
="REGISTERING",
8473 element_under_configuration
=element_under_configuration
,
8474 element_type
=element_type
,
8477 step
= "register execution environment {}".format(credentials
)
8478 self
.logger
.debug(logging_text
+ step
)
# vca_map is a connector lookup keyed by vca_type (N2VC / helm / ...).
8479 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
8480 credentials
=credentials
,
8481 namespace
=namespace
,
8486 # update ee_id en db
8488 "_admin.deployed.VCA.{}.ee_id".format(vca_index
): ee_id
,
8490 self
.update_db_2("nsrs", nsr_id
, db_dict_ee_id
)
8492 # for compatibility with MON/POL modules, the need model and application name at database
8493 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
8494 # Not sure if this need to be done when healing
8496 ee_id_parts = ee_id.split(".")
8497 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8498 if len(ee_id_parts) >= 2:
8499 model_name = ee_id_parts[0]
8500 application_name = ee_id_parts[1]
8501 db_nsr_update[db_update_entry + "model"] = model_name
8502 db_nsr_update[db_update_entry + "application"] = application_name
8505 # n2vc_redesign STEP 3.3
8506 # Install configuration software. Only for native charms.
8507 step
= "Install configuration Software"
8509 self
._write
_configuration
_status
(
8511 vca_index
=vca_index
,
8512 status
="INSTALLING SW",
8513 element_under_configuration
=element_under_configuration
,
8514 element_type
=element_type
,
8515 # other_update=db_nsr_update,
8519 # TODO check if already done
8520 self
.logger
.debug(logging_text
+ step
)
8522 if vca_type
== "native_charm":
# Pick the primitive literally named "config" (if any) to seed the charm
# configuration before installing the software.
8523 config_primitive
= next(
8524 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
8527 if config_primitive
:
8528 config
= self
._map
_primitive
_params
(
8529 config_primitive
, {}, deploy_params
8531 await self
.vca_map
[vca_type
].install_configuration_sw(
8533 artifact_path
=artifact_path
,
8541 # write in db flag of configuration_sw already installed
8543 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
8546 # Not sure if this need to be done when healing
8548 # add relations for this VCA (wait for other peers related with this VCA)
8549 await self._add_vca_relations(
8550 logging_text=logging_text,
8553 vca_index=vca_index,
8557 # if SSH access is required, then get execution environment SSH public
8558 # if native charm we have waited already to VM be UP
8559 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
8562 # self.logger.debug("get ssh key block")
8564 config_descriptor
, ("config-access", "ssh-access", "required")
8566 # self.logger.debug("ssh key needed")
8567 # Needed to inject a ssh key
8570 ("config-access", "ssh-access", "default-user"),
8572 step
= "Install configuration Software, getting public ssh key"
# NOTE(review): double underscore in get_ee_ssh_public__key is the actual
# connector API name as called here — do not "fix" without checking n2vc.
8573 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
8574 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
8577 step
= "Insert public key into VM user={} ssh_key={}".format(
8581 # self.logger.debug("no need to get ssh key")
8582 step
= "Waiting to VM being up and getting IP address"
8583 self
.logger
.debug(logging_text
+ step
)
8585 # n2vc_redesign STEP 5.1
8586 # wait for RO (ip-address) Insert pub_key into VM
8587 # IMPORTANT: We need do wait for RO to complete healing operation.
8588 await self
._wait
_heal
_ro
(nsr_id
, self
.timeout
.ns_heal
)
# KDU elements wait for the k8s unit; VM-based ones wait for the VM + key.
8591 rw_mgmt_ip
= await self
.wait_kdu_up(
8592 logging_text
, nsr_id
, vnfr_id
, kdu_name
8595 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8605 rw_mgmt_ip
= None # This is for a NS configuration
8607 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
8609 # store rw_mgmt_ip in deploy params for later replacement
8610 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
8613 # get run-day1 operation parameter
8614 runDay1
= deploy_params
.get("run-day1", False)
8616 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id
, vdu_id
, runDay1
)
8619 # n2vc_redesign STEP 6 Execute initial config primitive
8620 step
= "execute initial config primitive"
8622 # wait for dependent primitives execution (NS -> VNF -> VDU)
8623 if initial_config_primitive_list
:
8624 await self
._wait
_dependent
_n
2vc
(
8625 nsr_id
, vca_deployed_list
, vca_index
8628 # stage, in function of element type: vdu, kdu, vnf or ns
8629 my_vca
= vca_deployed_list
[vca_index
]
8630 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
8632 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
8633 elif my_vca
.get("member-vnf-index"):
8635 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
8638 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
8640 self
._write
_configuration
_status
(
8641 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
8644 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
8646 check_if_terminated_needed
= True
8647 for initial_config_primitive
in initial_config_primitive_list
:
8648 # adding information on the vca_deployed if it is a NS execution environment
8649 if not vca_deployed
["member-vnf-index"]:
8650 deploy_params
["ns_config_info"] = json
.dumps(
8651 self
._get
_ns
_config
_info
(nsr_id
)
8653 # TODO check if already done
8654 primitive_params_
= self
._map
_primitive
_params
(
8655 initial_config_primitive
, {}, deploy_params
8658 step
= "execute primitive '{}' params '{}'".format(
8659 initial_config_primitive
["name"], primitive_params_
8661 self
.logger
.debug(logging_text
+ step
)
8662 await self
.vca_map
[vca_type
].exec_primitive(
8664 primitive_name
=initial_config_primitive
["name"],
8665 params_dict
=primitive_params_
,
8670 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
8671 if check_if_terminated_needed
:
8672 if config_descriptor
.get("terminate-config-primitive"):
8676 {db_update_entry
+ "needed_terminate": True},
8678 check_if_terminated_needed
= False
8680 # TODO register in database that primitive is done
8682 # STEP 7 Configure metrics
8683 # Not sure if this need to be done when healing
# helm-based EEs may export Prometheus scrape jobs; persist and register them.
8685 if vca_type == "helm" or vca_type == "helm-v3":
8686 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8688 artifact_path=artifact_path,
8689 ee_config_descriptor=ee_config_descriptor,
8692 target_ip=rw_mgmt_ip,
8698 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8701 for job in prometheus_jobs:
8704 {"job_name": job["job_name"]},
8707 fail_on_empty=False,
# Success path: mark the VCA configurationStatus READY.
8711 step
= "instantiated at VCA"
8712 self
.logger
.debug(logging_text
+ step
)
8714 self
._write
_configuration
_status
(
8715 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
# Failure path: log (CancelledError and known exceptions are filtered in the
# missing lines around 8721-8723), mark BROKEN and re-raise as LcmException.
8718 except Exception as e
: # TODO not use Exception but N2VC exception
8719 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8721 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
8724 "Exception while {} : {}".format(step
, e
), exc_info
=True
8726 self
._write
_configuration
_status
(
8727 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
8729 raise LcmException("{} {}".format(step
, e
)) from e
8731 async def _wait_heal_ro(
# NOTE(review): the parameter list (orig. lines 8732-8736, presumably
# `self, nsr_id, timeout` plus a `start_time = time()` assignment) and the
# loop-exit statement at orig. line 8744 (presumably `break` or `return`) are
# missing from this extract. Only comments were added; code bytes untouched.
#
# Polls the "nsrs" record every 15 seconds until RO's operational-status
# leaves "healing"; raises NgRoException if the timeout window elapses.
8737 while time() <= start_time
+ timeout
:
8738 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8739 operational_status_ro
= db_nsr
["_admin"]["deployed"]["RO"][
8740 "operational-status"
8742 self
.logger
.debug("Wait Heal RO > {}".format(operational_status_ro
))
8743 if operational_status_ro
!= "healing":
# NOTE(review): asyncio.sleep()'s `loop` keyword was deprecated in Python 3.8
# and removed in 3.10 — this call will raise TypeError on 3.10+; confirm the
# target interpreter or drop the argument.
8745 await asyncio
.sleep(15, loop
=self
.loop
)
# while/else: runs only when the loop condition expires without a break.
8746 else: # timeout_ns_deploy
8747 raise NgRoException("Timeout waiting ns to deploy")
8749 async def vertical_scale(self
, nsr_id
, nslcmop_id
):
# NOTE(review): this extract is mangled — statements are split across physical
# lines and several original lines are missing (the embedded line numbers
# jump, e.g. the try/finally keywords around 8772/8813 are absent). Only
# comments were added; every original code byte is preserved.
#
# Orchestrates a vertical-scale of the VDUs in a NS: locks the HA task,
# waits for previous operations, delegates to NG-RO and waits for the RO
# action, then writes operation status and notifies via kafka.
# (The text below at orig. 8751-8754 is the method docstring; its triple
# quotes were lost in extraction.)
8751 Vertical Scale the VDUs in a NS
8753 :param: nsr_id: NS Instance ID
8754 :param: nslcmop_id: nslcmop ID of migrate
8757 # Try to lock HA task here
8758 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
# If another LCM instance owns the operation, this instance must not proceed
# (the early return at the missing line after 8759 — presumably `return`).
8759 if not task_is_locked_by_me
:
8761 logging_text
= "Task ns={} vertical scale ".format(nsr_id
)
8762 self
.logger
.debug(logging_text
+ "Enter")
8763 # get all needed from database
8765 db_nslcmop_update
= {}
8766 nslcmop_operation_state
= None
8770 # in case of error, indicates what part of scale was failed to put nsr at error status
# NOTE(review): start_deploy is assigned but never read in the visible lines.
8771 start_deploy
= time()
8774 # wait for any previous tasks in process
8775 step
= "Waiting for previous operations to terminate"
8776 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
8778 self
._write
_ns
_status
(
8781 current_operation
="VerticalScale",
8782 current_operation_id
=nslcmop_id
,
8784 step
= "Getting nslcmop from database"
8786 step
+ " after having waited for previous tasks to be completed"
8788 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8789 operationParams
= db_nslcmop
.get("operationParams")
# Build the NG-RO target from the operation parameters and delegate.
8791 target
.update(operationParams
)
8792 desc
= await self
.RO
.vertical_scale(nsr_id
, target
)
8793 self
.logger
.debug("RO return > {}".format(desc
))
8794 action_id
= desc
["action_id"]
# Block until NG-RO reports the verticalscale action finished (or times out).
8795 await self
._wait
_ng
_ro
(
8800 self
.timeout
.verticalscale
,
8801 operation
="verticalscale",
# --- error handling: known client/db/lcm errors, cancellation, and a
# catch-all that records the traceback (the `exc = ...` assignment for the
# first branch is on a missing line) ---
8803 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
8804 self
.logger
.error("Exit Exception {}".format(e
))
8806 except asyncio
.CancelledError
:
8807 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
8808 exc
= "Operation was cancelled"
8809 except Exception as e
:
8810 exc
= traceback
.format_exc()
8811 self
.logger
.critical(
8812 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
# --- wrap-up (presumably a finally block; keyword on a missing line):
# reset ns status to IDLE, derive COMPLETED/FAILED, write op status ---
8815 self
._write
_ns
_status
(
8818 current_operation
="IDLE",
8819 current_operation_id
=None,
8822 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
8823 nslcmop_operation_state
= "FAILED"
8825 nslcmop_operation_state
= "COMPLETED"
8826 db_nslcmop_update
["detailed-status"] = "Done"
8827 db_nsr_update
["detailed-status"] = "Done"
8829 self
._write
_op
_status
(
8833 operation_state
=nslcmop_operation_state
,
8834 other_update
=db_nslcmop_update
,
# Notify the result on the kafka bus; notification failures are only logged.
8836 if nslcmop_operation_state
:
8840 "nslcmop_id": nslcmop_id
,
8841 "operationState": nslcmop_operation_state
,
# NOTE(review): the `loop` keyword here is passed to the project's msg bus
# aiowrite(); if it forwards to asyncio primitives it will break on Python
# 3.10+ (loop parameter removed) — confirm against osm_common.msgbase.
8843 await self
.msg
.aiowrite("ns", "verticalscaled", msg
, loop
=self
.loop
)
8844 except Exception as e
:
8846 logging_text
+ "kafka_write notification Exception {}".format(e
)
8848 self
.logger
.debug(logging_text
+ "Exit")
# Release the HA task registration for this operation.
8849 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_verticalscale")