1 # -*- coding: utf-8 -*-
4 # Copyright 2018 Telefonica S.A.
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
21 from typing
import Any
, Dict
, List
24 import logging
.handlers
37 from osm_lcm
import ROclient
38 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
39 from osm_lcm
.data_utils
.nsr
import (
42 get_deployed_vca_list
,
45 from osm_lcm
.data_utils
.vca
import (
54 from osm_lcm
.ng_ro
import NgRoClient
, NgRoException
55 from osm_lcm
.lcm_utils
import (
61 check_juju_bundle_existence
,
62 get_charm_artifact_path
,
66 from osm_lcm
.data_utils
.nsd
import (
67 get_ns_configuration_relation_list
,
71 from osm_lcm
.data_utils
.vnfd
import (
77 get_ee_sorted_initial_config_primitive_list
,
78 get_ee_sorted_terminate_config_primitive_list
,
80 get_virtual_link_profiles
,
85 get_number_of_instances
,
87 get_kdu_resource_profile
,
88 find_software_version
,
91 from osm_lcm
.data_utils
.list_utils
import find_in_list
92 from osm_lcm
.data_utils
.vnfr
import (
96 get_volumes_from_instantiation_params
,
98 from osm_lcm
.data_utils
.dict_utils
import parse_yaml_strings
99 from osm_lcm
.data_utils
.database
.vim_account
import VimAccountDB
100 from n2vc
.definitions
import RelationEndpoint
101 from n2vc
.k8s_helm3_conn
import K8sHelm3Connector
102 from n2vc
.k8s_juju_conn
import K8sJujuConnector
104 from osm_common
.dbbase
import DbException
105 from osm_common
.fsbase
import FsException
107 from osm_lcm
.data_utils
.database
.database
import Database
108 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
109 from osm_lcm
.data_utils
.wim
import (
111 get_target_wim_attrs
,
112 select_feasible_wim_account
,
115 from n2vc
.n2vc_juju_conn
import N2VCJujuConnector
116 from n2vc
.exceptions
import N2VCException
, N2VCNotFound
, K8sException
118 from osm_lcm
.lcm_helm_conn
import LCMHelmConn
119 from osm_lcm
.osm_config
import OsmConfigBuilder
120 from osm_lcm
.prometheus
import parse_job
122 from copy
import copy
, deepcopy
123 from time
import time
124 from uuid
import uuid4
126 from random
import SystemRandom
128 __author__
= "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
131 class NsLcm(LcmBase
):
132 SUBOPERATION_STATUS_NOT_FOUND
= -1
133 SUBOPERATION_STATUS_NEW
= -2
134 SUBOPERATION_STATUS_SKIP
= -3
135 EE_TLS_NAME
= "ee-tls"
136 task_name_deploy_vca
= "Deploying VCA"
137 rel_operation_types
= {
146 def __init__(self
, msg
, lcm_tasks
, config
: LcmCfg
):
148 Init, Connect to database, filesystem storage, and messaging
149 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
152 super().__init
__(msg
=msg
, logger
=logging
.getLogger("lcm.ns"))
154 self
.db
= Database().instance
.db
155 self
.fs
= Filesystem().instance
.fs
156 self
.lcm_tasks
= lcm_tasks
157 self
.timeout
= config
.timeout
158 self
.ro_config
= config
.RO
159 self
.vca_config
= config
.VCA
161 # create N2VC connector
162 self
.n2vc
= N2VCJujuConnector(
164 on_update_db
=self
._on
_update
_n
2vc
_db
,
169 self
.conn_helm_ee
= LCMHelmConn(
171 vca_config
=self
.vca_config
,
172 on_update_db
=self
._on
_update
_n
2vc
_db
,
175 self
.k8sclusterhelm3
= K8sHelm3Connector(
176 kubectl_command
=self
.vca_config
.kubectlpath
,
177 helm_command
=self
.vca_config
.helm3path
,
184 self
.k8sclusterjuju
= K8sJujuConnector(
185 kubectl_command
=self
.vca_config
.kubectlpath
,
186 juju_command
=self
.vca_config
.jujupath
,
188 on_update_db
=self
._on
_update
_k
8s
_db
,
193 self
.k8scluster_map
= {
194 "helm-chart-v3": self
.k8sclusterhelm3
,
195 "chart": self
.k8sclusterhelm3
,
196 "juju-bundle": self
.k8sclusterjuju
,
197 "juju": self
.k8sclusterjuju
,
201 "lxc_proxy_charm": self
.n2vc
,
202 "native_charm": self
.n2vc
,
203 "k8s_proxy_charm": self
.n2vc
,
204 "helm": self
.conn_helm_ee
,
205 "helm-v3": self
.conn_helm_ee
,
209 self
.RO
= NgRoClient(**self
.ro_config
.to_dict())
211 self
.op_status_map
= {
212 "instantiation": self
.RO
.status
,
213 "termination": self
.RO
.status
,
214 "migrate": self
.RO
.status
,
215 "healing": self
.RO
.recreate_status
,
216 "verticalscale": self
.RO
.status
,
217 "start_stop_rebuild": self
.RO
.status
,
def increment_ip_mac(ip_mac, vm_index=1):
    """Return *ip_mac* incremented by *vm_index*.

    Accepts an IPv4 address ("10.0.0.1"), an IPv6 address or MAC
    ("aa:bb:cc:dd:ee:0a"), or a dual-stack value "ipv4;ipv6" (both parts
    incremented by 1, returned as a [ipv4, ipv6] list).

    :param ip_mac: address string; non-string input is returned unchanged
    :param vm_index: increment to apply (single-address forms only)
    :return: incremented string, [ipv4, ipv6] list for dual-stack input,
        or None when the value cannot be parsed (best-effort semantics)
    """
    if not isinstance(ip_mac, str):
        return ip_mac
    try:
        next_ipv6 = None
        next_ipv4 = None
        # dual-stack value "a.b.c.d;xxxx::y": increment each family by 1
        dual_ip = ip_mac.split(";")
        if len(dual_ip) == 2:
            for ip in dual_ip:
                if ipaddress.ip_address(ip).version == 6:
                    ipv6 = ipaddress.IPv6Address(ip)
                    next_ipv6 = str(ipaddress.IPv6Address(int(ipv6) + 1))
                elif ipaddress.ip_address(ip).version == 4:
                    ipv4 = ipaddress.IPv4Address(ip)
                    next_ipv4 = str(ipaddress.IPv4Address(int(ipv4) + 1))
            return [next_ipv4, next_ipv6]
        # try with ipv4 look for last dot
        i = ip_mac.rfind(".")
        if i > 0:
            i += 1
            return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
        # try with ipv6 or mac look for last colon. Operate in hex
        i = ip_mac.rfind(":")
        if i > 0:
            i += 1
            # format in hex, len can be 2 for mac or 4 for ipv6
            return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
                ip_mac[:i], int(ip_mac[i:], 16) + vm_index
            )
    except Exception:
        # best effort: unparsable values fall through to None
        pass
    return None
254 async def _on_update_n2vc_db(self
, table
, filter, path
, updated_data
, vca_id
=None):
255 # remove last dot from path (if exists)
256 if path
.endswith("."):
259 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
260 # .format(table, filter, path, updated_data))
262 nsr_id
= filter.get("_id")
264 # read ns record from database
265 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
266 current_ns_status
= nsr
.get("nsState")
268 # First, we need to verify if the current vcaStatus is null, because if that is the case,
269 # MongoDB will not be able to create the fields used within the update key in the database
270 if not nsr
.get("vcaStatus"):
271 # Write an empty dictionary to the vcaStatus field, it its value is null
272 self
.update_db_2("nsrs", nsr_id
, {"vcaStatus": dict()})
274 # Get vca status for NS
275 status_dict
= await self
.n2vc
.get_status(
276 namespace
="." + nsr_id
, yaml_format
=False, vca_id
=vca_id
279 # Update the vcaStatus
280 db_key
= f
"vcaStatus.{nsr_id}.VNF"
283 db_dict
[db_key
] = status_dict
[nsr_id
]
284 await self
.n2vc
.update_vca_status(db_dict
[db_key
], vca_id
=vca_id
)
286 # update configurationStatus for this VCA
288 vca_index
= int(path
[path
.rfind(".") + 1 :])
291 target_dict
=nsr
, key_list
=("_admin", "deployed", "VCA")
293 vca_status
= vca_list
[vca_index
].get("status")
295 configuration_status_list
= nsr
.get("configurationStatus")
296 config_status
= configuration_status_list
[vca_index
].get("status")
298 if config_status
== "BROKEN" and vca_status
!= "failed":
299 db_dict
["configurationStatus"][vca_index
] = "READY"
300 elif config_status
!= "BROKEN" and vca_status
== "failed":
301 db_dict
["configurationStatus"][vca_index
] = "BROKEN"
302 except Exception as e
:
303 # not update configurationStatus
304 self
.logger
.debug("Error updating vca_index (ignore): {}".format(e
))
306 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
307 # if nsState = 'DEGRADED' check if all is OK
309 if current_ns_status
in ("READY", "DEGRADED"):
310 error_description
= ""
312 if status_dict
.get("machines"):
313 for machine_id
in status_dict
.get("machines"):
314 machine
= status_dict
.get("machines").get(machine_id
)
315 # check machine agent-status
316 if machine
.get("agent-status"):
317 s
= machine
.get("agent-status").get("status")
320 error_description
+= (
321 "machine {} agent-status={} ; ".format(
325 # check machine instance status
326 if machine
.get("instance-status"):
327 s
= machine
.get("instance-status").get("status")
330 error_description
+= (
331 "machine {} instance-status={} ; ".format(
336 if status_dict
.get("applications"):
337 for app_id
in status_dict
.get("applications"):
338 app
= status_dict
.get("applications").get(app_id
)
339 # check application status
340 if app
.get("status"):
341 s
= app
.get("status").get("status")
344 error_description
+= (
345 "application {} status={} ; ".format(app_id
, s
)
348 if error_description
:
349 db_dict
["errorDescription"] = error_description
350 if current_ns_status
== "READY" and is_degraded
:
351 db_dict
["nsState"] = "DEGRADED"
352 if current_ns_status
== "DEGRADED" and not is_degraded
:
353 db_dict
["nsState"] = "READY"
356 self
.update_db_2("nsrs", nsr_id
, db_dict
)
358 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
360 except Exception as e
:
361 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
363 async def _on_update_k8s_db(
364 self
, cluster_uuid
, kdu_instance
, filter=None, vca_id
=None, cluster_type
="juju"
367 Updating vca status in NSR record
368 :param cluster_uuid: UUID of a k8s cluster
369 :param kdu_instance: The unique name of the KDU instance
370 :param filter: To get nsr_id
371 :cluster_type: The cluster type (juju, k8s)
375 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
376 # .format(cluster_uuid, kdu_instance, filter))
378 nsr_id
= filter.get("_id")
380 vca_status
= await self
.k8scluster_map
[cluster_type
].status_kdu(
381 cluster_uuid
=cluster_uuid
,
382 kdu_instance
=kdu_instance
,
384 complete_status
=True,
388 # First, we need to verify if the current vcaStatus is null, because if that is the case,
389 # MongoDB will not be able to create the fields used within the update key in the database
390 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
391 if not nsr
.get("vcaStatus"):
392 # Write an empty dictionary to the vcaStatus field, it its value is null
393 self
.update_db_2("nsrs", nsr_id
, {"vcaStatus": dict()})
395 # Update the vcaStatus
396 db_key
= f
"vcaStatus.{nsr_id}.KNF"
399 db_dict
[db_key
] = vca_status
401 if cluster_type
in ("juju-bundle", "juju"):
402 # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
403 # status in a similar way between Juju Bundles and Helm Charts on this side
404 await self
.k8sclusterjuju
.update_vca_status(
411 f
"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
415 self
.update_db_2("nsrs", nsr_id
, db_dict
)
416 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
418 except Exception as e
:
419 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
    """Render *cloud_init_text* as a Jinja2 template with *additional_params*.

    :param cloud_init_text: raw cloud-init template text
    :param additional_params: values for the template variables (may be None)
    :param vnfd_id: vnfd id, used only to build error messages
    :param vdu_id: vdu id, used only to build error messages
    :return: the rendered cloud-init content
    :raises LcmException: when a template variable is undefined or the
        template cannot be parsed
    """
    try:
        # StrictUndefined so that missing variables raise instead of
        # silently rendering empty strings
        env = Environment(
            undefined=StrictUndefined,
            autoescape=select_autoescape(default_for_string=True, default=True),
        )
        template = env.from_string(cloud_init_text)
        return template.render(additional_params or {})
    except UndefinedError as e:
        raise LcmException(
            "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
            "file, must be provided in the instantiation parameters inside the "
            "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
        )
    except (TemplateError, TemplateNotFound) as e:
        raise LcmException(
            "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
                vnfd_id, vdu_id, e
            )
        )
def _get_vdu_cloud_init_content(self, vdu, vnfd):
    """Return the cloud-init text for *vdu*, or None when it has none.

    Reads the referenced cloud-init file from package storage when the
    vdu declares "cloud-init-file"; otherwise returns the inline
    "cloud-init" text if present.

    :param vdu: vdu descriptor (may contain cloud-init-file / cloud-init)
    :param vnfd: vnfd record providing _admin.storage package location
    :return: cloud-init content string, or None
    :raises LcmException: when the cloud-init file cannot be read
    """
    cloud_init_content = cloud_init_file = None
    try:
        if vdu.get("cloud-init-file"):
            base_folder = vnfd["_admin"]["storage"]
            if base_folder["pkg-dir"]:
                # classic package layout: <folder>/<pkg-dir>/cloud_init/<file>
                cloud_init_file = "{}/{}/cloud_init/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    vdu["cloud-init-file"],
                )
            else:
                # SOL004 layout: <folder>/Scripts/cloud_init/<file>
                cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                    base_folder["folder"],
                    vdu["cloud-init-file"],
                )
            with self.fs.file_open(cloud_init_file, "r") as ci_file:
                cloud_init_content = ci_file.read()
        elif vdu.get("cloud-init"):
            cloud_init_content = vdu["cloud-init"]

        return cloud_init_content
    except FsException as e:
        raise LcmException(
            "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                vnfd["id"], vdu["id"], cloud_init_file, e
            )
        )
def _get_vdu_additional_params(self, db_vnfr, vdu_id):
    """Return the parsed additionalParams of the first vdur matching *vdu_id*.

    :param db_vnfr: vnfr record containing a "vdur" list
    :param vdu_id: vdu-id-ref to look up
    :return: dict of additional params with any YAML-string values parsed
        (empty result when no matching vdur or no additionalParams)
    """
    # first matching vdur, or {} so .get below is always safe
    vdur = next(
        (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
    )
    additional_params = vdur.get("additionalParams")
    return parse_yaml_strings(additional_params)
def ip_profile_2_RO(ip_profile):
    """Translate an OSM ip-profile dict into the RO ip-profile format.

    Works on a deep copy, so the input dict is never modified. Renames
    "dns-server" -> "dns-address" (flattening {"address": ...} entries
    when the value is a list), normalizes "ip-version" capitalization
    ("ipv4"/"ipv6" -> "IPv4"/"IPv6") and renames "dhcp-params" -> "dhcp".

    :param ip_profile: OSM-style ip-profile dictionary
    :return: new dictionary in RO format
    """
    RO_ip_profile = deepcopy(ip_profile)
    if "dns-server" in RO_ip_profile:
        if isinstance(RO_ip_profile["dns-server"], list):
            RO_ip_profile["dns-address"] = []
            for ds in RO_ip_profile.pop("dns-server"):
                RO_ip_profile["dns-address"].append(ds["address"])
        else:
            RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
    if RO_ip_profile.get("ip-version") == "ipv4":
        RO_ip_profile["ip-version"] = "IPv4"
    if RO_ip_profile.get("ip-version") == "ipv6":
        RO_ip_profile["ip-version"] = "IPv6"
    if "dhcp-params" in RO_ip_profile:
        RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
    return RO_ip_profile
497 def scale_vnfr(self
, db_vnfr
, vdu_create
=None, vdu_delete
=None, mark_delete
=False):
498 db_vdu_push_list
= []
500 db_update
= {"_admin.modified": time()}
502 for vdu_id
, vdu_count
in vdu_create
.items():
506 for vdur
in reversed(db_vnfr
["vdur"])
507 if vdur
["vdu-id-ref"] == vdu_id
512 # Read the template saved in the db:
514 "No vdur in the database. Using the vdur-template to scale"
516 vdur_template
= db_vnfr
.get("vdur-template")
517 if not vdur_template
:
519 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
523 vdur
= vdur_template
[0]
524 # Delete a template from the database after using it
527 {"_id": db_vnfr
["_id"]},
529 pull
={"vdur-template": {"_id": vdur
["_id"]}},
531 for count
in range(vdu_count
):
532 vdur_copy
= deepcopy(vdur
)
533 vdur_copy
["status"] = "BUILD"
534 vdur_copy
["status-detailed"] = None
535 vdur_copy
["ip-address"] = None
536 vdur_copy
["_id"] = str(uuid4())
537 vdur_copy
["count-index"] += count
+ 1
538 vdur_copy
["id"] = "{}-{}".format(
539 vdur_copy
["vdu-id-ref"], vdur_copy
["count-index"]
541 vdur_copy
.pop("vim_info", None)
542 for iface
in vdur_copy
["interfaces"]:
543 if iface
.get("fixed-ip"):
544 iface
["ip-address"] = self
.increment_ip_mac(
545 iface
["ip-address"], count
+ 1
548 iface
.pop("ip-address", None)
549 if iface
.get("fixed-mac"):
550 iface
["mac-address"] = self
.increment_ip_mac(
551 iface
["mac-address"], count
+ 1
554 iface
.pop("mac-address", None)
558 ) # only first vdu can be managment of vnf
559 db_vdu_push_list
.append(vdur_copy
)
560 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
562 if len(db_vnfr
["vdur"]) == 1:
563 # The scale will move to 0 instances
565 "Scaling to 0 !, creating the template with the last vdur"
567 template_vdur
= [db_vnfr
["vdur"][0]]
568 for vdu_id
, vdu_count
in vdu_delete
.items():
570 indexes_to_delete
= [
572 for iv
in enumerate(db_vnfr
["vdur"])
573 if iv
[1]["vdu-id-ref"] == vdu_id
577 "vdur.{}.status".format(i
): "DELETING"
578 for i
in indexes_to_delete
[-vdu_count
:]
582 # it must be deleted one by one because common.db does not allow otherwise
585 for v
in reversed(db_vnfr
["vdur"])
586 if v
["vdu-id-ref"] == vdu_id
588 for vdu
in vdus_to_delete
[:vdu_count
]:
591 {"_id": db_vnfr
["_id"]},
593 pull
={"vdur": {"_id": vdu
["_id"]}},
597 db_push
["vdur"] = db_vdu_push_list
599 db_push
["vdur-template"] = template_vdur
602 db_vnfr
["vdur-template"] = template_vdur
603 self
.db
.set_one("vnfrs", {"_id": db_vnfr
["_id"]}, db_update
, push_list
=db_push
)
604 # modify passed dictionary db_vnfr
605 db_vnfr_
= self
.db
.get_one("vnfrs", {"_id": db_vnfr
["_id"]})
606 db_vnfr
["vdur"] = db_vnfr_
["vdur"]
def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
    """
    Updates database nsr with the RO info for the created vld
    :param ns_update_nsr: dictionary to be filled with the updated info
    :param db_nsr: content of db_nsr. This is also modified
    :param nsr_desc_RO: nsr descriptor from RO
    :return: Nothing, LcmException is raised on errors
    """
    for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
        for net_RO in get_iterable(nsr_desc_RO, "nets"):
            if vld["id"] != net_RO.get("ns_net_osm_id"):
                continue
            # copy the RO view of this net into the nsr vld entry
            vld["vim-id"] = net_RO.get("vim_net_id")
            vld["name"] = net_RO.get("vim_name")
            vld["status"] = net_RO.get("status")
            vld["status-detailed"] = net_RO.get("error_msg")
            ns_update_nsr["vld.{}".format(vld_index)] = vld
            break
        else:
            # no RO net matched this vld: inconsistent RO answer
            raise LcmException(
                "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
            )
def set_vnfr_at_error(self, db_vnfrs, error_text):
    """Mark every vnfr (and any vdur lacking a status) as ERROR in the DB.

    :param db_vnfrs: dict of vnfr records (values are the records)
    :param error_text: detail text stored at each vdur "status-detailed"
    Database errors are logged and swallowed (best-effort update).
    """
    try:
        for db_vnfr in db_vnfrs.values():
            vnfr_update = {"status": "ERROR"}
            for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                # only vdur entries without a status are touched
                if "status" not in vdur:
                    vdur["status"] = "ERROR"
                    vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                    if error_text:
                        vdur["status-detailed"] = str(error_text)
                        vnfr_update[
                            "vdur.{}.status-detailed".format(vdu_index)
                        ] = "ERROR"
            self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
    except DbException as e:
        self.logger.error("Cannot update vnf. {}".format(e))
def _get_ns_config_info(self, nsr_id):
    """
    Generates a mapping between vnf,vdu elements and the N2VC id
    :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
    :return: a dictionary with {osm-config-mapping: {}} where its element contains:
        "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
        "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
    """
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
    mapping = {}
    ns_config_info = {"osm-config-mapping": mapping}
    for vca in vca_deployed_list:
        # entries without a member-vnf-index are ns-level, not mapped
        if not vca["member-vnf-index"]:
            continue
        if not vca["vdu_id"]:
            # vnf-level configuration
            mapping[vca["member-vnf-index"]] = vca["application"]
        else:
            # vdu-level configuration, keyed by vnf-index.vdu-id.replica
            mapping[
                "{}.{}.{}".format(
                    vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
                )
            ] = vca["application"]
    return ns_config_info
674 async def _instantiate_ng_ro(
690 def get_vim_account(vim_account_id
):
692 if vim_account_id
in db_vims
:
693 return db_vims
[vim_account_id
]
694 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
695 db_vims
[vim_account_id
] = db_vim
698 # modify target_vld info with instantiation parameters
699 def parse_vld_instantiation_params(
700 target_vim
, target_vld
, vld_params
, target_sdn
702 if vld_params
.get("ip-profile"):
703 target_vld
["vim_info"][target_vim
]["ip_profile"] = vld_to_ro_ip_profile(
704 vld_params
["ip-profile"]
706 if vld_params
.get("provider-network"):
707 target_vld
["vim_info"][target_vim
]["provider_network"] = vld_params
[
710 if "sdn-ports" in vld_params
["provider-network"] and target_sdn
:
711 target_vld
["vim_info"][target_sdn
]["sdn-ports"] = vld_params
[
715 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
716 # if wim_account_id is specified in vld_params, validate if it is feasible.
717 wim_account_id
, db_wim
= select_feasible_wim_account(
718 db_nsr
, db_vnfrs
, target_vld
, vld_params
, self
.logger
722 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
723 self
.logger
.info("WIM selected: {:s}".format(str(wim_account_id
)))
724 # update vld_params with correct WIM account Id
725 vld_params
["wimAccountId"] = wim_account_id
727 target_wim
= "wim:{}".format(wim_account_id
)
728 target_wim_attrs
= get_target_wim_attrs(nsr_id
, target_vld
, vld_params
)
729 sdn_ports
= get_sdn_ports(vld_params
, db_wim
)
730 if len(sdn_ports
) > 0:
731 target_vld
["vim_info"][target_wim
] = target_wim_attrs
732 target_vld
["vim_info"][target_wim
]["sdn-ports"] = sdn_ports
735 "Target VLD with WIM data: {:s}".format(str(target_vld
))
738 for param
in ("vim-network-name", "vim-network-id"):
739 if vld_params
.get(param
):
740 if isinstance(vld_params
[param
], dict):
741 for vim
, vim_net
in vld_params
[param
].items():
742 other_target_vim
= "vim:" + vim
744 target_vld
["vim_info"],
745 (other_target_vim
, param
.replace("-", "_")),
748 else: # isinstance str
749 target_vld
["vim_info"][target_vim
][
750 param
.replace("-", "_")
751 ] = vld_params
[param
]
752 if vld_params
.get("common_id"):
753 target_vld
["common_id"] = vld_params
.get("common_id")
755 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
756 def update_ns_vld_target(target
, ns_params
):
757 for vnf_params
in ns_params
.get("vnf", ()):
758 if vnf_params
.get("vimAccountId"):
762 for vnfr
in db_vnfrs
.values()
763 if vnf_params
["member-vnf-index"]
764 == vnfr
["member-vnf-index-ref"]
768 vdur
= next((vdur
for vdur
in target_vnf
.get("vdur", ())), None)
771 for a_index
, a_vld
in enumerate(target
["ns"]["vld"]):
772 target_vld
= find_in_list(
773 get_iterable(vdur
, "interfaces"),
774 lambda iface
: iface
.get("ns-vld-id") == a_vld
["name"],
777 vld_params
= find_in_list(
778 get_iterable(ns_params
, "vld"),
779 lambda v_vld
: v_vld
["name"] in (a_vld
["name"], a_vld
["id"]),
782 if vnf_params
.get("vimAccountId") not in a_vld
.get(
785 target_vim_network_list
= [
786 v
for _
, v
in a_vld
.get("vim_info").items()
788 target_vim_network_name
= next(
790 item
.get("vim_network_name", "")
791 for item
in target_vim_network_list
796 target
["ns"]["vld"][a_index
].get("vim_info").update(
798 "vim:{}".format(vnf_params
["vimAccountId"]): {
799 "vim_network_name": target_vim_network_name
,
805 for param
in ("vim-network-name", "vim-network-id"):
806 if vld_params
.get(param
) and isinstance(
807 vld_params
[param
], dict
809 for vim
, vim_net
in vld_params
[
812 other_target_vim
= "vim:" + vim
814 target
["ns"]["vld"][a_index
].get(
819 param
.replace("-", "_"),
824 nslcmop_id
= db_nslcmop
["_id"]
826 "name": db_nsr
["name"],
829 "image": deepcopy(db_nsr
["image"]),
830 "flavor": deepcopy(db_nsr
["flavor"]),
831 "action_id": nslcmop_id
,
832 "cloud_init_content": {},
834 for image
in target
["image"]:
835 image
["vim_info"] = {}
836 for flavor
in target
["flavor"]:
837 flavor
["vim_info"] = {}
838 if db_nsr
.get("shared-volumes"):
839 target
["shared-volumes"] = deepcopy(db_nsr
["shared-volumes"])
840 for shared_volumes
in target
["shared-volumes"]:
841 shared_volumes
["vim_info"] = {}
842 if db_nsr
.get("affinity-or-anti-affinity-group"):
843 target
["affinity-or-anti-affinity-group"] = deepcopy(
844 db_nsr
["affinity-or-anti-affinity-group"]
846 for affinity_or_anti_affinity_group
in target
[
847 "affinity-or-anti-affinity-group"
849 affinity_or_anti_affinity_group
["vim_info"] = {}
851 if db_nslcmop
.get("lcmOperationType") != "instantiate":
852 # get parameters of instantiation:
853 db_nslcmop_instantiate
= self
.db
.get_list(
856 "nsInstanceId": db_nslcmop
["nsInstanceId"],
857 "lcmOperationType": "instantiate",
860 ns_params
= db_nslcmop_instantiate
.get("operationParams")
862 ns_params
= db_nslcmop
.get("operationParams")
863 ssh_keys_instantiation
= ns_params
.get("ssh_keys") or []
864 ssh_keys_all
= ssh_keys_instantiation
+ (n2vc_key_list
or [])
867 for vld_index
, vld
in enumerate(db_nsr
.get("vld")):
868 target_vim
= "vim:{}".format(ns_params
["vimAccountId"])
872 "mgmt-network": vld
.get("mgmt-network", False),
873 "type": vld
.get("type"),
876 "vim_network_name": vld
.get("vim-network-name"),
877 "vim_account_id": ns_params
["vimAccountId"],
881 # check if this network needs SDN assist
882 if vld
.get("pci-interfaces"):
883 db_vim
= get_vim_account(ns_params
["vimAccountId"])
884 if vim_config
:= db_vim
.get("config"):
885 if sdnc_id
:= vim_config
.get("sdn-controller"):
886 sdn_vld
= "nsrs:{}:vld.{}".format(nsr_id
, vld
["id"])
887 target_sdn
= "sdn:{}".format(sdnc_id
)
888 target_vld
["vim_info"][target_sdn
] = {
890 "target_vim": target_vim
,
892 "type": vld
.get("type"),
895 nsd_vnf_profiles
= get_vnf_profiles(nsd
)
896 for nsd_vnf_profile
in nsd_vnf_profiles
:
897 for cp
in nsd_vnf_profile
["virtual-link-connectivity"]:
898 if cp
["virtual-link-profile-id"] == vld
["id"]:
900 "member_vnf:{}.{}".format(
901 cp
["constituent-cpd-id"][0][
902 "constituent-base-element-id"
904 cp
["constituent-cpd-id"][0]["constituent-cpd-id"],
906 ] = "nsrs:{}:vld.{}".format(nsr_id
, vld_index
)
908 # check at nsd descriptor, if there is an ip-profile
910 nsd_vlp
= find_in_list(
911 get_virtual_link_profiles(nsd
),
912 lambda a_link_profile
: a_link_profile
["virtual-link-desc-id"]
917 and nsd_vlp
.get("virtual-link-protocol-data")
918 and nsd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
920 vld_params
["ip-profile"] = nsd_vlp
["virtual-link-protocol-data"][
924 # update vld_params with instantiation params
925 vld_instantiation_params
= find_in_list(
926 get_iterable(ns_params
, "vld"),
927 lambda a_vld
: a_vld
["name"] in (vld
["name"], vld
["id"]),
929 if vld_instantiation_params
:
930 vld_params
.update(vld_instantiation_params
)
931 parse_vld_instantiation_params(target_vim
, target_vld
, vld_params
, None)
932 target
["ns"]["vld"].append(target_vld
)
933 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
934 update_ns_vld_target(target
, ns_params
)
936 for vnfr
in db_vnfrs
.values():
938 db_vnfds
, lambda db_vnf
: db_vnf
["id"] == vnfr
["vnfd-ref"]
940 vnf_params
= find_in_list(
941 get_iterable(ns_params
, "vnf"),
942 lambda a_vnf
: a_vnf
["member-vnf-index"] == vnfr
["member-vnf-index-ref"],
944 target_vnf
= deepcopy(vnfr
)
945 target_vim
= "vim:{}".format(vnfr
["vim-account-id"])
946 for vld
in target_vnf
.get("vld", ()):
947 # check if connected to a ns.vld, to fill target'
948 vnf_cp
= find_in_list(
949 vnfd
.get("int-virtual-link-desc", ()),
950 lambda cpd
: cpd
.get("id") == vld
["id"],
953 ns_cp
= "member_vnf:{}.{}".format(
954 vnfr
["member-vnf-index-ref"], vnf_cp
["id"]
956 if cp2target
.get(ns_cp
):
957 vld
["target"] = cp2target
[ns_cp
]
960 target_vim
: {"vim_network_name": vld
.get("vim-network-name")}
962 # check if this network needs SDN assist
964 if vld
.get("pci-interfaces"):
965 db_vim
= get_vim_account(vnfr
["vim-account-id"])
966 sdnc_id
= db_vim
["config"].get("sdn-controller")
968 sdn_vld
= "vnfrs:{}:vld.{}".format(target_vnf
["_id"], vld
["id"])
969 target_sdn
= "sdn:{}".format(sdnc_id
)
970 vld
["vim_info"][target_sdn
] = {
972 "target_vim": target_vim
,
974 "type": vld
.get("type"),
977 # check at vnfd descriptor, if there is an ip-profile
979 vnfd_vlp
= find_in_list(
980 get_virtual_link_profiles(vnfd
),
981 lambda a_link_profile
: a_link_profile
["id"] == vld
["id"],
985 and vnfd_vlp
.get("virtual-link-protocol-data")
986 and vnfd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
988 vld_params
["ip-profile"] = vnfd_vlp
["virtual-link-protocol-data"][
991 # update vld_params with instantiation params
993 vld_instantiation_params
= find_in_list(
994 get_iterable(vnf_params
, "internal-vld"),
995 lambda i_vld
: i_vld
["name"] == vld
["id"],
997 if vld_instantiation_params
:
998 vld_params
.update(vld_instantiation_params
)
999 parse_vld_instantiation_params(target_vim
, vld
, vld_params
, target_sdn
)
1002 for vdur
in target_vnf
.get("vdur", ()):
1003 if vdur
.get("status") == "DELETING" or vdur
.get("pdu-type"):
1004 continue # This vdu must not be created
1005 vdur
["vim_info"] = {"vim_account_id": vnfr
["vim-account-id"]}
1007 self
.logger
.debug("NS > ssh_keys > {}".format(ssh_keys_all
))
1010 vdu_configuration
= get_configuration(vnfd
, vdur
["vdu-id-ref"])
1011 vnf_configuration
= get_configuration(vnfd
, vnfd
["id"])
1014 and vdu_configuration
.get("config-access")
1015 and vdu_configuration
.get("config-access").get("ssh-access")
1017 vdur
["ssh-keys"] = ssh_keys_all
1018 vdur
["ssh-access-required"] = vdu_configuration
[
1020 ]["ssh-access"]["required"]
1023 and vnf_configuration
.get("config-access")
1024 and vnf_configuration
.get("config-access").get("ssh-access")
1025 and any(iface
.get("mgmt-vnf") for iface
in vdur
["interfaces"])
1027 vdur
["ssh-keys"] = ssh_keys_all
1028 vdur
["ssh-access-required"] = vnf_configuration
[
1030 ]["ssh-access"]["required"]
1031 elif ssh_keys_instantiation
and find_in_list(
1032 vdur
["interfaces"], lambda iface
: iface
.get("mgmt-vnf")
1034 vdur
["ssh-keys"] = ssh_keys_instantiation
1036 self
.logger
.debug("NS > vdur > {}".format(vdur
))
1038 vdud
= get_vdu(vnfd
, vdur
["vdu-id-ref"])
1040 if vdud
.get("cloud-init-file"):
1041 vdur
["cloud-init"] = "{}:file:{}".format(
1042 vnfd
["_id"], vdud
.get("cloud-init-file")
1044 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1045 if vdur
["cloud-init"] not in target
["cloud_init_content"]:
1046 base_folder
= vnfd
["_admin"]["storage"]
1047 if base_folder
["pkg-dir"]:
1048 cloud_init_file
= "{}/{}/cloud_init/{}".format(
1049 base_folder
["folder"],
1050 base_folder
["pkg-dir"],
1051 vdud
.get("cloud-init-file"),
1054 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
1055 base_folder
["folder"],
1056 vdud
.get("cloud-init-file"),
1058 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
1059 target
["cloud_init_content"][
1062 elif vdud
.get("cloud-init"):
1063 vdur
["cloud-init"] = "{}:vdu:{}".format(
1064 vnfd
["_id"], get_vdu_index(vnfd
, vdur
["vdu-id-ref"])
1066 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1067 target
["cloud_init_content"][vdur
["cloud-init"]] = vdud
[
1070 vdur
["additionalParams"] = vdur
.get("additionalParams") or {}
1071 deploy_params_vdu
= self
._format
_additional
_params
(
1072 vdur
.get("additionalParams") or {}
1074 deploy_params_vdu
["OSM"] = get_osm_params(
1075 vnfr
, vdur
["vdu-id-ref"], vdur
["count-index"]
1077 vdur
["additionalParams"] = deploy_params_vdu
1080 ns_flavor
= target
["flavor"][int(vdur
["ns-flavor-id"])]
1081 if target_vim
not in ns_flavor
["vim_info"]:
1082 ns_flavor
["vim_info"][target_vim
] = {}
1085 # in case alternative images are provided we must check if they should be applied
1086 # for the vim_type, modify the vim_type taking into account
1087 ns_image_id
= int(vdur
["ns-image-id"])
1088 if vdur
.get("alt-image-ids"):
1089 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1090 vim_type
= db_vim
["vim_type"]
1091 for alt_image_id
in vdur
.get("alt-image-ids"):
1092 ns_alt_image
= target
["image"][int(alt_image_id
)]
1093 if vim_type
== ns_alt_image
.get("vim-type"):
1094 # must use alternative image
1096 "use alternative image id: {}".format(alt_image_id
)
1098 ns_image_id
= alt_image_id
1099 vdur
["ns-image-id"] = ns_image_id
1101 ns_image
= target
["image"][int(ns_image_id
)]
1102 if target_vim
not in ns_image
["vim_info"]:
1103 ns_image
["vim_info"][target_vim
] = {}
1106 if vdur
.get("affinity-or-anti-affinity-group-id"):
1107 for ags_id
in vdur
["affinity-or-anti-affinity-group-id"]:
1108 ns_ags
= target
["affinity-or-anti-affinity-group"][int(ags_id
)]
1109 if target_vim
not in ns_ags
["vim_info"]:
1110 ns_ags
["vim_info"][target_vim
] = {}
1113 if vdur
.get("shared-volumes-id"):
1114 for sv_id
in vdur
["shared-volumes-id"]:
1115 ns_sv
= find_in_list(
1116 target
["shared-volumes"], lambda sv
: sv_id
in sv
["id"]
1119 ns_sv
["vim_info"][target_vim
] = {}
1121 vdur
["vim_info"] = {target_vim
: {}}
1122 # instantiation parameters
1124 vdu_instantiation_params
= find_in_list(
1125 get_iterable(vnf_params
, "vdu"),
1126 lambda i_vdu
: i_vdu
["id"] == vdud
["id"],
1128 if vdu_instantiation_params
:
1129 # Parse the vdu_volumes from the instantiation params
1130 vdu_volumes
= get_volumes_from_instantiation_params(
1131 vdu_instantiation_params
, vdud
1133 vdur
["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1134 vdur
["additionalParams"]["OSM"][
1136 ] = vdu_instantiation_params
.get("vim-flavor-id")
1137 vdur_list
.append(vdur
)
1138 target_vnf
["vdur"] = vdur_list
1139 target
["vnf"].append(target_vnf
)
1141 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
1142 desc
= await self
.RO
.deploy(nsr_id
, target
)
1143 self
.logger
.debug("RO return > {}".format(desc
))
1144 action_id
= desc
["action_id"]
1145 await self
._wait
_ng
_ro
(
1152 operation
="instantiation",
1157 "_admin.deployed.RO.operational-status": "running",
1158 "detailed-status": " ".join(stage
),
1160 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1161 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1162 self
._write
_op
_status
(nslcmop_id
, stage
)
1164 logging_text
+ "ns deployed at RO. RO_id={}".format(action_id
)
1168 async def _wait_ng_ro(
1178 detailed_status_old
= None
1180 start_time
= start_time
or time()
1181 while time() <= start_time
+ timeout
:
1182 desc_status
= await self
.op_status_map
[operation
](nsr_id
, action_id
)
1183 self
.logger
.debug("Wait NG RO > {}".format(desc_status
))
1184 if desc_status
["status"] == "FAILED":
1185 raise NgRoException(desc_status
["details"])
1186 elif desc_status
["status"] == "BUILD":
1188 stage
[2] = "VIM: ({})".format(desc_status
["details"])
1189 elif desc_status
["status"] == "DONE":
1191 stage
[2] = "Deployed at VIM"
1194 assert False, "ROclient.check_ns_status returns unknown {}".format(
1195 desc_status
["status"]
1197 if stage
and nslcmop_id
and stage
[2] != detailed_status_old
:
1198 detailed_status_old
= stage
[2]
1199 db_nsr_update
["detailed-status"] = " ".join(stage
)
1200 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1201 self
._write
_op
_status
(nslcmop_id
, stage
)
1202 await asyncio
.sleep(15)
1203 else: # timeout_ns_deploy
1204 raise NgRoException("Timeout waiting ns to deploy")
1206 async def _terminate_ng_ro(
1207 self
, logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
1212 start_deploy
= time()
1219 "action_id": nslcmop_id
,
1221 desc
= await self
.RO
.deploy(nsr_id
, target
)
1222 action_id
= desc
["action_id"]
1223 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETING"
1226 + "ns terminate action at RO. action_id={}".format(action_id
)
1230 delete_timeout
= 20 * 60 # 20 minutes
1231 await self
._wait
_ng
_ro
(
1238 operation
="termination",
1240 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1242 await self
.RO
.delete(nsr_id
)
1243 except NgRoException
as e
:
1244 if e
.http_code
== 404: # not found
1245 db_nsr_update
["_admin.deployed.RO.nsr_id"] = None
1246 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1248 logging_text
+ "RO_action_id={} already deleted".format(action_id
)
1250 elif e
.http_code
== 409: # conflict
1251 failed_detail
.append("delete conflict: {}".format(e
))
1254 + "RO_action_id={} delete conflict: {}".format(action_id
, e
)
1257 failed_detail
.append("delete error: {}".format(e
))
1260 + "RO_action_id={} delete error: {}".format(action_id
, e
)
1262 except Exception as e
:
1263 failed_detail
.append("delete error: {}".format(e
))
1265 logging_text
+ "RO_action_id={} delete error: {}".format(action_id
, e
)
1269 stage
[2] = "Error deleting from VIM"
1271 stage
[2] = "Deleted from VIM"
1272 db_nsr_update
["detailed-status"] = " ".join(stage
)
1273 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1274 self
._write
_op
_status
(nslcmop_id
, stage
)
1277 raise LcmException("; ".join(failed_detail
))
1280 async def instantiate_RO(
1294 :param logging_text: preffix text to use at logging
1295 :param nsr_id: nsr identity
1296 :param nsd: database content of ns descriptor
1297 :param db_nsr: database content of ns record
1298 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1300 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1301 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1302 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1303 :return: None or exception
1306 start_deploy
= time()
1307 ns_params
= db_nslcmop
.get("operationParams")
1308 if ns_params
and ns_params
.get("timeout_ns_deploy"):
1309 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
1311 timeout_ns_deploy
= self
.timeout
.ns_deploy
1313 # Check for and optionally request placement optimization. Database will be updated if placement activated
1314 stage
[2] = "Waiting for Placement."
1315 if await self
._do
_placement
(logging_text
, db_nslcmop
, db_vnfrs
):
1316 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1317 for vnfr
in db_vnfrs
.values():
1318 if ns_params
["vimAccountId"] == vnfr
["vim-account-id"]:
1321 ns_params
["vimAccountId"] == vnfr
["vim-account-id"]
1323 return await self
._instantiate
_ng
_ro
(
1336 except Exception as e
:
1337 stage
[2] = "ERROR deploying at VIM"
1338 self
.set_vnfr_at_error(db_vnfrs
, str(e
))
1340 "Error deploying at VIM {}".format(e
),
1341 exc_info
=not isinstance(
1344 ROclient
.ROClientException
,
1353 async def wait_kdu_up(self
, logging_text
, nsr_id
, vnfr_id
, kdu_name
):
1355 Wait for kdu to be up, get ip address
1356 :param logging_text: prefix use for logging
1360 :return: IP address, K8s services
1363 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1366 while nb_tries
< 360:
1367 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1371 for x
in get_iterable(db_vnfr
, "kdur")
1372 if x
.get("kdu-name") == kdu_name
1378 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id
, kdu_name
)
1380 if kdur
.get("status"):
1381 if kdur
["status"] in ("READY", "ENABLED"):
1382 return kdur
.get("ip-address"), kdur
.get("services")
1385 "target KDU={} is in error state".format(kdu_name
)
1388 await asyncio
.sleep(10)
1390 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name
))
1392 async def wait_vm_up_insert_key_ro(
1393 self
, logging_text
, nsr_id
, vnfr_id
, vdu_id
, vdu_index
, pub_key
=None, user
=None
1396 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1397 :param logging_text: prefix use for logging
1402 :param pub_key: public ssh key to inject, None to skip
1403 :param user: user to apply the public ssh key
1407 self
.logger
.debug(logging_text
+ "Starting wait_vm_up_insert_key_ro")
1409 target_vdu_id
= None
1414 if ro_retries
>= 360: # 1 hour
1416 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id
)
1419 await asyncio
.sleep(10)
1422 if not target_vdu_id
:
1423 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1425 if not vdu_id
: # for the VNF case
1426 if db_vnfr
.get("status") == "ERROR":
1428 "Cannot inject ssh-key because target VNF is in error state"
1430 ip_address
= db_vnfr
.get("ip-address")
1436 for x
in get_iterable(db_vnfr
, "vdur")
1437 if x
.get("ip-address") == ip_address
1445 for x
in get_iterable(db_vnfr
, "vdur")
1446 if x
.get("vdu-id-ref") == vdu_id
1447 and x
.get("count-index") == vdu_index
1453 not vdur
and len(db_vnfr
.get("vdur", ())) == 1
1454 ): # If only one, this should be the target vdu
1455 vdur
= db_vnfr
["vdur"][0]
1458 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1459 vnfr_id
, vdu_id
, vdu_index
1462 # New generation RO stores information at "vim_info"
1465 if vdur
.get("vim_info"):
1467 t
for t
in vdur
["vim_info"]
1468 ) # there should be only one key
1469 ng_ro_status
= vdur
["vim_info"][target_vim
].get("vim_status")
1471 vdur
.get("pdu-type")
1472 or vdur
.get("status") == "ACTIVE"
1473 or ng_ro_status
== "ACTIVE"
1475 ip_address
= vdur
.get("ip-address")
1478 target_vdu_id
= vdur
["vdu-id-ref"]
1479 elif vdur
.get("status") == "ERROR" or ng_ro_status
== "ERROR":
1481 "Cannot inject ssh-key because target VM is in error state"
1484 if not target_vdu_id
:
1487 # inject public key into machine
1488 if pub_key
and user
:
1489 self
.logger
.debug(logging_text
+ "Inserting RO key")
1490 self
.logger
.debug("SSH > PubKey > {}".format(pub_key
))
1491 if vdur
.get("pdu-type"):
1492 self
.logger
.error(logging_text
+ "Cannot inject ssh-ky to a PDU")
1497 "action": "inject_ssh_key",
1501 "vnf": [{"_id": vnfr_id
, "vdur": [{"id": vdur
["id"]}]}],
1503 desc
= await self
.RO
.deploy(nsr_id
, target
)
1504 action_id
= desc
["action_id"]
1505 await self
._wait
_ng
_ro
(
1506 nsr_id
, action_id
, timeout
=600, operation
="instantiation"
1509 except NgRoException
as e
:
1511 "Reaching max tries injecting key. Error: {}".format(e
)
1518 async def _wait_dependent_n2vc(self
, nsr_id
, vca_deployed_list
, vca_index
):
1520 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1522 my_vca
= vca_deployed_list
[vca_index
]
1523 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1524 # vdu or kdu: no dependencies
1528 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
1529 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1530 configuration_status_list
= db_nsr
["configurationStatus"]
1531 for index
, vca_deployed
in enumerate(configuration_status_list
):
1532 if index
== vca_index
:
1535 if not my_vca
.get("member-vnf-index") or (
1536 vca_deployed
.get("member-vnf-index")
1537 == my_vca
.get("member-vnf-index")
1539 internal_status
= configuration_status_list
[index
].get("status")
1540 if internal_status
== "READY":
1542 elif internal_status
== "BROKEN":
1544 "Configuration aborted because dependent charm/s has failed"
1549 # no dependencies, return
1551 await asyncio
.sleep(10)
1554 raise LcmException("Configuration aborted because dependent charm/s timeout")
1556 def get_vca_id(self
, db_vnfr
: dict, db_nsr
: dict):
1559 vca_id
= deep_get(db_vnfr
, ("vca-id",))
1561 vim_account_id
= deep_get(db_nsr
, ("instantiate_params", "vimAccountId"))
1562 vca_id
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("vca")
1565 async def instantiate_N2VC(
1583 ee_config_descriptor
,
1585 nsr_id
= db_nsr
["_id"]
1586 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
1587 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1588 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
1589 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
1591 "collection": "nsrs",
1592 "filter": {"_id": nsr_id
},
1593 "path": db_update_entry
,
1598 element_under_configuration
= nsr_id
1602 vnfr_id
= db_vnfr
["_id"]
1603 osm_config
["osm"]["vnf_id"] = vnfr_id
1605 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
1607 if vca_type
== "native_charm":
1610 index_number
= vdu_index
or 0
1613 element_type
= "VNF"
1614 element_under_configuration
= vnfr_id
1615 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
1617 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
1618 element_type
= "VDU"
1619 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
1620 osm_config
["osm"]["vdu_id"] = vdu_id
1622 namespace
+= ".{}".format(kdu_name
)
1623 element_type
= "KDU"
1624 element_under_configuration
= kdu_name
1625 osm_config
["osm"]["kdu_name"] = kdu_name
1628 if base_folder
["pkg-dir"]:
1629 artifact_path
= "{}/{}/{}/{}".format(
1630 base_folder
["folder"],
1631 base_folder
["pkg-dir"],
1634 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1639 artifact_path
= "{}/Scripts/{}/{}/".format(
1640 base_folder
["folder"],
1643 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1648 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
1650 # get initial_config_primitive_list that applies to this element
1651 initial_config_primitive_list
= config_descriptor
.get(
1652 "initial-config-primitive"
1656 "Initial config primitive list > {}".format(
1657 initial_config_primitive_list
1661 # add config if not present for NS charm
1662 ee_descriptor_id
= ee_config_descriptor
.get("id")
1663 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
1664 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
1665 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
1669 "Initial config primitive list #2 > {}".format(
1670 initial_config_primitive_list
1673 # n2vc_redesign STEP 3.1
1674 # find old ee_id if exists
1675 ee_id
= vca_deployed
.get("ee_id")
1677 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
1678 # create or register execution environment in VCA
1679 if vca_type
in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1680 self
._write
_configuration
_status
(
1682 vca_index
=vca_index
,
1684 element_under_configuration
=element_under_configuration
,
1685 element_type
=element_type
,
1688 step
= "create execution environment"
1689 self
.logger
.debug(logging_text
+ step
)
1693 if vca_type
== "k8s_proxy_charm":
1694 ee_id
= await self
.vca_map
[vca_type
].install_k8s_proxy_charm(
1695 charm_name
=artifact_path
[artifact_path
.rfind("/") + 1 :],
1696 namespace
=namespace
,
1697 artifact_path
=artifact_path
,
1701 elif vca_type
== "helm-v3":
1702 ee_id
, credentials
= await self
.vca_map
[
1704 ].create_execution_environment(
1709 artifact_path
=artifact_path
,
1710 chart_model
=vca_name
,
1714 ee_id
, credentials
= await self
.vca_map
[
1716 ].create_execution_environment(
1717 namespace
=namespace
,
1723 elif vca_type
== "native_charm":
1724 step
= "Waiting to VM being up and getting IP address"
1725 self
.logger
.debug(logging_text
+ step
)
1726 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1735 credentials
= {"hostname": rw_mgmt_ip
}
1737 username
= deep_get(
1738 config_descriptor
, ("config-access", "ssh-access", "default-user")
1740 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1741 # merged. Meanwhile let's get username from initial-config-primitive
1742 if not username
and initial_config_primitive_list
:
1743 for config_primitive
in initial_config_primitive_list
:
1744 for param
in config_primitive
.get("parameter", ()):
1745 if param
["name"] == "ssh-username":
1746 username
= param
["value"]
1750 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1751 "'config-access.ssh-access.default-user'"
1753 credentials
["username"] = username
1754 # n2vc_redesign STEP 3.2
1756 self
._write
_configuration
_status
(
1758 vca_index
=vca_index
,
1759 status
="REGISTERING",
1760 element_under_configuration
=element_under_configuration
,
1761 element_type
=element_type
,
1764 step
= "register execution environment {}".format(credentials
)
1765 self
.logger
.debug(logging_text
+ step
)
1766 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
1767 credentials
=credentials
,
1768 namespace
=namespace
,
1773 # for compatibility with MON/POL modules, the need model and application name at database
1774 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1775 ee_id_parts
= ee_id
.split(".")
1776 db_nsr_update
= {db_update_entry
+ "ee_id": ee_id
}
1777 if len(ee_id_parts
) >= 2:
1778 model_name
= ee_id_parts
[0]
1779 application_name
= ee_id_parts
[1]
1780 db_nsr_update
[db_update_entry
+ "model"] = model_name
1781 db_nsr_update
[db_update_entry
+ "application"] = application_name
1783 # n2vc_redesign STEP 3.3
1784 step
= "Install configuration Software"
1786 self
._write
_configuration
_status
(
1788 vca_index
=vca_index
,
1789 status
="INSTALLING SW",
1790 element_under_configuration
=element_under_configuration
,
1791 element_type
=element_type
,
1792 other_update
=db_nsr_update
,
1795 # TODO check if already done
1796 self
.logger
.debug(logging_text
+ step
)
1798 if vca_type
== "native_charm":
1799 config_primitive
= next(
1800 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
1803 if config_primitive
:
1804 config
= self
._map
_primitive
_params
(
1805 config_primitive
, {}, deploy_params
1808 if vca_type
== "lxc_proxy_charm":
1809 if element_type
== "NS":
1810 num_units
= db_nsr
.get("config-units") or 1
1811 elif element_type
== "VNF":
1812 num_units
= db_vnfr
.get("config-units") or 1
1813 elif element_type
== "VDU":
1814 for v
in db_vnfr
["vdur"]:
1815 if vdu_id
== v
["vdu-id-ref"]:
1816 num_units
= v
.get("config-units") or 1
1818 if vca_type
!= "k8s_proxy_charm":
1819 await self
.vca_map
[vca_type
].install_configuration_sw(
1821 artifact_path
=artifact_path
,
1824 num_units
=num_units
,
1829 # write in db flag of configuration_sw already installed
1831 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
1834 # add relations for this VCA (wait for other peers related with this VCA)
1835 is_relation_added
= await self
._add
_vca
_relations
(
1836 logging_text
=logging_text
,
1839 vca_index
=vca_index
,
1842 if not is_relation_added
:
1843 raise LcmException("Relations could not be added to VCA.")
1845 # if SSH access is required, then get execution environment SSH public
1846 # if native charm we have waited already to VM be UP
1847 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1850 # self.logger.debug("get ssh key block")
1852 config_descriptor
, ("config-access", "ssh-access", "required")
1854 # self.logger.debug("ssh key needed")
1855 # Needed to inject a ssh key
1858 ("config-access", "ssh-access", "default-user"),
1860 step
= "Install configuration Software, getting public ssh key"
1861 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
1862 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
1865 step
= "Insert public key into VM user={} ssh_key={}".format(
1869 # self.logger.debug("no need to get ssh key")
1870 step
= "Waiting to VM being up and getting IP address"
1871 self
.logger
.debug(logging_text
+ step
)
1873 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1876 # n2vc_redesign STEP 5.1
1877 # wait for RO (ip-address) Insert pub_key into VM
1880 rw_mgmt_ip
, services
= await self
.wait_kdu_up(
1881 logging_text
, nsr_id
, vnfr_id
, kdu_name
1883 vnfd
= self
.db
.get_one(
1885 {"_id": f
'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
1887 kdu
= get_kdu(vnfd
, kdu_name
)
1889 service
["name"] for service
in get_kdu_services(kdu
)
1891 exposed_services
= []
1892 for service
in services
:
1893 if any(s
in service
["name"] for s
in kdu_services
):
1894 exposed_services
.append(service
)
1895 await self
.vca_map
[vca_type
].exec_primitive(
1897 primitive_name
="config",
1899 "osm-config": json
.dumps(
1901 k8s
={"services": exposed_services
}
1908 # This verification is needed in order to avoid trying to add a public key
1909 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
1910 # for a KNF and not for its KDUs, the previous verification gives False, and the code
1911 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
1913 elif db_vnfr
.get("vdur"):
1914 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1924 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
1926 # store rw_mgmt_ip in deploy params for later replacement
1927 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
1929 # n2vc_redesign STEP 6 Execute initial config primitive
1930 step
= "execute initial config primitive"
1932 # wait for dependent primitives execution (NS -> VNF -> VDU)
1933 if initial_config_primitive_list
:
1934 await self
._wait
_dependent
_n
2vc
(nsr_id
, vca_deployed_list
, vca_index
)
1936 # stage, in function of element type: vdu, kdu, vnf or ns
1937 my_vca
= vca_deployed_list
[vca_index
]
1938 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1940 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
1941 elif my_vca
.get("member-vnf-index"):
1943 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
1946 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
1948 self
._write
_configuration
_status
(
1949 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
1952 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
1954 check_if_terminated_needed
= True
1955 for initial_config_primitive
in initial_config_primitive_list
:
1956 # adding information on the vca_deployed if it is a NS execution environment
1957 if not vca_deployed
["member-vnf-index"]:
1958 deploy_params
["ns_config_info"] = json
.dumps(
1959 self
._get
_ns
_config
_info
(nsr_id
)
1961 # TODO check if already done
1962 primitive_params_
= self
._map
_primitive
_params
(
1963 initial_config_primitive
, {}, deploy_params
1966 step
= "execute primitive '{}' params '{}'".format(
1967 initial_config_primitive
["name"], primitive_params_
1969 self
.logger
.debug(logging_text
+ step
)
1970 await self
.vca_map
[vca_type
].exec_primitive(
1972 primitive_name
=initial_config_primitive
["name"],
1973 params_dict
=primitive_params_
,
1978 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1979 if check_if_terminated_needed
:
1980 if config_descriptor
.get("terminate-config-primitive"):
1982 "nsrs", nsr_id
, {db_update_entry
+ "needed_terminate": True}
1984 check_if_terminated_needed
= False
1986 # TODO register in database that primitive is done
1988 # STEP 7 Configure metrics
1989 if vca_type
== "helm-v3":
1990 # TODO: review for those cases where the helm chart is a reference and
1991 # is not part of the NF package
1992 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
1994 artifact_path
=artifact_path
,
1995 ee_config_descriptor
=ee_config_descriptor
,
1998 target_ip
=rw_mgmt_ip
,
1999 element_type
=element_type
,
2000 vnf_member_index
=db_vnfr
.get("member-vnf-index-ref", ""),
2002 vdu_index
=vdu_index
,
2004 kdu_index
=kdu_index
,
2010 {db_update_entry
+ "prometheus_jobs": prometheus_jobs
},
2013 for job
in prometheus_jobs
:
2016 {"job_name": job
["job_name"]},
2019 fail_on_empty
=False,
2022 step
= "instantiated at VCA"
2023 self
.logger
.debug(logging_text
+ step
)
2025 self
._write
_configuration
_status
(
2026 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
2029 except Exception as e
: # TODO not use Exception but N2VC exception
2030 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2032 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
2035 "Exception while {} : {}".format(step
, e
), exc_info
=True
2037 self
._write
_configuration
_status
(
2038 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
2040 raise LcmException("{}. {}".format(step
, e
)) from e
2042 def _write_ns_status(
2046 current_operation
: str,
2047 current_operation_id
: str,
2048 error_description
: str = None,
2049 error_detail
: str = None,
2050 other_update
: dict = None,
2053 Update db_nsr fields.
2056 :param current_operation:
2057 :param current_operation_id:
2058 :param error_description:
2059 :param error_detail:
2060 :param other_update: Other required changes at database if provided, will be cleared
2064 db_dict
= other_update
or {}
2067 ] = current_operation_id
# for backward compatibility
2068 db_dict
["_admin.current-operation"] = current_operation_id
2069 db_dict
["_admin.operation-type"] = (
2070 current_operation
if current_operation
!= "IDLE" else None
2072 db_dict
["currentOperation"] = current_operation
2073 db_dict
["currentOperationID"] = current_operation_id
2074 db_dict
["errorDescription"] = error_description
2075 db_dict
["errorDetail"] = error_detail
2078 db_dict
["nsState"] = ns_state
2079 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2080 except DbException
as e
:
2081 self
.logger
.warn("Error writing NS status, ns={}: {}".format(nsr_id
, e
))
2083 def _write_op_status(
2087 error_message
: str = None,
2088 queuePosition
: int = 0,
2089 operation_state
: str = None,
2090 other_update
: dict = None,
2093 db_dict
= other_update
or {}
2094 db_dict
["queuePosition"] = queuePosition
2095 if isinstance(stage
, list):
2096 db_dict
["stage"] = stage
[0]
2097 db_dict
["detailed-status"] = " ".join(stage
)
2098 elif stage
is not None:
2099 db_dict
["stage"] = str(stage
)
2101 if error_message
is not None:
2102 db_dict
["errorMessage"] = error_message
2103 if operation_state
is not None:
2104 db_dict
["operationState"] = operation_state
2105 db_dict
["statusEnteredTime"] = time()
2106 self
.update_db_2("nslcmops", op_id
, db_dict
)
2107 except DbException
as e
:
2109 "Error writing OPERATION status for op_id: {} -> {}".format(op_id
, e
)
2112 def _write_all_config_status(self
, db_nsr
: dict, status
: str):
2114 nsr_id
= db_nsr
["_id"]
2115 # configurationStatus
2116 config_status
= db_nsr
.get("configurationStatus")
2119 "configurationStatus.{}.status".format(index
): status
2120 for index
, v
in enumerate(config_status
)
2124 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2126 except DbException
as e
:
2128 "Error writing all configuration status, ns={}: {}".format(nsr_id
, e
)
2131 def _write_configuration_status(
2136 element_under_configuration
: str = None,
2137 element_type
: str = None,
2138 other_update
: dict = None,
2140 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2141 # .format(vca_index, status))
2144 db_path
= "configurationStatus.{}.".format(vca_index
)
2145 db_dict
= other_update
or {}
2147 db_dict
[db_path
+ "status"] = status
2148 if element_under_configuration
:
2150 db_path
+ "elementUnderConfiguration"
2151 ] = element_under_configuration
2153 db_dict
[db_path
+ "elementType"] = element_type
2154 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2155 except DbException
as e
:
2157 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2158 status
, nsr_id
, vca_index
, e
2162 async def _do_placement(self
, logging_text
, db_nslcmop
, db_vnfrs
):
2164 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2165 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2166 Database is used because the result can be obtained from a different LCM worker in case of HA.
2167 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2168 :param db_nslcmop: database content of nslcmop
2169 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2170 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2171 computed 'vim-account-id'
2174 nslcmop_id
= db_nslcmop
["_id"]
2175 placement_engine
= deep_get(db_nslcmop
, ("operationParams", "placement-engine"))
2176 if placement_engine
== "PLA":
2178 logging_text
+ "Invoke and wait for placement optimization"
2180 await self
.msg
.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id
})
2181 db_poll_interval
= 5
2182 wait
= db_poll_interval
* 10
2184 while not pla_result
and wait
>= 0:
2185 await asyncio
.sleep(db_poll_interval
)
2186 wait
-= db_poll_interval
2187 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2188 pla_result
= deep_get(db_nslcmop
, ("_admin", "pla"))
2192 "Placement timeout for nslcmopId={}".format(nslcmop_id
)
2195 for pla_vnf
in pla_result
["vnf"]:
2196 vnfr
= db_vnfrs
.get(pla_vnf
["member-vnf-index"])
2197 if not pla_vnf
.get("vimAccountId") or not vnfr
:
2202 {"_id": vnfr
["_id"]},
2203 {"vim-account-id": pla_vnf
["vimAccountId"]},
2206 vnfr
["vim-account-id"] = pla_vnf
["vimAccountId"]
2209 def _gather_vnfr_healing_alerts(self
, vnfr
, vnfd
):
2211 nsr_id
= vnfr
["nsr-id-ref"]
2212 df
= vnfd
.get("df", [{}])[0]
2213 # Checking for auto-healing configuration
2214 if "healing-aspect" in df
:
2215 healing_aspects
= df
["healing-aspect"]
2216 for healing
in healing_aspects
:
2217 for healing_policy
in healing
.get("healing-policy", ()):
2218 vdu_id
= healing_policy
["vdu-id"]
2220 (vdur
for vdur
in vnfr
["vdur"] if vdu_id
== vdur
["vdu-id-ref"]),
2225 metric_name
= "vm_status"
2226 vdu_name
= vdur
.get("name")
2227 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2229 name
= f
"healing_{uuid}"
2230 action
= healing_policy
2231 # action_on_recovery = healing.get("action-on-recovery")
2232 # cooldown_time = healing.get("cooldown-time")
2233 # day1 = healing.get("day1")
2237 "metric": metric_name
,
2240 "vnf_member_index": vnf_member_index
,
2241 "vdu_name": vdu_name
,
2243 "alarm_status": "ok",
2244 "action_type": "healing",
2247 alerts
.append(alert
)
2250 def _gather_vnfr_scaling_alerts(self
, vnfr
, vnfd
):
2252 nsr_id
= vnfr
["nsr-id-ref"]
2253 df
= vnfd
.get("df", [{}])[0]
2254 # Checking for auto-scaling configuration
2255 if "scaling-aspect" in df
:
2256 scaling_aspects
= df
["scaling-aspect"]
2257 all_vnfd_monitoring_params
= {}
2258 for ivld
in vnfd
.get("int-virtual-link-desc", ()):
2259 for mp
in ivld
.get("monitoring-parameters", ()):
2260 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2261 for vdu
in vnfd
.get("vdu", ()):
2262 for mp
in vdu
.get("monitoring-parameter", ()):
2263 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2264 for df
in vnfd
.get("df", ()):
2265 for mp
in df
.get("monitoring-parameter", ()):
2266 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2267 for scaling_aspect
in scaling_aspects
:
2268 scaling_group_name
= scaling_aspect
.get("name", "")
2269 # Get monitored VDUs
2270 all_monitored_vdus
= set()
2271 for delta
in scaling_aspect
.get("aspect-delta-details", {}).get(
2274 for vdu_delta
in delta
.get("vdu-delta", ()):
2275 all_monitored_vdus
.add(vdu_delta
.get("id"))
2276 monitored_vdurs
= list(
2278 lambda vdur
: vdur
["vdu-id-ref"] in all_monitored_vdus
,
2282 if not monitored_vdurs
:
2284 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2287 for scaling_policy
in scaling_aspect
.get("scaling-policy", ()):
2288 if scaling_policy
["scaling-type"] != "automatic":
2290 threshold_time
= scaling_policy
.get("threshold-time", "1")
2291 cooldown_time
= scaling_policy
.get("cooldown-time", "0")
2292 for scaling_criteria
in scaling_policy
["scaling-criteria"]:
2293 monitoring_param_ref
= scaling_criteria
.get(
2294 "vnf-monitoring-param-ref"
2296 vnf_monitoring_param
= all_vnfd_monitoring_params
[
2297 monitoring_param_ref
2299 for vdur
in monitored_vdurs
:
2300 vdu_id
= vdur
["vdu-id-ref"]
2301 metric_name
= vnf_monitoring_param
.get("performance-metric")
2302 metric_name
= f
"osm_{metric_name}"
2303 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2304 scalein_threshold
= scaling_criteria
.get(
2305 "scale-in-threshold"
2307 scaleout_threshold
= scaling_criteria
.get(
2308 "scale-out-threshold"
2310 # Looking for min/max-number-of-instances
2311 instances_min_number
= 1
2312 instances_max_number
= 1
2313 vdu_profile
= df
["vdu-profile"]
2316 item
for item
in vdu_profile
if item
["id"] == vdu_id
2318 instances_min_number
= profile
.get(
2319 "min-number-of-instances", 1
2321 instances_max_number
= profile
.get(
2322 "max-number-of-instances", 1
2325 if scalein_threshold
:
2327 name
= f
"scalein_{uuid}"
2328 operation
= scaling_criteria
[
2329 "scale-in-relational-operation"
2331 rel_operator
= self
.rel_operation_types
.get(
2334 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2335 expression
= f
"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2338 "vnf_member_index": vnf_member_index
,
2344 "for": str(threshold_time
) + "m",
2347 action
= scaling_policy
2349 "scaling-group": scaling_group_name
,
2350 "cooldown-time": cooldown_time
,
2355 "metric": metric_name
,
2358 "vnf_member_index": vnf_member_index
,
2361 "alarm_status": "ok",
2362 "action_type": "scale_in",
2364 "prometheus_config": prom_cfg
,
2366 alerts
.append(alert
)
2368 if scaleout_threshold
:
2370 name
= f
"scaleout_{uuid}"
2371 operation
= scaling_criteria
[
2372 "scale-out-relational-operation"
2374 rel_operator
= self
.rel_operation_types
.get(
2377 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2378 expression
= f
"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2381 "vnf_member_index": vnf_member_index
,
2387 "for": str(threshold_time
) + "m",
2390 action
= scaling_policy
2392 "scaling-group": scaling_group_name
,
2393 "cooldown-time": cooldown_time
,
2398 "metric": metric_name
,
2401 "vnf_member_index": vnf_member_index
,
2404 "alarm_status": "ok",
2405 "action_type": "scale_out",
2407 "prometheus_config": prom_cfg
,
2409 alerts
.append(alert
)
2412 def _gather_vnfr_alarm_alerts(self
, vnfr
, vnfd
):
2414 nsr_id
= vnfr
["nsr-id-ref"]
2415 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2417 # Checking for VNF alarm configuration
2418 for vdur
in vnfr
["vdur"]:
2419 vdu_id
= vdur
["vdu-id-ref"]
2420 vdu
= next(filter(lambda vdu
: vdu
["id"] == vdu_id
, vnfd
["vdu"]))
2422 # Get VDU monitoring params, since alerts are based on them
2423 vdu_monitoring_params
= {}
2424 for mp
in vdu
.get("monitoring-parameter", []):
2425 vdu_monitoring_params
[mp
.get("id")] = mp
2426 if not vdu_monitoring_params
:
2428 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2431 # Get alarms in the VDU
2432 alarm_descriptors
= vdu
["alarm"]
2433 # Create VDU alarms for each alarm in the VDU
2434 for alarm_descriptor
in alarm_descriptors
:
2435 # Check that the VDU alarm refers to a proper monitoring param
2436 alarm_monitoring_param
= alarm_descriptor
.get(
2437 "vnf-monitoring-param-ref", ""
2439 vdu_specific_monitoring_param
= vdu_monitoring_params
.get(
2440 alarm_monitoring_param
, {}
2442 if not vdu_specific_monitoring_param
:
2444 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2447 metric_name
= vdu_specific_monitoring_param
.get(
2448 "performance-metric"
2452 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2455 # Set params of the alarm to be created in Prometheus
2456 metric_name
= f
"osm_{metric_name}"
2457 metric_threshold
= alarm_descriptor
.get("value")
2459 alert_name
= f
"vdu_alarm_{uuid}"
2460 operation
= alarm_descriptor
["operation"]
2461 rel_operator
= self
.rel_operation_types
.get(operation
, "<=")
2462 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2463 expression
= f
"{metric_selector} {rel_operator} {metric_threshold}"
2466 "vnf_member_index": vnf_member_index
,
2468 "vdu_name": "{{ $labels.vdu_name }}",
2471 "alert": alert_name
,
2473 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2476 alarm_action
= dict()
2477 for action_type
in ["ok", "insufficient-data", "alarm"]:
2479 "actions" in alarm_descriptor
2480 and action_type
in alarm_descriptor
["actions"]
2482 alarm_action
[action_type
] = alarm_descriptor
["actions"][
2488 "metric": metric_name
,
2491 "vnf_member_index": vnf_member_index
,
2494 "alarm_status": "ok",
2495 "action_type": "vdu_alarm",
2496 "action": alarm_action
,
2497 "prometheus_config": prom_cfg
,
2499 alerts
.append(alert
)
def update_nsrs_with_pla_result(self, params):
    """Persist the PLA (placement) result into the corresponding nslcmop record.

    Best-effort: any failure is logged and swallowed, never raised to the caller.

    :param params: message payload; expects params["placement"]["nslcmopId"]
    """
    # Initialize before the try so the except handler never hits an unbound
    # name if deep_get itself raises (original code would NameError here).
    nslcmop_id = None
    try:
        nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
        self.update_db_2(
            "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
        )
    except Exception as e:
        self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2511 async def instantiate(self
, nsr_id
, nslcmop_id
):
2514 :param nsr_id: ns instance to deploy
2515 :param nslcmop_id: operation to run
2519 # Try to lock HA task here
2520 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
2521 if not task_is_locked_by_me
:
2523 "instantiate() task is not locked by me, ns={}".format(nsr_id
)
2527 logging_text
= "Task ns={} instantiate={} ".format(nsr_id
, nslcmop_id
)
2528 self
.logger
.debug(logging_text
+ "Enter")
2530 # get all needed from database
2532 # database nsrs record
2535 # database nslcmops record
2538 # update operation on nsrs
2540 # update operation on nslcmops
2541 db_nslcmop_update
= {}
2543 timeout_ns_deploy
= self
.timeout
.ns_deploy
2545 nslcmop_operation_state
= None
2546 db_vnfrs
= {} # vnf's info indexed by member-index
2548 tasks_dict_info
= {} # from task to info text
2552 "Stage 1/5: preparation of the environment.",
2553 "Waiting for previous operations to terminate.",
2556 # ^ stage, step, VIM progress
2558 # wait for any previous tasks in process
2559 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
2561 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2562 stage
[1] = "Reading from database."
2563 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2564 db_nsr_update
["detailed-status"] = "creating"
2565 db_nsr_update
["operational-status"] = "init"
2566 self
._write
_ns
_status
(
2568 ns_state
="BUILDING",
2569 current_operation
="INSTANTIATING",
2570 current_operation_id
=nslcmop_id
,
2571 other_update
=db_nsr_update
,
2573 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
2575 # read from db: operation
2576 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
2577 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2578 if db_nslcmop
["operationParams"].get("additionalParamsForVnf"):
2579 db_nslcmop
["operationParams"]["additionalParamsForVnf"] = json
.loads(
2580 db_nslcmop
["operationParams"]["additionalParamsForVnf"]
2582 ns_params
= db_nslcmop
.get("operationParams")
2583 if ns_params
and ns_params
.get("timeout_ns_deploy"):
2584 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
2587 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
2588 self
.logger
.debug(logging_text
+ stage
[1])
2589 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
2590 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
2591 self
.logger
.debug(logging_text
+ stage
[1])
2592 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
2593 self
.fs
.sync(db_nsr
["nsd-id"])
2595 # nsr_name = db_nsr["name"] # TODO short-name??
2597 # read from db: vnf's of this ns
2598 stage
[1] = "Getting vnfrs from db."
2599 self
.logger
.debug(logging_text
+ stage
[1])
2600 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
2602 # read from db: vnfd's for every vnf
2603 db_vnfds
= [] # every vnfd data
2605 # for each vnf in ns, read vnfd
2606 for vnfr
in db_vnfrs_list
:
2607 if vnfr
.get("kdur"):
2609 for kdur
in vnfr
["kdur"]:
2610 if kdur
.get("additionalParams"):
2611 kdur
["additionalParams"] = json
.loads(
2612 kdur
["additionalParams"]
2614 kdur_list
.append(kdur
)
2615 vnfr
["kdur"] = kdur_list
2617 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
2618 vnfd_id
= vnfr
["vnfd-id"]
2619 vnfd_ref
= vnfr
["vnfd-ref"]
2620 self
.fs
.sync(vnfd_id
)
2622 # if we haven't this vnfd, read it from db
2623 if vnfd_id
not in db_vnfds
:
2625 stage
[1] = "Getting vnfd={} id='{}' from db.".format(
2628 self
.logger
.debug(logging_text
+ stage
[1])
2629 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
2632 db_vnfds
.append(vnfd
)
2634 # Get or generates the _admin.deployed.VCA list
2635 vca_deployed_list
= None
2636 if db_nsr
["_admin"].get("deployed"):
2637 vca_deployed_list
= db_nsr
["_admin"]["deployed"].get("VCA")
2638 if vca_deployed_list
is None:
2639 vca_deployed_list
= []
2640 configuration_status_list
= []
2641 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2642 db_nsr_update
["configurationStatus"] = configuration_status_list
2643 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2644 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2645 elif isinstance(vca_deployed_list
, dict):
2646 # maintain backward compatibility. Change a dict to list at database
2647 vca_deployed_list
= list(vca_deployed_list
.values())
2648 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2649 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2652 deep_get(db_nsr
, ("_admin", "deployed", "RO", "vnfd")), list
2654 populate_dict(db_nsr
, ("_admin", "deployed", "RO", "vnfd"), [])
2655 db_nsr_update
["_admin.deployed.RO.vnfd"] = []
2657 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2658 db_nsr_update
["_admin.nsState"] = "INSTANTIATED"
2659 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2661 "vnfrs", {"nsr-id-ref": nsr_id
}, {"_admin.nsState": "INSTANTIATED"}
2664 # n2vc_redesign STEP 2 Deploy Network Scenario
2665 stage
[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2666 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2668 stage
[1] = "Deploying KDUs."
2669 # self.logger.debug(logging_text + "Before deploy_kdus")
2670 # Call to deploy_kdus in case exists the "vdu:kdu" param
2671 await self
.deploy_kdus(
2672 logging_text
=logging_text
,
2674 nslcmop_id
=nslcmop_id
,
2677 task_instantiation_info
=tasks_dict_info
,
2680 stage
[1] = "Getting VCA public key."
2681 # n2vc_redesign STEP 1 Get VCA public ssh-key
2682 # feature 1429. Add n2vc public key to needed VMs
2683 n2vc_key
= self
.n2vc
.get_public_key()
2684 n2vc_key_list
= [n2vc_key
]
2685 if self
.vca_config
.public_key
:
2686 n2vc_key_list
.append(self
.vca_config
.public_key
)
2688 stage
[1] = "Deploying NS at VIM."
2689 task_ro
= asyncio
.ensure_future(
2690 self
.instantiate_RO(
2691 logging_text
=logging_text
,
2695 db_nslcmop
=db_nslcmop
,
2698 n2vc_key_list
=n2vc_key_list
,
2702 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "instantiate_RO", task_ro
)
2703 tasks_dict_info
[task_ro
] = "Deploying at VIM"
2705 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2706 stage
[1] = "Deploying Execution Environments."
2707 self
.logger
.debug(logging_text
+ stage
[1])
2709 # create namespace and certificate if any helm based EE is present in the NS
2710 if check_helm_ee_in_ns(db_vnfds
):
2711 await self
.vca_map
["helm-v3"].setup_ns_namespace(
2714 # create TLS certificates
2715 await self
.vca_map
["helm-v3"].create_tls_certificate(
2716 secret_name
=self
.EE_TLS_NAME
,
2719 usage
="server auth",
2723 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
2724 for vnf_profile
in get_vnf_profiles(nsd
):
2725 vnfd_id
= vnf_profile
["vnfd-id"]
2726 vnfd
= find_in_list(db_vnfds
, lambda a_vnf
: a_vnf
["id"] == vnfd_id
)
2727 member_vnf_index
= str(vnf_profile
["id"])
2728 db_vnfr
= db_vnfrs
[member_vnf_index
]
2729 base_folder
= vnfd
["_admin"]["storage"]
2736 # Get additional parameters
2737 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
2738 if db_vnfr
.get("additionalParamsForVnf"):
2739 deploy_params
.update(
2740 parse_yaml_strings(db_vnfr
["additionalParamsForVnf"].copy())
2743 descriptor_config
= get_configuration(vnfd
, vnfd
["id"])
2744 if descriptor_config
:
2746 logging_text
=logging_text
2747 + "member_vnf_index={} ".format(member_vnf_index
),
2750 nslcmop_id
=nslcmop_id
,
2756 member_vnf_index
=member_vnf_index
,
2757 vdu_index
=vdu_index
,
2758 kdu_index
=kdu_index
,
2760 deploy_params
=deploy_params
,
2761 descriptor_config
=descriptor_config
,
2762 base_folder
=base_folder
,
2763 task_instantiation_info
=tasks_dict_info
,
2767 # Deploy charms for each VDU that supports one.
2768 for vdud
in get_vdu_list(vnfd
):
2770 descriptor_config
= get_configuration(vnfd
, vdu_id
)
2771 vdur
= find_in_list(
2772 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
2775 if vdur
.get("additionalParams"):
2776 deploy_params_vdu
= parse_yaml_strings(vdur
["additionalParams"])
2778 deploy_params_vdu
= deploy_params
2779 deploy_params_vdu
["OSM"] = get_osm_params(
2780 db_vnfr
, vdu_id
, vdu_count_index
=0
2782 vdud_count
= get_number_of_instances(vnfd
, vdu_id
)
2784 self
.logger
.debug("VDUD > {}".format(vdud
))
2786 "Descriptor config > {}".format(descriptor_config
)
2788 if descriptor_config
:
2792 for vdu_index
in range(vdud_count
):
2793 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2795 logging_text
=logging_text
2796 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2797 member_vnf_index
, vdu_id
, vdu_index
2801 nslcmop_id
=nslcmop_id
,
2807 kdu_index
=kdu_index
,
2808 member_vnf_index
=member_vnf_index
,
2809 vdu_index
=vdu_index
,
2811 deploy_params
=deploy_params_vdu
,
2812 descriptor_config
=descriptor_config
,
2813 base_folder
=base_folder
,
2814 task_instantiation_info
=tasks_dict_info
,
2817 for kdud
in get_kdu_list(vnfd
):
2818 kdu_name
= kdud
["name"]
2819 descriptor_config
= get_configuration(vnfd
, kdu_name
)
2820 if descriptor_config
:
2824 kdu_index
, kdur
= next(
2826 for x
in enumerate(db_vnfr
["kdur"])
2827 if x
[1]["kdu-name"] == kdu_name
2829 deploy_params_kdu
= {"OSM": get_osm_params(db_vnfr
)}
2830 if kdur
.get("additionalParams"):
2831 deploy_params_kdu
.update(
2832 parse_yaml_strings(kdur
["additionalParams"].copy())
2836 logging_text
=logging_text
,
2839 nslcmop_id
=nslcmop_id
,
2845 member_vnf_index
=member_vnf_index
,
2846 vdu_index
=vdu_index
,
2847 kdu_index
=kdu_index
,
2849 deploy_params
=deploy_params_kdu
,
2850 descriptor_config
=descriptor_config
,
2851 base_folder
=base_folder
,
2852 task_instantiation_info
=tasks_dict_info
,
2856 # Check if each vnf has exporter for metric collection if so update prometheus job records
2857 if "exporters-endpoints" in vnfd
.get("df")[0]:
2858 exporter_config
= vnfd
.get("df")[0].get("exporters-endpoints")
2859 self
.logger
.debug("exporter config :{}".format(exporter_config
))
2860 artifact_path
= "{}/{}/{}".format(
2861 base_folder
["folder"],
2862 base_folder
["pkg-dir"],
2863 "exporter-endpoint",
2866 ee_config_descriptor
= exporter_config
2867 vnfr_id
= db_vnfr
["id"]
2868 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
2877 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
2878 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
2879 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
2880 vdu_id_for_prom
= None
2881 vdu_index_for_prom
= None
2882 for x
in get_iterable(db_vnfr
, "vdur"):
2883 vdu_id_for_prom
= x
.get("vdu-id-ref")
2884 vdu_index_for_prom
= x
.get("count-index")
2885 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
2887 artifact_path
=artifact_path
,
2888 ee_config_descriptor
=ee_config_descriptor
,
2891 target_ip
=rw_mgmt_ip
,
2893 vdu_id
=vdu_id_for_prom
,
2894 vdu_index
=vdu_index_for_prom
,
2897 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
2899 db_nsr_update
["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2906 for job
in prometheus_jobs
:
2909 {"job_name": job
["job_name"]},
2912 fail_on_empty
=False,
2915 # Check if this NS has a charm configuration
2916 descriptor_config
= nsd
.get("ns-configuration")
2917 if descriptor_config
and descriptor_config
.get("juju"):
2920 member_vnf_index
= None
2927 # Get additional parameters
2928 deploy_params
= {"OSM": {"vim_account_id": ns_params
["vimAccountId"]}}
2929 if db_nsr
.get("additionalParamsForNs"):
2930 deploy_params
.update(
2931 parse_yaml_strings(db_nsr
["additionalParamsForNs"].copy())
2933 base_folder
= nsd
["_admin"]["storage"]
2935 logging_text
=logging_text
,
2938 nslcmop_id
=nslcmop_id
,
2944 member_vnf_index
=member_vnf_index
,
2945 vdu_index
=vdu_index
,
2946 kdu_index
=kdu_index
,
2948 deploy_params
=deploy_params
,
2949 descriptor_config
=descriptor_config
,
2950 base_folder
=base_folder
,
2951 task_instantiation_info
=tasks_dict_info
,
2955 # rest of staff will be done at finally
2958 ROclient
.ROClientException
,
2964 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
)
2967 except asyncio
.CancelledError
:
2969 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
2971 exc
= "Operation was cancelled"
2972 except Exception as e
:
2973 exc
= traceback
.format_exc()
2974 self
.logger
.critical(
2975 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
2980 error_list
.append(str(exc
))
2982 # wait for pending tasks
2984 stage
[1] = "Waiting for instantiate pending tasks."
2985 self
.logger
.debug(logging_text
+ stage
[1])
2986 error_list
+= await self
._wait
_for
_tasks
(
2994 stage
[1] = stage
[2] = ""
2995 except asyncio
.CancelledError
:
2996 error_list
.append("Cancelled")
2997 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
2998 await self
._wait
_for
_tasks
(
3006 except Exception as exc
:
3007 error_list
.append(str(exc
))
3009 # update operation-status
3010 db_nsr_update
["operational-status"] = "running"
3011 # let's begin with VCA 'configured' status (later we can change it)
3012 db_nsr_update
["config-status"] = "configured"
3013 for task
, task_name
in tasks_dict_info
.items():
3014 if not task
.done() or task
.cancelled() or task
.exception():
3015 if task_name
.startswith(self
.task_name_deploy_vca
):
3016 # A N2VC task is pending
3017 db_nsr_update
["config-status"] = "failed"
3019 # RO or KDU task is pending
3020 db_nsr_update
["operational-status"] = "failed"
3022 # update status at database
3024 error_detail
= ". ".join(error_list
)
3025 self
.logger
.error(logging_text
+ error_detail
)
3026 error_description_nslcmop
= "{} Detail: {}".format(
3027 stage
[0], error_detail
3029 error_description_nsr
= "Operation: INSTANTIATING.{}, {}".format(
3030 nslcmop_id
, stage
[0]
3033 db_nsr_update
["detailed-status"] = (
3034 error_description_nsr
+ " Detail: " + error_detail
3036 db_nslcmop_update
["detailed-status"] = error_detail
3037 nslcmop_operation_state
= "FAILED"
3041 error_description_nsr
= error_description_nslcmop
= None
3043 db_nsr_update
["detailed-status"] = "Done"
3044 db_nslcmop_update
["detailed-status"] = "Done"
3045 nslcmop_operation_state
= "COMPLETED"
3046 # Gather auto-healing and auto-scaling alerts for each vnfr
3049 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
3051 (sub
for sub
in db_vnfds
if sub
["_id"] == vnfr
["vnfd-id"]), None
3053 healing_alerts
= self
._gather
_vnfr
_healing
_alerts
(vnfr
, vnfd
)
3054 for alert
in healing_alerts
:
3055 self
.logger
.info(f
"Storing healing alert in MongoDB: {alert}")
3056 self
.db
.create("alerts", alert
)
3058 scaling_alerts
= self
._gather
_vnfr
_scaling
_alerts
(vnfr
, vnfd
)
3059 for alert
in scaling_alerts
:
3060 self
.logger
.info(f
"Storing scaling alert in MongoDB: {alert}")
3061 self
.db
.create("alerts", alert
)
3063 alarm_alerts
= self
._gather
_vnfr
_alarm
_alerts
(vnfr
, vnfd
)
3064 for alert
in alarm_alerts
:
3065 self
.logger
.info(f
"Storing VNF alarm alert in MongoDB: {alert}")
3066 self
.db
.create("alerts", alert
)
3068 self
._write
_ns
_status
(
3071 current_operation
="IDLE",
3072 current_operation_id
=None,
3073 error_description
=error_description_nsr
,
3074 error_detail
=error_detail
,
3075 other_update
=db_nsr_update
,
3077 self
._write
_op
_status
(
3080 error_message
=error_description_nslcmop
,
3081 operation_state
=nslcmop_operation_state
,
3082 other_update
=db_nslcmop_update
,
3085 if nslcmop_operation_state
:
3087 await self
.msg
.aiowrite(
3092 "nslcmop_id": nslcmop_id
,
3093 "operationState": nslcmop_operation_state
,
3094 "startTime": db_nslcmop
["startTime"],
3095 "links": db_nslcmop
["links"],
3096 "operationParams": {
3097 "nsInstanceId": nsr_id
,
3098 "nsdId": db_nsr
["nsd-id"],
3102 except Exception as e
:
3104 logging_text
+ "kafka_write notification Exception {}".format(e
)
3107 self
.logger
.debug(logging_text
+ "Exit")
3108 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_instantiate")
3110 def _get_vnfd(self
, vnfd_id
: str, projects_read
: str, cached_vnfds
: Dict
[str, Any
]):
3111 if vnfd_id
not in cached_vnfds
:
3112 cached_vnfds
[vnfd_id
] = self
.db
.get_one(
3113 "vnfds", {"id": vnfd_id
, "_admin.projects_read": projects_read
}
3115 return cached_vnfds
[vnfd_id
]
3117 def _get_vnfr(self
, nsr_id
: str, vnf_profile_id
: str, cached_vnfrs
: Dict
[str, Any
]):
3118 if vnf_profile_id
not in cached_vnfrs
:
3119 cached_vnfrs
[vnf_profile_id
] = self
.db
.get_one(
3122 "member-vnf-index-ref": vnf_profile_id
,
3123 "nsr-id-ref": nsr_id
,
3126 return cached_vnfrs
[vnf_profile_id
]
def _is_deployed_vca_in_relation(
    self, vca: DeployedVCA, relation: Relation
) -> bool:
    """Return True when *vca* is one of the relation's (provider/requirer) endpoints.

    KDU endpoints are skipped: a KDU resource is never a deployed VCA, so only
    charm endpoints are compared.
    """
    found = False
    for endpoint in (relation.provider, relation.requirer):
        if endpoint["kdu-resource-profile-id"]:
            continue
        found = (
            vca.vnf_profile_id == endpoint.vnf_profile_id
            and vca.vdu_profile_id == endpoint.vdu_profile_id
            and vca.execution_environment_ref == endpoint.execution_environment_ref
        )
        if found:
            break
    return found
def _update_ee_relation_data_with_implicit_data(
    self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
):
    """Fill in the implicit "execution-environment-ref" of a relation endpoint.

    VNF/VDU level endpoints may omit the execution environment; in that case it
    is resolved from the VNFD's juju execution-environment list.

    :raises Exception: if no execution environment exists for the endpoint
    :return: the (possibly updated) endpoint dict
    """
    ee_relation_data = safe_get_ee_relation(
        nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
    )
    ee_relation_level = EELevel.get_level(ee_relation_data)
    if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
        "execution-environment-ref"
    ]:
        vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
        vnfd_id = vnf_profile["vnfd-id"]
        project = nsd["_admin"]["projects_read"][0]
        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
        # VNF-level endpoints resolve against the VNFD id itself; VDU-level
        # endpoints resolve against the vdu-profile-id
        entity_id = (
            vnfd_id
            if ee_relation_level == EELevel.VNF
            else ee_relation_data["vdu-profile-id"]
        )
        ee = get_juju_ee_ref(db_vnfd, entity_id)
        if not ee:
            raise Exception(
                f"not execution environments found for ee_relation {ee_relation_data}"
            )
        ee_relation_data["execution-environment-ref"] = ee["id"]
    return ee_relation_data
def _get_ns_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the NS-level configuration relations that involve *vca*.

    Supports both the explicit provider/requirer IM format and the older
    two-entry "entities" format.

    :raises Exception: if a relation has neither provider/requirer nor entities
    """
    relations = []
    db_ns_relations = get_ns_configuration_relation_list(nsd)
    for r in db_ns_relations:
        provider_dict = None
        requirer_dict = None
        if all(key in r for key in ("provider", "requirer")):
            provider_dict = r["provider"]
            requirer_dict = r["requirer"]
        elif "entities" in r:
            provider_id = r["entities"][0]["id"]
            provider_dict = {
                "nsr-id": nsr_id,
                "endpoint": r["entities"][0]["endpoint"],
            }
            # an entity id different from the NSD id denotes a VNF endpoint
            if provider_id != nsd["id"]:
                provider_dict["vnf-profile-id"] = provider_id
            requirer_id = r["entities"][1]["id"]
            requirer_dict = {
                "nsr-id": nsr_id,
                "endpoint": r["entities"][1]["endpoint"],
            }
            if requirer_id != nsd["id"]:
                requirer_dict["vnf-profile-id"] = requirer_id
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        relation_provider = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, provider_dict, cached_vnfds
        )
        relation_requirer = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, requirer_dict, cached_vnfds
        )
        provider = EERelation(relation_provider)
        requirer = EERelation(relation_requirer)
        relation = Relation(r["name"], provider, requirer)
        vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
        if vca_in_relation:
            relations.append(relation)
    return relations
def _get_vnf_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the VNF-level configuration relations that involve *vca*.

    NS charms have no VNF relations, so an empty list is returned for them.

    :raises Exception: if a relation has neither provider/requirer nor entities
    """
    relations = []
    if vca.target_element == "ns":
        self.logger.debug("VCA is a NS charm, not a VNF.")
        return relations
    vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
    vnf_profile_id = vnf_profile["id"]
    vnfd_id = vnf_profile["vnfd-id"]
    project = nsd["_admin"]["projects_read"][0]
    db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
    db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
    for r in db_vnf_relations:
        provider_dict = None
        requirer_dict = None
        if all(key in r for key in ("provider", "requirer")):
            provider_dict = r["provider"]
            requirer_dict = r["requirer"]
        elif "entities" in r:
            provider_id = r["entities"][0]["id"]
            provider_dict = {
                "nsr-id": nsr_id,
                "vnf-profile-id": vnf_profile_id,
                "endpoint": r["entities"][0]["endpoint"],
            }
            # an entity id different from the VNFD id denotes a VDU endpoint
            if provider_id != vnfd_id:
                provider_dict["vdu-profile-id"] = provider_id
            requirer_id = r["entities"][1]["id"]
            requirer_dict = {
                "nsr-id": nsr_id,
                "vnf-profile-id": vnf_profile_id,
                "endpoint": r["entities"][1]["endpoint"],
            }
            if requirer_id != vnfd_id:
                requirer_dict["vdu-profile-id"] = requirer_id
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        relation_provider = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
        )
        relation_requirer = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
        )
        provider = EERelation(relation_provider)
        requirer = EERelation(relation_requirer)
        relation = Relation(r["name"], provider, requirer)
        vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
        if vca_in_relation:
            relations.append(relation)
    return relations
def _get_kdu_resource_data(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedK8sResource:
    """Resolve the deployed KDU matching a KDU-level relation endpoint.

    Looks up the kdu-resource-profile in the VNFD and matches it against the
    deployed KDUs recorded under _admin.deployed; the returned dict is
    augmented with the profile's "resource-name".
    """
    nsd = get_nsd(db_nsr)
    vnf_profiles = get_vnf_profiles(nsd)
    vnfd_id = find_in_list(
        vnf_profiles,
        lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
    )["vnfd-id"]
    project = nsd["_admin"]["projects_read"][0]
    db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
    kdu_resource_profile = get_kdu_resource_profile(
        db_vnfd, ee_relation.kdu_resource_profile_id
    )
    kdu_name = kdu_resource_profile["kdu-name"]
    deployed_kdu, _ = get_deployed_kdu(
        db_nsr.get("_admin", ()).get("deployed", ()),
        kdu_name,
        ee_relation.vnf_profile_id,
    )
    deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
    return deployed_kdu
def _get_deployed_component(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedComponent:
    """Resolve the deployed component (VCA or K8s resource) for a relation endpoint.

    Dispatches on the endpoint level (NS / VNF / VDU / KDU); returns None when
    the component is not (yet) deployed.
    """
    nsr_id = db_nsr["_id"]
    deployed_component = None
    ee_level = EELevel.get_level(ee_relation)
    if ee_level == EELevel.NS:
        # NS charm: the VCA with neither vdu nor member-vnf-index
        vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.VNF:
        vca = get_deployed_vca(
            db_nsr,
            {
                "vdu_id": None,
                "member-vnf-index": ee_relation.vnf_profile_id,
                "ee_descriptor_id": ee_relation.execution_environment_ref,
            },
        )
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.VDU:
        vca = get_deployed_vca(
            db_nsr,
            {
                "vdu_id": ee_relation.vdu_profile_id,
                "member-vnf-index": ee_relation.vnf_profile_id,
                "ee_descriptor_id": ee_relation.execution_environment_ref,
            },
        )
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.KDU:
        kdu_resource_data = self._get_kdu_resource_data(
            ee_relation, db_nsr, cached_vnfds
        )
        if kdu_resource_data:
            deployed_component = DeployedK8sResource(kdu_resource_data)
    return deployed_component
async def _add_relation(
    self,
    relation: Relation,
    vca_type: str,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
    cached_vnfrs: Dict[str, Any],
) -> bool:
    """Try to add a single juju relation between two deployed components.

    :return: True when the relation was added; False when either endpoint is
        not yet deployed/configured (caller will retry later)
    :raises LcmException: when the N2VC add_relation call fails
    """
    deployed_provider = self._get_deployed_component(
        relation.provider, db_nsr, cached_vnfds
    )
    deployed_requirer = self._get_deployed_component(
        relation.requirer, db_nsr, cached_vnfds
    )
    if (
        deployed_provider
        and deployed_requirer
        and deployed_provider.config_sw_installed
        and deployed_requirer.config_sw_installed
    ):
        # NS-level endpoints have no vnf_profile_id and therefore no VNFR
        provider_db_vnfr = (
            self._get_vnfr(
                relation.provider.nsr_id,
                relation.provider.vnf_profile_id,
                cached_vnfrs,
            )
            if relation.provider.vnf_profile_id
            else None
        )
        requirer_db_vnfr = (
            self._get_vnfr(
                relation.requirer.nsr_id,
                relation.requirer.vnf_profile_id,
                cached_vnfrs,
            )
            if relation.requirer.vnf_profile_id
            else None
        )
        provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
        requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
        provider_relation_endpoint = RelationEndpoint(
            deployed_provider.ee_id,
            provider_vca_id,
            relation.provider.endpoint,
        )
        requirer_relation_endpoint = RelationEndpoint(
            deployed_requirer.ee_id,
            requirer_vca_id,
            relation.requirer.endpoint,
        )
        try:
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
        except N2VCException as exception:
            self.logger.error(exception)
            raise LcmException(exception)
        return True
    return False
async def _add_vca_relations(
    self,
    logging_text,
    nsr_id,
    vca_type: str,
    vca_index: int,
    timeout: int = 3600,
) -> bool:
    """Add all NS- and VNF-level relations involving the VCA at *vca_index*.

    Retries every 5 seconds until every relation is added or *timeout*
    seconds elapse.

    :return: True on success (or when there is nothing to do), False on
        timeout or on any error (errors are logged, never raised)
    """
    # steps:
    # 1. find all relations for this VCA
    # 2. wait for other peers related
    # 3. add relations

    try:
        # STEP 1: find all relations for this VCA
        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = get_nsd(db_nsr)

        # this VCA data
        deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
        my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

        cached_vnfds = {}
        cached_vnfrs = {}
        relations = []
        relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
        relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

        # if no relations, terminate
        if not relations:
            self.logger.debug(logging_text + " No relations")
            return True

        self.logger.debug(logging_text + " adding relations {}".format(relations))

        # add all relations
        start = time()
        while True:
            # check timeout
            now = time()
            if now - start >= timeout:
                self.logger.error(logging_text + " : timeout adding relations")
                return False

            # reload nsr from database (we need to update record: _admin.deployed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            # for each relation, find the VCA's related
            for relation in relations.copy():
                added = await self._add_relation(
                    relation,
                    vca_type,
                    db_nsr,
                    cached_vnfds,
                    cached_vnfrs,
                )
                if added:
                    relations.remove(relation)

            if not relations:
                self.logger.debug("Relations added")
                break

            await asyncio.sleep(5.0)

        return True

    except Exception as e:
        self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
        return False
3479 async def _install_kdu(
3487 k8s_instance_info
: dict,
3488 k8params
: dict = None,
3493 k8sclustertype
= k8s_instance_info
["k8scluster-type"]
3496 "collection": "nsrs",
3497 "filter": {"_id": nsr_id
},
3498 "path": nsr_db_path
,
3501 if k8s_instance_info
.get("kdu-deployment-name"):
3502 kdu_instance
= k8s_instance_info
.get("kdu-deployment-name")
3504 kdu_instance
= self
.k8scluster_map
[
3506 ].generate_kdu_instance_name(
3507 db_dict
=db_dict_install
,
3508 kdu_model
=k8s_instance_info
["kdu-model"],
3509 kdu_name
=k8s_instance_info
["kdu-name"],
3512 # Update the nsrs table with the kdu-instance value
3516 _desc
={nsr_db_path
+ ".kdu-instance": kdu_instance
},
3519 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3520 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3521 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3522 # namespace, this first verification could be removed, and the next step would be done for any kind
3524 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3525 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3526 if k8sclustertype
in ("juju", "juju-bundle"):
3527 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3528 # that the user passed a namespace which he wants its KDU to be deployed in)
3534 "_admin.projects_write": k8s_instance_info
["namespace"],
3535 "_admin.projects_read": k8s_instance_info
["namespace"],
3541 f
"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3546 _desc
={f
"{nsr_db_path}.namespace": kdu_instance
},
3548 k8s_instance_info
["namespace"] = kdu_instance
3550 await self
.k8scluster_map
[k8sclustertype
].install(
3551 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3552 kdu_model
=k8s_instance_info
["kdu-model"],
3555 db_dict
=db_dict_install
,
3557 kdu_name
=k8s_instance_info
["kdu-name"],
3558 namespace
=k8s_instance_info
["namespace"],
3559 kdu_instance
=kdu_instance
,
3563 # Obtain services to obtain management service ip
3564 services
= await self
.k8scluster_map
[k8sclustertype
].get_services(
3565 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3566 kdu_instance
=kdu_instance
,
3567 namespace
=k8s_instance_info
["namespace"],
3570 # Obtain management service info (if exists)
3571 vnfr_update_dict
= {}
3572 kdu_config
= get_configuration(vnfd
, kdud
["name"])
3574 target_ee_list
= kdu_config
.get("execution-environment-list", [])
3579 vnfr_update_dict
["kdur.{}.services".format(kdu_index
)] = services
3582 for service
in kdud
.get("service", [])
3583 if service
.get("mgmt-service")
3585 for mgmt_service
in mgmt_services
:
3586 for service
in services
:
3587 if service
["name"].startswith(mgmt_service
["name"]):
3588 # Mgmt service found, Obtain service ip
3589 ip
= service
.get("external_ip", service
.get("cluster_ip"))
3590 if isinstance(ip
, list) and len(ip
) == 1:
3594 "kdur.{}.ip-address".format(kdu_index
)
3597 # Check if must update also mgmt ip at the vnf
3598 service_external_cp
= mgmt_service
.get(
3599 "external-connection-point-ref"
3601 if service_external_cp
:
3603 deep_get(vnfd
, ("mgmt-interface", "cp"))
3604 == service_external_cp
3606 vnfr_update_dict
["ip-address"] = ip
3611 "external-connection-point-ref", ""
3613 == service_external_cp
,
3616 "kdur.{}.ip-address".format(kdu_index
)
3621 "Mgmt service name: {} not found".format(
3622 mgmt_service
["name"]
3626 vnfr_update_dict
["kdur.{}.status".format(kdu_index
)] = "READY"
3627 self
.update_db_2("vnfrs", vnfr_data
.get("_id"), vnfr_update_dict
)
3629 kdu_config
= get_configuration(vnfd
, k8s_instance_info
["kdu-name"])
3632 and kdu_config
.get("initial-config-primitive")
3633 and get_juju_ee_ref(vnfd
, k8s_instance_info
["kdu-name"]) is None
3635 initial_config_primitive_list
= kdu_config
.get(
3636 "initial-config-primitive"
3638 initial_config_primitive_list
.sort(key
=lambda val
: int(val
["seq"]))
3640 for initial_config_primitive
in initial_config_primitive_list
:
3641 primitive_params_
= self
._map
_primitive
_params
(
3642 initial_config_primitive
, {}, {}
3645 await asyncio
.wait_for(
3646 self
.k8scluster_map
[k8sclustertype
].exec_primitive(
3647 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3648 kdu_instance
=kdu_instance
,
3649 primitive_name
=initial_config_primitive
["name"],
3650 params
=primitive_params_
,
3651 db_dict
=db_dict_install
,
3657 except Exception as e
:
3658 # Prepare update db with error and raise exception
3661 "nsrs", nsr_id
, {nsr_db_path
+ ".detailed-status": str(e
)}
3665 vnfr_data
.get("_id"),
3666 {"kdur.{}.status".format(kdu_index
): "ERROR"},
3668 except Exception as error
:
3669 # ignore to keep original exception
3670 self
.logger
.warning(
3671 f
"An exception occurred while updating DB: {str(error)}"
3673 # reraise original error
3678 async def deploy_kdus(
3685 task_instantiation_info
,
3687 # Launch kdus if present in the descriptor
3689 k8scluster_id_2_uuic
= {
3690 "helm-chart-v3": {},
3694 async def _get_cluster_id(cluster_id
, cluster_type
):
3695 nonlocal k8scluster_id_2_uuic
3696 if cluster_id
in k8scluster_id_2_uuic
[cluster_type
]:
3697 return k8scluster_id_2_uuic
[cluster_type
][cluster_id
]
3699 # check if K8scluster is creating and wait look if previous tasks in process
3700 task_name
, task_dependency
= self
.lcm_tasks
.lookfor_related(
3701 "k8scluster", cluster_id
3704 text
= "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3705 task_name
, cluster_id
3707 self
.logger
.debug(logging_text
+ text
)
3708 await asyncio
.wait(task_dependency
, timeout
=3600)
3710 db_k8scluster
= self
.db
.get_one(
3711 "k8sclusters", {"_id": cluster_id
}, fail_on_empty
=False
3713 if not db_k8scluster
:
3714 raise LcmException("K8s cluster {} cannot be found".format(cluster_id
))
3716 k8s_id
= deep_get(db_k8scluster
, ("_admin", cluster_type
, "id"))
3718 if cluster_type
== "helm-chart-v3":
3720 # backward compatibility for existing clusters that have not been initialized for helm v3
3721 k8s_credentials
= yaml
.safe_dump(
3722 db_k8scluster
.get("credentials")
3724 k8s_id
, uninstall_sw
= await self
.k8sclusterhelm3
.init_env(
3725 k8s_credentials
, reuse_cluster_uuid
=cluster_id
3727 db_k8scluster_update
= {}
3728 db_k8scluster_update
["_admin.helm-chart-v3.error_msg"] = None
3729 db_k8scluster_update
["_admin.helm-chart-v3.id"] = k8s_id
3730 db_k8scluster_update
[
3731 "_admin.helm-chart-v3.created"
3733 db_k8scluster_update
[
3734 "_admin.helm-chart-v3.operationalState"
3737 "k8sclusters", cluster_id
, db_k8scluster_update
3739 except Exception as e
:
3742 + "error initializing helm-v3 cluster: {}".format(str(e
))
3745 "K8s cluster '{}' has not been initialized for '{}'".format(
3746 cluster_id
, cluster_type
3751 "K8s cluster '{}' has not been initialized for '{}'".format(
3752 cluster_id
, cluster_type
3755 k8scluster_id_2_uuic
[cluster_type
][cluster_id
] = k8s_id
3758 logging_text
+= "Deploy kdus: "
3761 db_nsr_update
= {"_admin.deployed.K8s": []}
3762 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3765 updated_cluster_list
= []
3766 updated_v3_cluster_list
= []
3768 for vnfr_data
in db_vnfrs
.values():
3769 vca_id
= self
.get_vca_id(vnfr_data
, {})
3770 for kdu_index
, kdur
in enumerate(get_iterable(vnfr_data
, "kdur")):
3771 # Step 0: Prepare and set parameters
3772 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
3773 vnfd_id
= vnfr_data
.get("vnfd-id")
3774 vnfd_with_id
= find_in_list(
3775 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3779 for kdud
in vnfd_with_id
["kdu"]
3780 if kdud
["name"] == kdur
["kdu-name"]
3782 namespace
= kdur
.get("k8s-namespace")
3783 kdu_deployment_name
= kdur
.get("kdu-deployment-name")
3784 if kdur
.get("helm-chart"):
3785 kdumodel
= kdur
["helm-chart"]
3786 # Default version: helm3, if helm-version is v2 assign v2
3787 k8sclustertype
= "helm-chart-v3"
3788 self
.logger
.debug("kdur: {}".format(kdur
))
3789 elif kdur
.get("juju-bundle"):
3790 kdumodel
= kdur
["juju-bundle"]
3791 k8sclustertype
= "juju-bundle"
3794 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3795 "juju-bundle. Maybe an old NBI version is running".format(
3796 vnfr_data
["member-vnf-index-ref"], kdur
["kdu-name"]
3799 # check if kdumodel is a file and exists
3801 vnfd_with_id
= find_in_list(
3802 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3804 storage
= deep_get(vnfd_with_id
, ("_admin", "storage"))
3805 if storage
: # may be not present if vnfd has not artifacts
3806 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3807 if storage
["pkg-dir"]:
3808 filename
= "{}/{}/{}s/{}".format(
3815 filename
= "{}/Scripts/{}s/{}".format(
3820 if self
.fs
.file_exists(
3821 filename
, mode
="file"
3822 ) or self
.fs
.file_exists(filename
, mode
="dir"):
3823 kdumodel
= self
.fs
.path
+ filename
3824 except (asyncio
.TimeoutError
, asyncio
.CancelledError
):
3826 except Exception as e
: # it is not a file
3827 self
.logger
.warning(f
"An exception occurred: {str(e)}")
3829 k8s_cluster_id
= kdur
["k8s-cluster"]["id"]
3830 step
= "Synchronize repos for k8s cluster '{}'".format(
3833 cluster_uuid
= await _get_cluster_id(k8s_cluster_id
, k8sclustertype
)
3837 k8sclustertype
== "helm-chart"
3838 and cluster_uuid
not in updated_cluster_list
3840 k8sclustertype
== "helm-chart-v3"
3841 and cluster_uuid
not in updated_v3_cluster_list
3843 del_repo_list
, added_repo_dict
= await asyncio
.ensure_future(
3844 self
.k8scluster_map
[k8sclustertype
].synchronize_repos(
3845 cluster_uuid
=cluster_uuid
3848 if del_repo_list
or added_repo_dict
:
3849 if k8sclustertype
== "helm-chart":
3851 "_admin.helm_charts_added." + item
: None
3852 for item
in del_repo_list
3855 "_admin.helm_charts_added." + item
: name
3856 for item
, name
in added_repo_dict
.items()
3858 updated_cluster_list
.append(cluster_uuid
)
3859 elif k8sclustertype
== "helm-chart-v3":
3861 "_admin.helm_charts_v3_added." + item
: None
3862 for item
in del_repo_list
3865 "_admin.helm_charts_v3_added." + item
: name
3866 for item
, name
in added_repo_dict
.items()
3868 updated_v3_cluster_list
.append(cluster_uuid
)
3870 logging_text
+ "repos synchronized on k8s cluster "
3871 "'{}' to_delete: {}, to_add: {}".format(
3872 k8s_cluster_id
, del_repo_list
, added_repo_dict
3877 {"_id": k8s_cluster_id
},
3883 step
= "Instantiating KDU {}.{} in k8s cluster {}".format(
3884 vnfr_data
["member-vnf-index-ref"],
3888 k8s_instance_info
= {
3889 "kdu-instance": None,
3890 "k8scluster-uuid": cluster_uuid
,
3891 "k8scluster-type": k8sclustertype
,
3892 "member-vnf-index": vnfr_data
["member-vnf-index-ref"],
3893 "kdu-name": kdur
["kdu-name"],
3894 "kdu-model": kdumodel
,
3895 "namespace": namespace
,
3896 "kdu-deployment-name": kdu_deployment_name
,
3898 db_path
= "_admin.deployed.K8s.{}".format(index
)
3899 db_nsr_update
[db_path
] = k8s_instance_info
3900 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3901 vnfd_with_id
= find_in_list(
3902 db_vnfds
, lambda vnf
: vnf
["_id"] == vnfd_id
3904 task
= asyncio
.ensure_future(
3913 k8params
=desc_params
,
3918 self
.lcm_tasks
.register(
3922 "instantiate_KDU-{}".format(index
),
3925 task_instantiation_info
[task
] = "Deploying KDU {}".format(
3931 except (LcmException
, asyncio
.CancelledError
):
3933 except Exception as e
:
3934 msg
= "Exception {} while {}: {}".format(type(e
).__name
__, step
, e
)
3935 if isinstance(e
, (N2VCException
, DbException
)):
3936 self
.logger
.error(logging_text
+ msg
)
3938 self
.logger
.critical(logging_text
+ msg
, exc_info
=True)
3939 raise LcmException(msg
)
3942 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3962 task_instantiation_info
,
3965 # launch instantiate_N2VC in a asyncio task and register task object
3966 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
3967 # if not found, create one entry and update database
3968 # fill db_nsr._admin.deployed.VCA.<index>
3971 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
3975 get_charm_name
= False
3976 if "execution-environment-list" in descriptor_config
:
3977 ee_list
= descriptor_config
.get("execution-environment-list", [])
3978 elif "juju" in descriptor_config
:
3979 ee_list
= [descriptor_config
] # ns charms
3980 if "execution-environment-list" not in descriptor_config
:
3981 # charm name is only required for ns charms
3982 get_charm_name
= True
3983 else: # other types as script are not supported
3986 for ee_item
in ee_list
:
3989 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3990 ee_item
.get("juju"), ee_item
.get("helm-chart")
3993 ee_descriptor_id
= ee_item
.get("id")
3994 vca_name
, charm_name
, vca_type
= self
.get_vca_info(
3995 ee_item
, db_nsr
, get_charm_name
3999 logging_text
+ "skipping, non juju/charm/helm configuration"
4004 for vca_index
, vca_deployed
in enumerate(
4005 db_nsr
["_admin"]["deployed"]["VCA"]
4007 if not vca_deployed
:
4010 vca_deployed
.get("member-vnf-index") == member_vnf_index
4011 and vca_deployed
.get("vdu_id") == vdu_id
4012 and vca_deployed
.get("kdu_name") == kdu_name
4013 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
4014 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
4018 # not found, create one.
4020 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
4023 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
4025 target
+= "/kdu/{}".format(kdu_name
)
4027 "target_element": target
,
4028 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4029 "member-vnf-index": member_vnf_index
,
4031 "kdu_name": kdu_name
,
4032 "vdu_count_index": vdu_index
,
4033 "operational-status": "init", # TODO revise
4034 "detailed-status": "", # TODO revise
4035 "step": "initial-deploy", # TODO revise
4037 "vdu_name": vdu_name
,
4039 "ee_descriptor_id": ee_descriptor_id
,
4040 "charm_name": charm_name
,
4044 # create VCA and configurationStatus in db
4046 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
4047 "configurationStatus.{}".format(vca_index
): dict(),
4049 self
.update_db_2("nsrs", nsr_id
, db_dict
)
4051 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
4053 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
4054 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
4055 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
4058 task_n2vc
= asyncio
.ensure_future(
4059 self
.instantiate_N2VC(
4060 logging_text
=logging_text
,
4061 vca_index
=vca_index
,
4067 vdu_index
=vdu_index
,
4068 kdu_index
=kdu_index
,
4069 deploy_params
=deploy_params
,
4070 config_descriptor
=descriptor_config
,
4071 base_folder
=base_folder
,
4072 nslcmop_id
=nslcmop_id
,
4076 ee_config_descriptor
=ee_item
,
4079 self
.lcm_tasks
.register(
4083 "instantiate_N2VC-{}".format(vca_index
),
4086 task_instantiation_info
[
4088 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
4089 member_vnf_index
or "", vdu_id
or ""
4092 def _format_additional_params(self
, params
):
4093 params
= params
or {}
4094 for key
, value
in params
.items():
4095 if str(value
).startswith("!!yaml "):
4096 params
[key
] = yaml
.safe_load(value
[7:])
4099 def _get_terminate_primitive_params(self
, seq
, vnf_index
):
4100 primitive
= seq
.get("name")
4101 primitive_params
= {}
4103 "member_vnf_index": vnf_index
,
4104 "primitive": primitive
,
4105 "primitive_params": primitive_params
,
4108 return self
._map
_primitive
_params
(seq
, params
, desc_params
)
4112 def _retry_or_skip_suboperation(self
, db_nslcmop
, op_index
):
4113 op
= deep_get(db_nslcmop
, ("_admin", "operations"), [])[op_index
]
4114 if op
.get("operationState") == "COMPLETED":
4115 # b. Skip sub-operation
4116 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4117 return self
.SUBOPERATION_STATUS_SKIP
4119 # c. retry executing sub-operation
4120 # The sub-operation exists, and operationState != 'COMPLETED'
4121 # Update operationState = 'PROCESSING' to indicate a retry.
4122 operationState
= "PROCESSING"
4123 detailed_status
= "In progress"
4124 self
._update
_suboperation
_status
(
4125 db_nslcmop
, op_index
, operationState
, detailed_status
4127 # Return the sub-operation index
4128 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4129 # with arguments extracted from the sub-operation
4132 # Find a sub-operation where all keys in a matching dictionary must match
4133 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4134 def _find_suboperation(self
, db_nslcmop
, match
):
4135 if db_nslcmop
and match
:
4136 op_list
= db_nslcmop
.get("_admin", {}).get("operations", [])
4137 for i
, op
in enumerate(op_list
):
4138 if all(op
.get(k
) == match
[k
] for k
in match
):
4140 return self
.SUBOPERATION_STATUS_NOT_FOUND
4142 # Update status for a sub-operation given its index
4143 def _update_suboperation_status(
4144 self
, db_nslcmop
, op_index
, operationState
, detailed_status
4146 # Update DB for HA tasks
4147 q_filter
= {"_id": db_nslcmop
["_id"]}
4149 "_admin.operations.{}.operationState".format(op_index
): operationState
,
4150 "_admin.operations.{}.detailed-status".format(op_index
): detailed_status
,
4153 "nslcmops", q_filter
=q_filter
, update_dict
=update_dict
, fail_on_empty
=False
4156 # Add sub-operation, return the index of the added sub-operation
4157 # Optionally, set operationState, detailed-status, and operationType
4158 # Status and type are currently set for 'scale' sub-operations:
4159 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4160 # 'detailed-status' : status message
4161 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4162 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4163 def _add_suboperation(
4171 mapped_primitive_params
,
4172 operationState
=None,
4173 detailed_status
=None,
4176 RO_scaling_info
=None,
4179 return self
.SUBOPERATION_STATUS_NOT_FOUND
4180 # Get the "_admin.operations" list, if it exists
4181 db_nslcmop_admin
= db_nslcmop
.get("_admin", {})
4182 op_list
= db_nslcmop_admin
.get("operations")
4183 # Create or append to the "_admin.operations" list
4185 "member_vnf_index": vnf_index
,
4187 "vdu_count_index": vdu_count_index
,
4188 "primitive": primitive
,
4189 "primitive_params": mapped_primitive_params
,
4192 new_op
["operationState"] = operationState
4194 new_op
["detailed-status"] = detailed_status
4196 new_op
["lcmOperationType"] = operationType
4198 new_op
["RO_nsr_id"] = RO_nsr_id
4200 new_op
["RO_scaling_info"] = RO_scaling_info
4202 # No existing operations, create key 'operations' with current operation as first list element
4203 db_nslcmop_admin
.update({"operations": [new_op
]})
4204 op_list
= db_nslcmop_admin
.get("operations")
4206 # Existing operations, append operation to list
4207 op_list
.append(new_op
)
4209 db_nslcmop_update
= {"_admin.operations": op_list
}
4210 self
.update_db_2("nslcmops", db_nslcmop
["_id"], db_nslcmop_update
)
4211 op_index
= len(op_list
) - 1
4214 # Helper methods for scale() sub-operations
4216 # pre-scale/post-scale:
4217 # Check for 3 different cases:
4218 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4219 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4220 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4221 def _check_or_add_scale_suboperation(
4225 vnf_config_primitive
,
4229 RO_scaling_info
=None,
4231 # Find this sub-operation
4232 if RO_nsr_id
and RO_scaling_info
:
4233 operationType
= "SCALE-RO"
4235 "member_vnf_index": vnf_index
,
4236 "RO_nsr_id": RO_nsr_id
,
4237 "RO_scaling_info": RO_scaling_info
,
4241 "member_vnf_index": vnf_index
,
4242 "primitive": vnf_config_primitive
,
4243 "primitive_params": primitive_params
,
4244 "lcmOperationType": operationType
,
4246 op_index
= self
._find
_suboperation
(db_nslcmop
, match
)
4247 if op_index
== self
.SUBOPERATION_STATUS_NOT_FOUND
:
4248 # a. New sub-operation
4249 # The sub-operation does not exist, add it.
4250 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4251 # The following parameters are set to None for all kind of scaling:
4253 vdu_count_index
= None
4255 if RO_nsr_id
and RO_scaling_info
:
4256 vnf_config_primitive
= None
4257 primitive_params
= None
4260 RO_scaling_info
= None
4261 # Initial status for sub-operation
4262 operationState
= "PROCESSING"
4263 detailed_status
= "In progress"
4264 # Add sub-operation for pre/post-scaling (zero or more operations)
4265 self
._add
_suboperation
(
4271 vnf_config_primitive
,
4279 return self
.SUBOPERATION_STATUS_NEW
4281 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4282 # or op_index (operationState != 'COMPLETED')
4283 return self
._retry
_or
_skip
_suboperation
(db_nslcmop
, op_index
)
4285 # Function to return execution_environment id
4287 async def destroy_N2VC(
4295 exec_primitives
=True,
4300 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4301 :param logging_text:
4303 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4304 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4305 :param vca_index: index in the database _admin.deployed.VCA
4306 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4307 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4308 not executed properly
4309 :param scaling_in: True destroys the application, False destroys the model
4310 :return: None or exception
4315 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4316 vca_index
, vca_deployed
, config_descriptor
, destroy_ee
4320 vca_type
= vca_deployed
.get("type", "lxc_proxy_charm")
4322 # execute terminate_primitives
4324 terminate_primitives
= get_ee_sorted_terminate_config_primitive_list(
4325 config_descriptor
.get("terminate-config-primitive"),
4326 vca_deployed
.get("ee_descriptor_id"),
4328 vdu_id
= vca_deployed
.get("vdu_id")
4329 vdu_count_index
= vca_deployed
.get("vdu_count_index")
4330 vdu_name
= vca_deployed
.get("vdu_name")
4331 vnf_index
= vca_deployed
.get("member-vnf-index")
4332 if terminate_primitives
and vca_deployed
.get("needed_terminate"):
4333 for seq
in terminate_primitives
:
4334 # For each sequence in list, get primitive and call _ns_execute_primitive()
4335 step
= "Calling terminate action for vnf_member_index={} primitive={}".format(
4336 vnf_index
, seq
.get("name")
4338 self
.logger
.debug(logging_text
+ step
)
4339 # Create the primitive for each sequence, i.e. "primitive": "touch"
4340 primitive
= seq
.get("name")
4341 mapped_primitive_params
= self
._get
_terminate
_primitive
_params
(
4346 self
._add
_suboperation
(
4353 mapped_primitive_params
,
4355 # Sub-operations: Call _ns_execute_primitive() instead of action()
4357 result
, result_detail
= await self
._ns
_execute
_primitive
(
4358 vca_deployed
["ee_id"],
4360 mapped_primitive_params
,
4364 except LcmException
:
4365 # this happens when VCA is not deployed. In this case it is not needed to terminate
4367 result_ok
= ["COMPLETED", "PARTIALLY_COMPLETED"]
4368 if result
not in result_ok
:
4370 "terminate_primitive {} for vnf_member_index={} fails with "
4371 "error {}".format(seq
.get("name"), vnf_index
, result_detail
)
4373 # set that this VCA do not need terminated
4374 db_update_entry
= "_admin.deployed.VCA.{}.needed_terminate".format(
4378 "nsrs", db_nslcmop
["nsInstanceId"], {db_update_entry
: False}
4381 # Delete Prometheus Jobs if any
4382 # This uses NSR_ID, so it will destroy any jobs under this index
4383 self
.db
.del_list("prometheus_jobs", {"nsr_id": db_nslcmop
["nsInstanceId"]})
4386 await self
.vca_map
[vca_type
].delete_execution_environment(
4387 vca_deployed
["ee_id"],
4388 scaling_in
=scaling_in
,
4393 async def _delete_all_N2VC(self
, db_nsr
: dict, vca_id
: str = None):
4394 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="TERMINATING")
4395 namespace
= "." + db_nsr
["_id"]
4397 await self
.n2vc
.delete_namespace(
4398 namespace
=namespace
,
4399 total_timeout
=self
.timeout
.charm_delete
,
4402 except N2VCNotFound
: # already deleted. Skip
4404 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="DELETED")
4406 async def terminate(self
, nsr_id
, nslcmop_id
):
4407 # Try to lock HA task here
4408 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
4409 if not task_is_locked_by_me
:
4412 logging_text
= "Task ns={} terminate={} ".format(nsr_id
, nslcmop_id
)
4413 self
.logger
.debug(logging_text
+ "Enter")
4414 timeout_ns_terminate
= self
.timeout
.ns_terminate
4417 operation_params
= None
4419 error_list
= [] # annotates all failed error messages
4420 db_nslcmop_update
= {}
4421 autoremove
= False # autoremove after terminated
4422 tasks_dict_info
= {}
4425 "Stage 1/3: Preparing task.",
4426 "Waiting for previous operations to terminate.",
4429 # ^ contains [stage, step, VIM-status]
4431 # wait for any previous tasks in process
4432 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
4434 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
4435 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
4436 operation_params
= db_nslcmop
.get("operationParams") or {}
4437 if operation_params
.get("timeout_ns_terminate"):
4438 timeout_ns_terminate
= operation_params
["timeout_ns_terminate"]
4439 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
4440 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
4442 db_nsr_update
["operational-status"] = "terminating"
4443 db_nsr_update
["config-status"] = "terminating"
4444 self
._write
_ns
_status
(
4446 ns_state
="TERMINATING",
4447 current_operation
="TERMINATING",
4448 current_operation_id
=nslcmop_id
,
4449 other_update
=db_nsr_update
,
4451 self
._write
_op
_status
(op_id
=nslcmop_id
, queuePosition
=0, stage
=stage
)
4452 nsr_deployed
= deepcopy(db_nsr
["_admin"].get("deployed")) or {}
4453 if db_nsr
["_admin"]["nsState"] == "NOT_INSTANTIATED":
4456 stage
[1] = "Getting vnf descriptors from db."
4457 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
4459 db_vnfr
["member-vnf-index-ref"]: db_vnfr
for db_vnfr
in db_vnfrs_list
4461 db_vnfds_from_id
= {}
4462 db_vnfds_from_member_index
= {}
4464 for vnfr
in db_vnfrs_list
:
4465 vnfd_id
= vnfr
["vnfd-id"]
4466 if vnfd_id
not in db_vnfds_from_id
:
4467 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
4468 db_vnfds_from_id
[vnfd_id
] = vnfd
4469 db_vnfds_from_member_index
[
4470 vnfr
["member-vnf-index-ref"]
4471 ] = db_vnfds_from_id
[vnfd_id
]
4473 # Destroy individual execution environments when there are terminating primitives.
4474 # Rest of EE will be deleted at once
4475 # TODO - check before calling _destroy_N2VC
4476 # if not operation_params.get("skip_terminate_primitives"):#
4477 # or not vca.get("needed_terminate"):
4478 stage
[0] = "Stage 2/3 execute terminating primitives."
4479 self
.logger
.debug(logging_text
+ stage
[0])
4480 stage
[1] = "Looking execution environment that needs terminate."
4481 self
.logger
.debug(logging_text
+ stage
[1])
4483 for vca_index
, vca
in enumerate(get_iterable(nsr_deployed
, "VCA")):
4484 config_descriptor
= None
4485 vca_member_vnf_index
= vca
.get("member-vnf-index")
4486 vca_id
= self
.get_vca_id(
4487 db_vnfrs_dict
.get(vca_member_vnf_index
)
4488 if vca_member_vnf_index
4492 if not vca
or not vca
.get("ee_id"):
4494 if not vca
.get("member-vnf-index"):
4496 config_descriptor
= db_nsr
.get("ns-configuration")
4497 elif vca
.get("vdu_id"):
4498 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4499 config_descriptor
= get_configuration(db_vnfd
, vca
.get("vdu_id"))
4500 elif vca
.get("kdu_name"):
4501 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4502 config_descriptor
= get_configuration(db_vnfd
, vca
.get("kdu_name"))
4504 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4505 config_descriptor
= get_configuration(db_vnfd
, db_vnfd
["id"])
4506 vca_type
= vca
.get("type")
4507 exec_terminate_primitives
= not operation_params
.get(
4508 "skip_terminate_primitives"
4509 ) and vca
.get("needed_terminate")
4510 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4511 # pending native charms
4512 destroy_ee
= True if vca_type
in ("helm-v3", "native_charm") else False
4513 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4514 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4515 task
= asyncio
.ensure_future(
4523 exec_terminate_primitives
,
4527 tasks_dict_info
[task
] = "Terminating VCA {}".format(vca
.get("ee_id"))
4529 # wait for pending tasks of terminate primitives
4533 + "Waiting for tasks {}".format(list(tasks_dict_info
.keys()))
4535 error_list
= await self
._wait
_for
_tasks
(
4538 min(self
.timeout
.charm_delete
, timeout_ns_terminate
),
4542 tasks_dict_info
.clear()
4544 return # raise LcmException("; ".join(error_list))
4546 # remove All execution environments at once
4547 stage
[0] = "Stage 3/3 delete all."
4549 if nsr_deployed
.get("VCA"):
4550 stage
[1] = "Deleting all execution environments."
4551 self
.logger
.debug(logging_text
+ stage
[1])
4552 helm_vca_list
= get_deployed_vca(db_nsr
, {"type": "helm-v3"})
4554 # Delete Namespace and Certificates
4555 await self
.vca_map
["helm-v3"].delete_tls_certificate(
4556 namespace
=db_nslcmop
["nsInstanceId"],
4557 certificate_name
=self
.EE_TLS_NAME
,
4559 await self
.vca_map
["helm-v3"].delete_namespace(
4560 namespace
=db_nslcmop
["nsInstanceId"],
4563 vca_id
= self
.get_vca_id({}, db_nsr
)
4564 task_delete_ee
= asyncio
.ensure_future(
4566 self
._delete
_all
_N
2VC
(db_nsr
=db_nsr
, vca_id
=vca_id
),
4567 timeout
=self
.timeout
.charm_delete
,
4570 tasks_dict_info
[task_delete_ee
] = "Terminating all VCA"
4572 # Delete from k8scluster
4573 stage
[1] = "Deleting KDUs."
4574 self
.logger
.debug(logging_text
+ stage
[1])
4575 # print(nsr_deployed)
4576 for kdu
in get_iterable(nsr_deployed
, "K8s"):
4577 if not kdu
or not kdu
.get("kdu-instance"):
4579 kdu_instance
= kdu
.get("kdu-instance")
4580 if kdu
.get("k8scluster-type") in self
.k8scluster_map
:
4581 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4582 vca_id
= self
.get_vca_id({}, db_nsr
)
4583 task_delete_kdu_instance
= asyncio
.ensure_future(
4584 self
.k8scluster_map
[kdu
["k8scluster-type"]].uninstall(
4585 cluster_uuid
=kdu
.get("k8scluster-uuid"),
4586 kdu_instance
=kdu_instance
,
4588 namespace
=kdu
.get("namespace"),
4594 + "Unknown k8s deployment type {}".format(
4595 kdu
.get("k8scluster-type")
4600 task_delete_kdu_instance
4601 ] = "Terminating KDU '{}'".format(kdu
.get("kdu-name"))
4604 stage
[1] = "Deleting ns from VIM."
4605 if self
.ro_config
.ng
:
4606 task_delete_ro
= asyncio
.ensure_future(
4607 self
._terminate
_ng
_ro
(
4608 logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
4611 tasks_dict_info
[task_delete_ro
] = "Removing deployment from VIM"
4613 # rest of staff will be done at finally
4616 ROclient
.ROClientException
,
4621 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
4623 except asyncio
.CancelledError
:
4625 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
4627 exc
= "Operation was cancelled"
4628 except Exception as e
:
4629 exc
= traceback
.format_exc()
4630 self
.logger
.critical(
4631 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
4636 error_list
.append(str(exc
))
4638 # wait for pending tasks
4640 stage
[1] = "Waiting for terminate pending tasks."
4641 self
.logger
.debug(logging_text
+ stage
[1])
4642 error_list
+= await self
._wait
_for
_tasks
(
4645 timeout_ns_terminate
,
4649 stage
[1] = stage
[2] = ""
4650 except asyncio
.CancelledError
:
4651 error_list
.append("Cancelled")
4652 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
4653 await self
._wait
_for
_tasks
(
4656 timeout_ns_terminate
,
4660 except Exception as exc
:
4661 error_list
.append(str(exc
))
4662 # update status at database
4664 error_detail
= "; ".join(error_list
)
4665 # self.logger.error(logging_text + error_detail)
4666 error_description_nslcmop
= "{} Detail: {}".format(
4667 stage
[0], error_detail
4669 error_description_nsr
= "Operation: TERMINATING.{}, {}.".format(
4670 nslcmop_id
, stage
[0]
4673 db_nsr_update
["operational-status"] = "failed"
4674 db_nsr_update
["detailed-status"] = (
4675 error_description_nsr
+ " Detail: " + error_detail
4677 db_nslcmop_update
["detailed-status"] = error_detail
4678 nslcmop_operation_state
= "FAILED"
4682 error_description_nsr
= error_description_nslcmop
= None
4683 ns_state
= "NOT_INSTANTIATED"
4684 db_nsr_update
["operational-status"] = "terminated"
4685 db_nsr_update
["detailed-status"] = "Done"
4686 db_nsr_update
["_admin.nsState"] = "NOT_INSTANTIATED"
4687 db_nslcmop_update
["detailed-status"] = "Done"
4688 nslcmop_operation_state
= "COMPLETED"
4691 self
._write
_ns
_status
(
4694 current_operation
="IDLE",
4695 current_operation_id
=None,
4696 error_description
=error_description_nsr
,
4697 error_detail
=error_detail
,
4698 other_update
=db_nsr_update
,
4700 self
._write
_op
_status
(
4703 error_message
=error_description_nslcmop
,
4704 operation_state
=nslcmop_operation_state
,
4705 other_update
=db_nslcmop_update
,
4707 if ns_state
== "NOT_INSTANTIATED":
4711 {"nsr-id-ref": nsr_id
},
4712 {"_admin.nsState": "NOT_INSTANTIATED"},
4714 except DbException
as e
:
4717 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4721 if operation_params
:
4722 autoremove
= operation_params
.get("autoremove", False)
4723 if nslcmop_operation_state
:
4725 await self
.msg
.aiowrite(
4730 "nslcmop_id": nslcmop_id
,
4731 "operationState": nslcmop_operation_state
,
4732 "autoremove": autoremove
,
4735 except Exception as e
:
4737 logging_text
+ "kafka_write notification Exception {}".format(e
)
4739 self
.logger
.debug(f
"Deleting alerts: ns_id={nsr_id}")
4740 self
.db
.del_list("alerts", {"tags.ns_id": nsr_id
})
4742 self
.logger
.debug(logging_text
+ "Exit")
4743 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_terminate")
4745 async def _wait_for_tasks(
4746 self
, logging_text
, created_tasks_info
, timeout
, stage
, nslcmop_id
, nsr_id
=None
4749 error_detail_list
= []
4751 pending_tasks
= list(created_tasks_info
.keys())
4752 num_tasks
= len(pending_tasks
)
4754 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4755 self
._write
_op
_status
(nslcmop_id
, stage
)
4756 while pending_tasks
:
4758 _timeout
= timeout
+ time_start
- time()
4759 done
, pending_tasks
= await asyncio
.wait(
4760 pending_tasks
, timeout
=_timeout
, return_when
=asyncio
.FIRST_COMPLETED
4762 num_done
+= len(done
)
4763 if not done
: # Timeout
4764 for task
in pending_tasks
:
4765 new_error
= created_tasks_info
[task
] + ": Timeout"
4766 error_detail_list
.append(new_error
)
4767 error_list
.append(new_error
)
4770 if task
.cancelled():
4773 exc
= task
.exception()
4775 if isinstance(exc
, asyncio
.TimeoutError
):
4777 new_error
= created_tasks_info
[task
] + ": {}".format(exc
)
4778 error_list
.append(created_tasks_info
[task
])
4779 error_detail_list
.append(new_error
)
4786 ROclient
.ROClientException
,
4792 self
.logger
.error(logging_text
+ new_error
)
4794 exc_traceback
= "".join(
4795 traceback
.format_exception(None, exc
, exc
.__traceback
__)
4799 + created_tasks_info
[task
]
4805 logging_text
+ created_tasks_info
[task
] + ": Done"
4807 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4809 stage
[1] += " Errors: " + ". ".join(error_detail_list
) + "."
4810 if nsr_id
: # update also nsr
4815 "errorDescription": "Error at: " + ", ".join(error_list
),
4816 "errorDetail": ". ".join(error_detail_list
),
4819 self
._write
_op
_status
(nslcmop_id
, stage
)
4820 return error_detail_list
4822 async def _cancel_pending_tasks(self
, logging_text
, created_tasks_info
):
4823 for task
, name
in created_tasks_info
.items():
4824 self
.logger
.debug(logging_text
+ "Cancelling task: " + name
)
4828 def _map_primitive_params(primitive_desc
, params
, instantiation_params
):
4830 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4831 The default-value is used. If it is between < > it look for a value at instantiation_params
4832 :param primitive_desc: portion of VNFD/NSD that describes primitive
4833 :param params: Params provided by user
4834 :param instantiation_params: Instantiation params provided by user
4835 :return: a dictionary with the calculated params
4837 calculated_params
= {}
4838 for parameter
in primitive_desc
.get("parameter", ()):
4839 param_name
= parameter
["name"]
4840 if param_name
in params
:
4841 calculated_params
[param_name
] = params
[param_name
]
4842 elif "default-value" in parameter
or "value" in parameter
:
4843 if "value" in parameter
:
4844 calculated_params
[param_name
] = parameter
["value"]
4846 calculated_params
[param_name
] = parameter
["default-value"]
4848 isinstance(calculated_params
[param_name
], str)
4849 and calculated_params
[param_name
].startswith("<")
4850 and calculated_params
[param_name
].endswith(">")
4852 if calculated_params
[param_name
][1:-1] in instantiation_params
:
4853 calculated_params
[param_name
] = instantiation_params
[
4854 calculated_params
[param_name
][1:-1]
4858 "Parameter {} needed to execute primitive {} not provided".format(
4859 calculated_params
[param_name
], primitive_desc
["name"]
4864 "Parameter {} needed to execute primitive {} not provided".format(
4865 param_name
, primitive_desc
["name"]
4869 if isinstance(calculated_params
[param_name
], (dict, list, tuple)):
4870 calculated_params
[param_name
] = yaml
.safe_dump(
4871 calculated_params
[param_name
], default_flow_style
=True, width
=256
4873 elif isinstance(calculated_params
[param_name
], str) and calculated_params
[
4875 ].startswith("!!yaml "):
4876 calculated_params
[param_name
] = calculated_params
[param_name
][7:]
4877 if parameter
.get("data-type") == "INTEGER":
4879 calculated_params
[param_name
] = int(calculated_params
[param_name
])
4880 except ValueError: # error converting string to int
4882 "Parameter {} of primitive {} must be integer".format(
4883 param_name
, primitive_desc
["name"]
4886 elif parameter
.get("data-type") == "BOOLEAN":
4887 calculated_params
[param_name
] = not (
4888 (str(calculated_params
[param_name
])).lower() == "false"
4891 # add always ns_config_info if primitive name is config
4892 if primitive_desc
["name"] == "config":
4893 if "ns_config_info" in instantiation_params
:
4894 calculated_params
["ns_config_info"] = instantiation_params
[
4897 return calculated_params
4899 def _look_for_deployed_vca(
4906 ee_descriptor_id
=None,
4908 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4909 for vca
in deployed_vca
:
4912 if member_vnf_index
!= vca
["member-vnf-index"] or vdu_id
!= vca
["vdu_id"]:
4915 vdu_count_index
is not None
4916 and vdu_count_index
!= vca
["vdu_count_index"]
4919 if kdu_name
and kdu_name
!= vca
["kdu_name"]:
4921 if ee_descriptor_id
and ee_descriptor_id
!= vca
["ee_descriptor_id"]:
4925 # vca_deployed not found
4927 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4928 " is not deployed".format(
4937 ee_id
= vca
.get("ee_id")
4939 "type", "lxc_proxy_charm"
4940 ) # default value for backward compatibility - proxy charm
4943 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4944 "execution environment".format(
4945 member_vnf_index
, vdu_id
, kdu_name
, vdu_count_index
4948 return ee_id
, vca_type
4950 async def _ns_execute_primitive(
4956 retries_interval
=30,
4963 if primitive
== "config":
4964 primitive_params
= {"params": primitive_params
}
4966 vca_type
= vca_type
or "lxc_proxy_charm"
4970 output
= await asyncio
.wait_for(
4971 self
.vca_map
[vca_type
].exec_primitive(
4973 primitive_name
=primitive
,
4974 params_dict
=primitive_params
,
4975 progress_timeout
=self
.timeout
.progress_primitive
,
4976 total_timeout
=self
.timeout
.primitive
,
4981 timeout
=timeout
or self
.timeout
.primitive
,
4985 except asyncio
.CancelledError
:
4987 except Exception as e
:
4991 "Error executing action {} on {} -> {}".format(
4996 await asyncio
.sleep(retries_interval
)
4998 if isinstance(e
, asyncio
.TimeoutError
):
5000 message
="Timed out waiting for action to complete"
5002 return "FAILED", getattr(e
, "message", repr(e
))
5004 return "COMPLETED", output
5006 except (LcmException
, asyncio
.CancelledError
):
5008 except Exception as e
:
5009 return "FAIL", "Error executing action {}: {}".format(primitive
, e
)
async def vca_status_refresh(self, nsr_id, nslcmop_id):
    """
    Updating the vca_status with latest juju information in nsrs record
    :param: nsr_id: Id of the nsr
    :param: nslcmop_id: Id of the nslcmop
    :return: None
    """
    self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
    db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
    vca_id = self.get_vca_id({}, db_nsr)
    # refresh status of every deployed KDU, if any
    if db_nsr["_admin"]["deployed"]["K8s"]:
        for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
            cluster_uuid, kdu_instance, cluster_type = (
                k8s["k8scluster-uuid"],
                k8s["kdu-instance"],
                k8s["k8scluster-type"],
            )
            await self._on_update_k8s_db(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                filter={"_id": nsr_id},
                vca_id=vca_id,
                cluster_type=cluster_type,
            )
    # refresh status of every deployed VCA (charm), if any
    if db_nsr["_admin"]["deployed"]["VCA"]:
        for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
            table, filter = "nsrs", {"_id": nsr_id}
            path = "_admin.deployed.VCA.{}.".format(vca_index)
            await self._on_update_n2vc_db(table, filter, path, {})

    self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
    self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5045 async def action(self
, nsr_id
, nslcmop_id
):
5046 # Try to lock HA task here
5047 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5048 if not task_is_locked_by_me
:
5051 logging_text
= "Task ns={} action={} ".format(nsr_id
, nslcmop_id
)
5052 self
.logger
.debug(logging_text
+ "Enter")
5053 # get all needed from database
5057 db_nslcmop_update
= {}
5058 nslcmop_operation_state
= None
5059 error_description_nslcmop
= None
5063 # wait for any previous tasks in process
5064 step
= "Waiting for previous operations to terminate"
5065 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5067 self
._write
_ns
_status
(
5070 current_operation
="RUNNING ACTION",
5071 current_operation_id
=nslcmop_id
,
5074 step
= "Getting information from database"
5075 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5076 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5077 if db_nslcmop
["operationParams"].get("primitive_params"):
5078 db_nslcmop
["operationParams"]["primitive_params"] = json
.loads(
5079 db_nslcmop
["operationParams"]["primitive_params"]
5082 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5083 vnf_index
= db_nslcmop
["operationParams"].get("member_vnf_index")
5084 vdu_id
= db_nslcmop
["operationParams"].get("vdu_id")
5085 kdu_name
= db_nslcmop
["operationParams"].get("kdu_name")
5086 vdu_count_index
= db_nslcmop
["operationParams"].get("vdu_count_index")
5087 primitive
= db_nslcmop
["operationParams"]["primitive"]
5088 primitive_params
= db_nslcmop
["operationParams"]["primitive_params"]
5089 timeout_ns_action
= db_nslcmop
["operationParams"].get(
5090 "timeout_ns_action", self
.timeout
.primitive
5094 step
= "Getting vnfr from database"
5095 db_vnfr
= self
.db
.get_one(
5096 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
5098 if db_vnfr
.get("kdur"):
5100 for kdur
in db_vnfr
["kdur"]:
5101 if kdur
.get("additionalParams"):
5102 kdur
["additionalParams"] = json
.loads(
5103 kdur
["additionalParams"]
5105 kdur_list
.append(kdur
)
5106 db_vnfr
["kdur"] = kdur_list
5107 step
= "Getting vnfd from database"
5108 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
5110 # Sync filesystem before running a primitive
5111 self
.fs
.sync(db_vnfr
["vnfd-id"])
5113 step
= "Getting nsd from database"
5114 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
5116 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5117 # for backward compatibility
5118 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
5119 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
5120 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
5121 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5123 # look for primitive
5124 config_primitive_desc
= descriptor_configuration
= None
5126 descriptor_configuration
= get_configuration(db_vnfd
, vdu_id
)
5128 descriptor_configuration
= get_configuration(db_vnfd
, kdu_name
)
5130 descriptor_configuration
= get_configuration(db_vnfd
, db_vnfd
["id"])
5132 descriptor_configuration
= db_nsd
.get("ns-configuration")
5134 if descriptor_configuration
and descriptor_configuration
.get(
5137 for config_primitive
in descriptor_configuration
["config-primitive"]:
5138 if config_primitive
["name"] == primitive
:
5139 config_primitive_desc
= config_primitive
5142 if not config_primitive_desc
:
5143 if not (kdu_name
and primitive
in ("upgrade", "rollback", "status")):
5145 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5149 primitive_name
= primitive
5150 ee_descriptor_id
= None
5152 primitive_name
= config_primitive_desc
.get(
5153 "execution-environment-primitive", primitive
5155 ee_descriptor_id
= config_primitive_desc
.get(
5156 "execution-environment-ref"
5162 (x
for x
in db_vnfr
["vdur"] if x
["vdu-id-ref"] == vdu_id
), None
5164 desc_params
= parse_yaml_strings(vdur
.get("additionalParams"))
5167 (x
for x
in db_vnfr
["kdur"] if x
["kdu-name"] == kdu_name
), None
5169 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
5171 desc_params
= parse_yaml_strings(
5172 db_vnfr
.get("additionalParamsForVnf")
5175 desc_params
= parse_yaml_strings(db_nsr
.get("additionalParamsForNs"))
5176 if kdu_name
and get_configuration(db_vnfd
, kdu_name
):
5177 kdu_configuration
= get_configuration(db_vnfd
, kdu_name
)
5179 for primitive
in kdu_configuration
.get("initial-config-primitive", []):
5180 actions
.add(primitive
["name"])
5181 for primitive
in kdu_configuration
.get("config-primitive", []):
5182 actions
.add(primitive
["name"])
5184 nsr_deployed
["K8s"],
5185 lambda kdu
: kdu_name
== kdu
["kdu-name"]
5186 and kdu
["member-vnf-index"] == vnf_index
,
5190 if primitive_name
in actions
5191 and kdu
["k8scluster-type"] != "helm-chart-v3"
5195 # TODO check if ns is in a proper status
5197 primitive_name
in ("upgrade", "rollback", "status") or kdu_action
5199 # kdur and desc_params already set from before
5200 if primitive_params
:
5201 desc_params
.update(primitive_params
)
5202 # TODO Check if we will need something at vnf level
5203 for index
, kdu
in enumerate(get_iterable(nsr_deployed
, "K8s")):
5205 kdu_name
== kdu
["kdu-name"]
5206 and kdu
["member-vnf-index"] == vnf_index
5211 "KDU '{}' for vnf '{}' not deployed".format(kdu_name
, vnf_index
)
5214 if kdu
.get("k8scluster-type") not in self
.k8scluster_map
:
5215 msg
= "unknown k8scluster-type '{}'".format(
5216 kdu
.get("k8scluster-type")
5218 raise LcmException(msg
)
5221 "collection": "nsrs",
5222 "filter": {"_id": nsr_id
},
5223 "path": "_admin.deployed.K8s.{}".format(index
),
5227 + "Exec k8s {} on {}.{}".format(primitive_name
, vnf_index
, kdu_name
)
5229 step
= "Executing kdu {}".format(primitive_name
)
5230 if primitive_name
== "upgrade":
5231 if desc_params
.get("kdu_model"):
5232 kdu_model
= desc_params
.get("kdu_model")
5233 del desc_params
["kdu_model"]
5235 kdu_model
= kdu
.get("kdu-model")
5236 if kdu_model
.count("/") < 2: # helm chart is not embedded
5237 parts
= kdu_model
.split(sep
=":")
5239 kdu_model
= parts
[0]
5240 if desc_params
.get("kdu_atomic_upgrade"):
5241 atomic_upgrade
= desc_params
.get(
5242 "kdu_atomic_upgrade"
5243 ).lower() in ("yes", "true", "1")
5244 del desc_params
["kdu_atomic_upgrade"]
5246 atomic_upgrade
= True
5248 detailed_status
= await asyncio
.wait_for(
5249 self
.k8scluster_map
[kdu
["k8scluster-type"]].upgrade(
5250 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5251 kdu_instance
=kdu
.get("kdu-instance"),
5252 atomic
=atomic_upgrade
,
5253 kdu_model
=kdu_model
,
5256 timeout
=timeout_ns_action
,
5258 timeout
=timeout_ns_action
+ 10,
5261 logging_text
+ " Upgrade of kdu {} done".format(detailed_status
)
5263 elif primitive_name
== "rollback":
5264 detailed_status
= await asyncio
.wait_for(
5265 self
.k8scluster_map
[kdu
["k8scluster-type"]].rollback(
5266 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5267 kdu_instance
=kdu
.get("kdu-instance"),
5270 timeout
=timeout_ns_action
,
5272 elif primitive_name
== "status":
5273 detailed_status
= await asyncio
.wait_for(
5274 self
.k8scluster_map
[kdu
["k8scluster-type"]].status_kdu(
5275 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5276 kdu_instance
=kdu
.get("kdu-instance"),
5279 timeout
=timeout_ns_action
,
5282 kdu_instance
= kdu
.get("kdu-instance") or "{}-{}".format(
5283 kdu
["kdu-name"], nsr_id
5285 params
= self
._map
_primitive
_params
(
5286 config_primitive_desc
, primitive_params
, desc_params
5289 detailed_status
= await asyncio
.wait_for(
5290 self
.k8scluster_map
[kdu
["k8scluster-type"]].exec_primitive(
5291 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5292 kdu_instance
=kdu_instance
,
5293 primitive_name
=primitive_name
,
5296 timeout
=timeout_ns_action
,
5299 timeout
=timeout_ns_action
,
5303 nslcmop_operation_state
= "COMPLETED"
5305 detailed_status
= ""
5306 nslcmop_operation_state
= "FAILED"
5308 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
5309 nsr_deployed
["VCA"],
5310 member_vnf_index
=vnf_index
,
5312 vdu_count_index
=vdu_count_index
,
5313 ee_descriptor_id
=ee_descriptor_id
,
5315 for vca_index
, vca_deployed
in enumerate(
5316 db_nsr
["_admin"]["deployed"]["VCA"]
5318 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5320 "collection": "nsrs",
5321 "filter": {"_id": nsr_id
},
5322 "path": "_admin.deployed.VCA.{}.".format(vca_index
),
5326 nslcmop_operation_state
,
5328 ) = await self
._ns
_execute
_primitive
(
5330 primitive
=primitive_name
,
5331 primitive_params
=self
._map
_primitive
_params
(
5332 config_primitive_desc
, primitive_params
, desc_params
5334 timeout
=timeout_ns_action
,
5340 db_nslcmop_update
["detailed-status"] = detailed_status
5341 error_description_nslcmop
= (
5342 detailed_status
if nslcmop_operation_state
== "FAILED" else ""
5346 + "Done with result {} {}".format(
5347 nslcmop_operation_state
, detailed_status
5350 return # database update is called inside finally
5352 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
5353 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
5355 except asyncio
.CancelledError
:
5357 logging_text
+ "Cancelled Exception while '{}'".format(step
)
5359 exc
= "Operation was cancelled"
5360 except asyncio
.TimeoutError
:
5361 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
5363 except Exception as e
:
5364 exc
= traceback
.format_exc()
5365 self
.logger
.critical(
5366 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
5375 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
5376 nslcmop_operation_state
= "FAILED"
5378 self
._write
_ns
_status
(
5382 ], # TODO check if degraded. For the moment use previous status
5383 current_operation
="IDLE",
5384 current_operation_id
=None,
5385 # error_description=error_description_nsr,
5386 # error_detail=error_detail,
5387 other_update
=db_nsr_update
,
5390 self
._write
_op
_status
(
5393 error_message
=error_description_nslcmop
,
5394 operation_state
=nslcmop_operation_state
,
5395 other_update
=db_nslcmop_update
,
5398 if nslcmop_operation_state
:
5400 await self
.msg
.aiowrite(
5405 "nslcmop_id": nslcmop_id
,
5406 "operationState": nslcmop_operation_state
,
5409 except Exception as e
:
5411 logging_text
+ "kafka_write notification Exception {}".format(e
)
5413 self
.logger
.debug(logging_text
+ "Exit")
5414 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_action")
5415 return nslcmop_operation_state
, detailed_status
5417 async def terminate_vdus(
5418 self
, db_vnfr
, member_vnf_index
, db_nsr
, update_db_nslcmops
, stage
, logging_text
5420 """This method terminates VDUs
5423 db_vnfr: VNF instance record
5424 member_vnf_index: VNF index to identify the VDUs to be removed
5425 db_nsr: NS instance record
5426 update_db_nslcmops: Nslcmop update record
5428 vca_scaling_info
= []
5429 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5430 scaling_info
["scaling_direction"] = "IN"
5431 scaling_info
["vdu-delete"] = {}
5432 scaling_info
["kdu-delete"] = {}
5433 db_vdur
= db_vnfr
.get("vdur")
5434 vdur_list
= copy(db_vdur
)
5436 for index
, vdu
in enumerate(vdur_list
):
5437 vca_scaling_info
.append(
5439 "osm_vdu_id": vdu
["vdu-id-ref"],
5440 "member-vnf-index": member_vnf_index
,
5442 "vdu_index": count_index
,
5445 scaling_info
["vdu-delete"][vdu
["vdu-id-ref"]] = count_index
5446 scaling_info
["vdu"].append(
5448 "name": vdu
.get("name") or vdu
.get("vdu-name"),
5449 "vdu_id": vdu
["vdu-id-ref"],
5453 for interface
in vdu
["interfaces"]:
5454 scaling_info
["vdu"][index
]["interface"].append(
5456 "name": interface
["name"],
5457 "ip_address": interface
["ip-address"],
5458 "mac_address": interface
.get("mac-address"),
5461 self
.logger
.info("NS update scaling info{}".format(scaling_info
))
5462 stage
[2] = "Terminating VDUs"
5463 if scaling_info
.get("vdu-delete"):
5464 # scale_process = "RO"
5465 if self
.ro_config
.ng
:
5466 await self
._scale
_ng
_ro
(
5475 async def remove_vnf(self
, nsr_id
, nslcmop_id
, vnf_instance_id
):
5476 """This method is to Remove VNF instances from NS.
5479 nsr_id: NS instance id
5480 nslcmop_id: nslcmop id of update
5481 vnf_instance_id: id of the VNF instance to be removed
5484 result: (str, str) COMPLETED/FAILED, details
5488 logging_text
= "Task ns={} update ".format(nsr_id
)
5489 check_vnfr_count
= len(self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}))
5490 self
.logger
.info("check_vnfr_count {}".format(check_vnfr_count
))
5491 if check_vnfr_count
> 1:
5492 stage
= ["", "", ""]
5493 step
= "Getting nslcmop from database"
5495 step
+ " after having waited for previous tasks to be completed"
5497 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5498 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5499 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
5500 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5501 """ db_vnfr = self.db.get_one(
5502 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5504 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5505 await self
.terminate_vdus(
5514 constituent_vnfr
= db_nsr
.get("constituent-vnfr-ref")
5515 constituent_vnfr
.remove(db_vnfr
.get("_id"))
5516 db_nsr_update
["constituent-vnfr-ref"] = db_nsr
.get(
5517 "constituent-vnfr-ref"
5519 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5520 self
.db
.del_one("vnfrs", {"_id": db_vnfr
.get("_id")})
5521 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5522 return "COMPLETED", "Done"
5524 step
= "Terminate VNF Failed with"
5526 "{} Cannot terminate the last VNF in this NS.".format(
5530 except (LcmException
, asyncio
.CancelledError
):
5532 except Exception as e
:
5533 self
.logger
.debug("Error removing VNF {}".format(e
))
5534 return "FAILED", "Error removing VNF {}".format(e
)
5536 async def _ns_redeploy_vnf(
5544 """This method updates and redeploys VNF instances
5547 nsr_id: NS instance id
5548 nslcmop_id: nslcmop id
5549 db_vnfd: VNF descriptor
5550 db_vnfr: VNF instance record
5551 db_nsr: NS instance record
5554 result: (str, str) COMPLETED/FAILED, details
5558 stage
= ["", "", ""]
5559 logging_text
= "Task ns={} update ".format(nsr_id
)
5560 latest_vnfd_revision
= db_vnfd
["_admin"].get("revision")
5561 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5563 # Terminate old VNF resources
5564 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5565 await self
.terminate_vdus(
5574 # old_vnfd_id = db_vnfr["vnfd-id"]
5575 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5576 new_db_vnfd
= db_vnfd
5577 # new_vnfd_ref = new_db_vnfd["id"]
5578 # new_vnfd_id = vnfd_id
5582 for cp
in new_db_vnfd
.get("ext-cpd", ()):
5584 "name": cp
.get("id"),
5585 "connection-point-id": cp
.get("int-cpd", {}).get("cpd"),
5586 "connection-point-vdu-id": cp
.get("int-cpd", {}).get("vdu-id"),
5589 new_vnfr_cp
.append(vnf_cp
)
5590 new_vdur
= update_db_nslcmops
["operationParams"]["newVdur"]
5591 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5592 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5594 "revision": latest_vnfd_revision
,
5595 "connection-point": new_vnfr_cp
,
5599 self
.update_db_2("vnfrs", db_vnfr
["_id"], new_vnfr_update
)
5600 updated_db_vnfr
= self
.db
.get_one(
5602 {"member-vnf-index-ref": member_vnf_index
, "nsr-id-ref": nsr_id
},
5605 # Instantiate new VNF resources
5606 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5607 vca_scaling_info
= []
5608 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5609 scaling_info
["scaling_direction"] = "OUT"
5610 scaling_info
["vdu-create"] = {}
5611 scaling_info
["kdu-create"] = {}
5612 vdud_instantiate_list
= db_vnfd
["vdu"]
5613 for index
, vdud
in enumerate(vdud_instantiate_list
):
5614 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(vdud
, db_vnfd
)
5616 additional_params
= (
5617 self
._get
_vdu
_additional
_params
(updated_db_vnfr
, vdud
["id"])
5620 cloud_init_list
= []
5622 # TODO Information of its own ip is not available because db_vnfr is not updated.
5623 additional_params
["OSM"] = get_osm_params(
5624 updated_db_vnfr
, vdud
["id"], 1
5626 cloud_init_list
.append(
5627 self
._parse
_cloud
_init
(
5634 vca_scaling_info
.append(
5636 "osm_vdu_id": vdud
["id"],
5637 "member-vnf-index": member_vnf_index
,
5639 "vdu_index": count_index
,
5642 scaling_info
["vdu-create"][vdud
["id"]] = count_index
5643 if self
.ro_config
.ng
:
5645 "New Resources to be deployed: {}".format(scaling_info
)
5647 await self
._scale
_ng
_ro
(
5655 return "COMPLETED", "Done"
5656 except (LcmException
, asyncio
.CancelledError
):
5658 except Exception as e
:
5659 self
.logger
.debug("Error updating VNF {}".format(e
))
5660 return "FAILED", "Error updating VNF {}".format(e
)
5662 async def _ns_charm_upgrade(
5668 timeout
: float = None,
5670 """This method upgrade charms in VNF instances
5673 ee_id: Execution environment id
5674 path: Local path to the charm
5676 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5677 timeout: (Float) Timeout for the ns update operation
5680 result: (str, str) COMPLETED/FAILED, details
5683 charm_type
= charm_type
or "lxc_proxy_charm"
5684 output
= await self
.vca_map
[charm_type
].upgrade_charm(
5688 charm_type
=charm_type
,
5689 timeout
=timeout
or self
.timeout
.ns_update
,
5693 return "COMPLETED", output
5695 except (LcmException
, asyncio
.CancelledError
):
5698 except Exception as e
:
5699 self
.logger
.debug("Error upgrading charm {}".format(path
))
5701 return "FAILED", "Error upgrading charm {}: {}".format(path
, e
)
5703 async def update(self
, nsr_id
, nslcmop_id
):
5704 """Update NS according to different update types
5706 This method performs upgrade of VNF instances then updates the revision
5707 number in VNF record
5710 nsr_id: Network service will be updated
5711 nslcmop_id: ns lcm operation id
5714 It may raise DbException, LcmException, N2VCException, K8sException
5717 # Try to lock HA task here
5718 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5719 if not task_is_locked_by_me
:
5722 logging_text
= "Task ns={} update={} ".format(nsr_id
, nslcmop_id
)
5723 self
.logger
.debug(logging_text
+ "Enter")
5725 # Set the required variables to be filled up later
5727 db_nslcmop_update
= {}
5729 nslcmop_operation_state
= None
5731 error_description_nslcmop
= ""
5733 change_type
= "updated"
5734 detailed_status
= ""
5735 member_vnf_index
= None
5738 # wait for any previous tasks in process
5739 step
= "Waiting for previous operations to terminate"
5740 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5741 self
._write
_ns
_status
(
5744 current_operation
="UPDATING",
5745 current_operation_id
=nslcmop_id
,
5748 step
= "Getting nslcmop from database"
5749 db_nslcmop
= self
.db
.get_one(
5750 "nslcmops", {"_id": nslcmop_id
}, fail_on_empty
=False
5752 update_type
= db_nslcmop
["operationParams"]["updateType"]
5754 step
= "Getting nsr from database"
5755 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5756 old_operational_status
= db_nsr
["operational-status"]
5757 db_nsr_update
["operational-status"] = "updating"
5758 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5759 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5761 if update_type
== "CHANGE_VNFPKG":
5762 # Get the input parameters given through update request
5763 vnf_instance_id
= db_nslcmop
["operationParams"][
5764 "changeVnfPackageData"
5765 ].get("vnfInstanceId")
5767 vnfd_id
= db_nslcmop
["operationParams"]["changeVnfPackageData"].get(
5770 timeout_seconds
= db_nslcmop
["operationParams"].get("timeout_ns_update")
5772 step
= "Getting vnfr from database"
5773 db_vnfr
= self
.db
.get_one(
5774 "vnfrs", {"_id": vnf_instance_id
}, fail_on_empty
=False
5777 step
= "Getting vnfds from database"
5779 latest_vnfd
= self
.db
.get_one(
5780 "vnfds", {"_id": vnfd_id
}, fail_on_empty
=False
5782 latest_vnfd_revision
= latest_vnfd
["_admin"].get("revision")
5785 current_vnf_revision
= db_vnfr
.get("revision", 1)
5786 current_vnfd
= self
.db
.get_one(
5788 {"_id": vnfd_id
+ ":" + str(current_vnf_revision
)},
5789 fail_on_empty
=False,
5791 # Charm artifact paths will be filled up later
5793 current_charm_artifact_path
,
5794 target_charm_artifact_path
,
5795 charm_artifact_paths
,
5797 ) = ([], [], [], [])
5799 step
= "Checking if revision has changed in VNFD"
5800 if current_vnf_revision
!= latest_vnfd_revision
:
5801 change_type
= "policy_updated"
5803 # There is new revision of VNFD, update operation is required
5804 current_vnfd_path
= vnfd_id
+ ":" + str(current_vnf_revision
)
5805 latest_vnfd_path
= vnfd_id
+ ":" + str(latest_vnfd_revision
)
5807 step
= "Removing the VNFD packages if they exist in the local path"
5808 shutil
.rmtree(self
.fs
.path
+ current_vnfd_path
, ignore_errors
=True)
5809 shutil
.rmtree(self
.fs
.path
+ latest_vnfd_path
, ignore_errors
=True)
5811 step
= "Get the VNFD packages from FSMongo"
5812 self
.fs
.sync(from_path
=latest_vnfd_path
)
5813 self
.fs
.sync(from_path
=current_vnfd_path
)
5816 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5818 current_base_folder
= current_vnfd
["_admin"]["storage"]
5819 latest_base_folder
= latest_vnfd
["_admin"]["storage"]
5821 for vca_index
, vca_deployed
in enumerate(
5822 get_iterable(nsr_deployed
, "VCA")
5824 vnf_index
= db_vnfr
.get("member-vnf-index-ref")
5826 # Getting charm-id and charm-type
5827 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5828 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5829 vca_type
= vca_deployed
.get("type")
5830 vdu_count_index
= vca_deployed
.get("vdu_count_index")
5833 ee_id
= vca_deployed
.get("ee_id")
5835 step
= "Getting descriptor config"
5836 if current_vnfd
.get("kdu"):
5837 search_key
= "kdu_name"
5839 search_key
= "vnfd_id"
5841 entity_id
= vca_deployed
.get(search_key
)
5843 descriptor_config
= get_configuration(
5844 current_vnfd
, entity_id
5847 if "execution-environment-list" in descriptor_config
:
5848 ee_list
= descriptor_config
.get(
5849 "execution-environment-list", []
5854 # There could be several charm used in the same VNF
5855 for ee_item
in ee_list
:
5856 if ee_item
.get("juju"):
5857 step
= "Getting charm name"
5858 charm_name
= ee_item
["juju"].get("charm")
5860 step
= "Setting Charm artifact paths"
5861 current_charm_artifact_path
.append(
5862 get_charm_artifact_path(
5863 current_base_folder
,
5866 current_vnf_revision
,
5869 target_charm_artifact_path
.append(
5870 get_charm_artifact_path(
5874 latest_vnfd_revision
,
5877 elif ee_item
.get("helm-chart"):
5878 # add chart to list and all parameters
5879 step
= "Getting helm chart name"
5880 chart_name
= ee_item
.get("helm-chart")
5881 vca_type
= "helm-v3"
5882 step
= "Setting Helm chart artifact paths"
5884 helm_artifacts
.append(
5886 "current_artifact_path": get_charm_artifact_path(
5887 current_base_folder
,
5890 current_vnf_revision
,
5892 "target_artifact_path": get_charm_artifact_path(
5896 latest_vnfd_revision
,
5899 "vca_index": vca_index
,
5900 "vdu_index": vdu_count_index
,
5904 charm_artifact_paths
= zip(
5905 current_charm_artifact_path
, target_charm_artifact_path
5908 step
= "Checking if software version has changed in VNFD"
5909 if find_software_version(current_vnfd
) != find_software_version(
5912 step
= "Checking if existing VNF has charm"
5913 for current_charm_path
, target_charm_path
in list(
5914 charm_artifact_paths
5916 if current_charm_path
:
5918 "Software version change is not supported as VNF instance {} has charm.".format(
5923 step
= "Checking whether the descriptor has SFC"
5924 if db_nsr
.get("nsd", {}).get("vnffgd"):
5926 "Ns update is not allowed for NS with SFC"
5929 # There is no change in the charm package, then redeploy the VNF
5930 # based on new descriptor
5931 step
= "Redeploying VNF"
5932 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5933 (result
, detailed_status
) = await self
._ns
_redeploy
_vnf
(
5934 nsr_id
, nslcmop_id
, latest_vnfd
, db_vnfr
, db_nsr
5936 if result
== "FAILED":
5937 nslcmop_operation_state
= result
5938 error_description_nslcmop
= detailed_status
5939 old_operational_status
= "failed"
5940 db_nslcmop_update
["detailed-status"] = detailed_status
5941 db_nsr_update
["detailed-status"] = detailed_status
5942 scaling_aspect
= get_scaling_aspect(latest_vnfd
)
5943 scaling_group_desc
= db_nsr
.get("_admin").get(
5944 "scaling-group", None
5946 if scaling_group_desc
:
5947 for aspect
in scaling_aspect
:
5948 scaling_group_id
= aspect
.get("id")
5949 for scale_index
, scaling_group
in enumerate(
5952 if scaling_group
.get("name") == scaling_group_id
:
5954 "_admin.scaling-group.{}.nb-scale-op".format(
5960 + " step {} Done with result {} {}".format(
5961 step
, nslcmop_operation_state
, detailed_status
5966 step
= "Checking if any charm package has changed or not"
5967 for current_charm_path
, target_charm_path
in list(
5968 charm_artifact_paths
5972 and target_charm_path
5973 and self
.check_charm_hash_changed(
5974 current_charm_path
, target_charm_path
5977 step
= "Checking whether VNF uses juju bundle"
5978 if check_juju_bundle_existence(current_vnfd
):
5980 "Charm upgrade is not supported for the instance which"
5981 " uses juju-bundle: {}".format(
5982 check_juju_bundle_existence(current_vnfd
)
5986 step
= "Upgrading Charm"
5990 ) = await self
._ns
_charm
_upgrade
(
5993 charm_type
=vca_type
,
5994 path
=self
.fs
.path
+ target_charm_path
,
5995 timeout
=timeout_seconds
,
5998 if result
== "FAILED":
5999 nslcmop_operation_state
= result
6000 error_description_nslcmop
= detailed_status
6002 db_nslcmop_update
["detailed-status"] = detailed_status
6005 + " step {} Done with result {} {}".format(
6006 step
, nslcmop_operation_state
, detailed_status
6010 step
= "Updating policies"
6011 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6012 result
= "COMPLETED"
6013 detailed_status
= "Done"
6014 db_nslcmop_update
["detailed-status"] = "Done"
6017 for item
in helm_artifacts
:
6019 item
["current_artifact_path"]
6020 and item
["target_artifact_path"]
6021 and self
.check_charm_hash_changed(
6022 item
["current_artifact_path"],
6023 item
["target_artifact_path"],
6027 db_update_entry
= "_admin.deployed.VCA.{}.".format(
6030 vnfr_id
= db_vnfr
["_id"]
6031 osm_config
= {"osm": {"ns_id": nsr_id
, "vnf_id": vnfr_id
}}
6033 "collection": "nsrs",
6034 "filter": {"_id": nsr_id
},
6035 "path": db_update_entry
,
6037 vca_type
, namespace
, helm_id
= get_ee_id_parts(item
["ee_id"])
6038 await self
.vca_map
[vca_type
].upgrade_execution_environment(
6039 namespace
=namespace
,
6043 artifact_path
=item
["target_artifact_path"],
6046 vnf_id
= db_vnfr
.get("vnfd-ref")
6047 config_descriptor
= get_configuration(latest_vnfd
, vnf_id
)
6048 self
.logger
.debug("get ssh key block")
6052 ("config-access", "ssh-access", "required"),
6054 # Needed to inject a ssh key
6057 ("config-access", "ssh-access", "default-user"),
6060 "Install configuration Software, getting public ssh key"
6062 pub_key
= await self
.vca_map
[
6064 ].get_ee_ssh_public__key(
6065 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
6069 "Insert public key into VM user={} ssh_key={}".format(
6073 self
.logger
.debug(logging_text
+ step
)
6075 # wait for RO (ip-address) Insert pub_key into VM
6076 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
6086 initial_config_primitive_list
= config_descriptor
.get(
6087 "initial-config-primitive"
6089 config_primitive
= next(
6092 for p
in initial_config_primitive_list
6093 if p
["name"] == "config"
6097 if not config_primitive
:
6100 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6102 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
6103 if db_vnfr
.get("additionalParamsForVnf"):
6104 deploy_params
.update(
6106 db_vnfr
["additionalParamsForVnf"].copy()
6109 primitive_params_
= self
._map
_primitive
_params
(
6110 config_primitive
, {}, deploy_params
6113 step
= "execute primitive '{}' params '{}'".format(
6114 config_primitive
["name"], primitive_params_
6116 self
.logger
.debug(logging_text
+ step
)
6117 await self
.vca_map
[vca_type
].exec_primitive(
6119 primitive_name
=config_primitive
["name"],
6120 params_dict
=primitive_params_
,
6126 step
= "Updating policies"
6127 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6128 detailed_status
= "Done"
6129 db_nslcmop_update
["detailed-status"] = "Done"
6131 # If nslcmop_operation_state is None, so any operation is not failed.
6132 if not nslcmop_operation_state
:
6133 nslcmop_operation_state
= "COMPLETED"
6135 # If update CHANGE_VNFPKG nslcmop_operation is successful
6136 # vnf revision need to be updated
6137 vnfr_update
["revision"] = latest_vnfd_revision
6138 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
6142 + " task Done with result {} {}".format(
6143 nslcmop_operation_state
, detailed_status
6146 elif update_type
== "REMOVE_VNF":
6147 # This part is included in https://osm.etsi.org/gerrit/11876
6148 vnf_instance_id
= db_nslcmop
["operationParams"]["removeVnfInstanceId"]
6149 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
6150 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6151 step
= "Removing VNF"
6152 (result
, detailed_status
) = await self
.remove_vnf(
6153 nsr_id
, nslcmop_id
, vnf_instance_id
6155 if result
== "FAILED":
6156 nslcmop_operation_state
= result
6157 error_description_nslcmop
= detailed_status
6158 db_nslcmop_update
["detailed-status"] = detailed_status
6159 change_type
= "vnf_terminated"
6160 if not nslcmop_operation_state
:
6161 nslcmop_operation_state
= "COMPLETED"
6164 + " task Done with result {} {}".format(
6165 nslcmop_operation_state
, detailed_status
6169 elif update_type
== "OPERATE_VNF":
6170 vnf_id
= db_nslcmop
["operationParams"]["operateVnfData"][
6173 operation_type
= db_nslcmop
["operationParams"]["operateVnfData"][
6176 additional_param
= db_nslcmop
["operationParams"]["operateVnfData"][
6179 (result
, detailed_status
) = await self
.rebuild_start_stop(
6180 nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
6182 if result
== "FAILED":
6183 nslcmop_operation_state
= result
6184 error_description_nslcmop
= detailed_status
6185 db_nslcmop_update
["detailed-status"] = detailed_status
6186 if not nslcmop_operation_state
:
6187 nslcmop_operation_state
= "COMPLETED"
6190 + " task Done with result {} {}".format(
6191 nslcmop_operation_state
, detailed_status
6194 elif update_type
== "VERTICAL_SCALE":
6196 "Prepare for VERTICAL_SCALE update operation {}".format(db_nslcmop
)
6198 # Get the input parameters given through update request
6199 vnf_instance_id
= db_nslcmop
["operationParams"]["verticalScaleVnf"].get(
6203 vnfd_id
= db_nslcmop
["operationParams"]["verticalScaleVnf"].get(
6206 step
= "Getting vnfr from database"
6207 db_vnfr
= self
.db
.get_one(
6208 "vnfrs", {"_id": vnf_instance_id
}, fail_on_empty
=False
6210 self
.logger
.debug(step
)
6211 step
= "Getting vnfds from database"
6212 self
.logger
.debug("Start" + step
)
6214 latest_vnfd
= self
.db
.get_one(
6215 "vnfds", {"_id": vnfd_id
}, fail_on_empty
=False
6217 latest_vnfd_revision
= latest_vnfd
["_admin"].get("revision")
6219 current_vnf_revision
= db_vnfr
.get("revision", 1)
6220 current_vnfd
= self
.db
.get_one(
6222 {"_id": vnfd_id
+ ":" + str(current_vnf_revision
)},
6223 fail_on_empty
=False,
6225 self
.logger
.debug("End" + step
)
6226 # verify flavor changes
6227 step
= "Checking for flavor change"
6228 if find_software_version(current_vnfd
) != find_software_version(
6231 self
.logger
.debug("Start" + step
)
6232 if current_vnfd
.get("virtual-compute-desc") == latest_vnfd
.get(
6233 "virtual-compute-desc"
6234 ) and current_vnfd
.get("virtual-storage-desc") == latest_vnfd
.get(
6235 "virtual-storage-desc"
6238 "No change in flavor check vnfd {}".format(vnfd_id
)
6242 "No change in software_version of vnfd {}".format(vnfd_id
)
6245 self
.logger
.debug("End" + step
)
6247 (result
, detailed_status
) = await self
.vertical_scale(
6251 "vertical_scale result: {} detailed_status :{}".format(
6252 result
, detailed_status
6255 if result
== "FAILED":
6256 nslcmop_operation_state
= result
6257 error_description_nslcmop
= detailed_status
6258 db_nslcmop_update
["detailed-status"] = detailed_status
6259 if not nslcmop_operation_state
:
6260 nslcmop_operation_state
= "COMPLETED"
6263 + " task Done with result {} {}".format(
6264 nslcmop_operation_state
, detailed_status
6268 # If nslcmop_operation_state is None, so any operation is not failed.
6269 # All operations are executed in overall.
6270 if not nslcmop_operation_state
:
6271 nslcmop_operation_state
= "COMPLETED"
6272 db_nsr_update
["operational-status"] = old_operational_status
6274 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
6275 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
6277 except asyncio
.CancelledError
:
6279 logging_text
+ "Cancelled Exception while '{}'".format(step
)
6281 exc
= "Operation was cancelled"
6282 except asyncio
.TimeoutError
:
6283 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
6285 except Exception as e
:
6286 exc
= traceback
.format_exc()
6287 self
.logger
.critical(
6288 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
6297 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
6298 nslcmop_operation_state
= "FAILED"
6299 db_nsr_update
["operational-status"] = old_operational_status
6301 self
._write
_ns
_status
(
6303 ns_state
=db_nsr
["nsState"],
6304 current_operation
="IDLE",
6305 current_operation_id
=None,
6306 other_update
=db_nsr_update
,
6309 self
._write
_op
_status
(
6312 error_message
=error_description_nslcmop
,
6313 operation_state
=nslcmop_operation_state
,
6314 other_update
=db_nslcmop_update
,
6317 if nslcmop_operation_state
:
6321 "nslcmop_id": nslcmop_id
,
6322 "operationState": nslcmop_operation_state
,
6325 change_type
in ("vnf_terminated", "policy_updated")
6326 and member_vnf_index
6328 msg
.update({"vnf_member_index": member_vnf_index
})
6329 await self
.msg
.aiowrite("ns", change_type
, msg
)
6330 except Exception as e
:
6332 logging_text
+ "kafka_write notification Exception {}".format(e
)
6334 self
.logger
.debug(logging_text
+ "Exit")
6335 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_update")
6336 return nslcmop_operation_state
, detailed_status
6338 async def scale(self
, nsr_id
, nslcmop_id
):
6339 # Try to lock HA task here
6340 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
6341 if not task_is_locked_by_me
:
6344 logging_text
= "Task ns={} scale={} ".format(nsr_id
, nslcmop_id
)
6345 stage
= ["", "", ""]
6346 tasks_dict_info
= {}
6347 # ^ stage, step, VIM progress
6348 self
.logger
.debug(logging_text
+ "Enter")
6349 # get all needed from database
6351 db_nslcmop_update
= {}
6354 # in case of error, indicates what part of scale was failed to put nsr at error status
6355 scale_process
= None
6356 old_operational_status
= ""
6357 old_config_status
= ""
6361 # wait for any previous tasks in process
6362 step
= "Waiting for previous operations to terminate"
6363 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
6364 self
._write
_ns
_status
(
6367 current_operation
="SCALING",
6368 current_operation_id
=nslcmop_id
,
6371 step
= "Getting nslcmop from database"
6373 step
+ " after having waited for previous tasks to be completed"
6375 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
6377 step
= "Getting nsr from database"
6378 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
6379 old_operational_status
= db_nsr
["operational-status"]
6380 old_config_status
= db_nsr
["config-status"]
6382 step
= "Checking whether the descriptor has SFC"
6383 if db_nsr
.get("nsd", {}).get("vnffgd"):
6384 raise LcmException("Scaling is not allowed for NS with SFC")
6386 step
= "Parsing scaling parameters"
6387 db_nsr_update
["operational-status"] = "scaling"
6388 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6389 nsr_deployed
= db_nsr
["_admin"].get("deployed")
6391 vnf_index
= db_nslcmop
["operationParams"]["scaleVnfData"][
6393 ]["member-vnf-index"]
6394 scaling_group
= db_nslcmop
["operationParams"]["scaleVnfData"][
6396 ]["scaling-group-descriptor"]
6397 scaling_type
= db_nslcmop
["operationParams"]["scaleVnfData"]["scaleVnfType"]
6398 # for backward compatibility
6399 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
6400 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
6401 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
6402 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6404 step
= "Getting vnfr from database"
6405 db_vnfr
= self
.db
.get_one(
6406 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
6409 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
6411 step
= "Getting vnfd from database"
6412 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
6414 base_folder
= db_vnfd
["_admin"]["storage"]
6416 step
= "Getting scaling-group-descriptor"
6417 scaling_descriptor
= find_in_list(
6418 get_scaling_aspect(db_vnfd
),
6419 lambda scale_desc
: scale_desc
["name"] == scaling_group
,
6421 if not scaling_descriptor
:
6423 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6424 "at vnfd:scaling-group-descriptor".format(scaling_group
)
6427 step
= "Sending scale order to VIM"
6428 # TODO check if ns is in a proper status
6430 if not db_nsr
["_admin"].get("scaling-group"):
6435 "_admin.scaling-group": [
6437 "name": scaling_group
,
6438 "vnf_index": vnf_index
,
6444 admin_scale_index
= 0
6446 for admin_scale_index
, admin_scale_info
in enumerate(
6447 db_nsr
["_admin"]["scaling-group"]
6450 admin_scale_info
["name"] == scaling_group
6451 and admin_scale_info
["vnf_index"] == vnf_index
6453 nb_scale_op
= admin_scale_info
.get("nb-scale-op", 0)
6455 else: # not found, set index one plus last element and add new entry with the name
6456 admin_scale_index
+= 1
6458 "_admin.scaling-group.{}.name".format(admin_scale_index
)
6461 "_admin.scaling-group.{}.vnf_index".format(admin_scale_index
)
6464 vca_scaling_info
= []
6465 scaling_info
= {"scaling_group_name": scaling_group
, "vdu": [], "kdu": []}
6466 if scaling_type
== "SCALE_OUT":
6467 if "aspect-delta-details" not in scaling_descriptor
:
6469 "Aspect delta details not fount in scaling descriptor {}".format(
6470 scaling_descriptor
["name"]
6473 # count if max-instance-count is reached
6474 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6476 scaling_info
["scaling_direction"] = "OUT"
6477 scaling_info
["vdu-create"] = {}
6478 scaling_info
["kdu-create"] = {}
6479 for delta
in deltas
:
6480 for vdu_delta
in delta
.get("vdu-delta", {}):
6481 vdud
= get_vdu(db_vnfd
, vdu_delta
["id"])
6482 # vdu_index also provides the number of instance of the targeted vdu
6483 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6484 if vdu_index
<= len(db_vnfr
["vdur"]):
6485 vdu_name_id
= db_vnfr
["vdur"][vdu_index
- 1]["vdu-name"]
6487 db_vnfr
["_id"] + vdu_name_id
+ str(vdu_index
- 1)
6489 prom_job_name
= prom_job_name
.replace("_", "")
6490 prom_job_name
= prom_job_name
.replace("-", "")
6492 prom_job_name
= None
6493 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(
6497 additional_params
= (
6498 self
._get
_vdu
_additional
_params
(db_vnfr
, vdud
["id"])
6501 cloud_init_list
= []
6503 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6504 max_instance_count
= 10
6505 if vdu_profile
and "max-number-of-instances" in vdu_profile
:
6506 max_instance_count
= vdu_profile
.get(
6507 "max-number-of-instances", 10
6510 default_instance_num
= get_number_of_instances(
6513 instances_number
= vdu_delta
.get("number-of-instances", 1)
6514 nb_scale_op
+= instances_number
6516 new_instance_count
= nb_scale_op
+ default_instance_num
6517 # Control if new count is over max and vdu count is less than max.
6518 # Then assign new instance count
6519 if new_instance_count
> max_instance_count
> vdu_count
:
6520 instances_number
= new_instance_count
- max_instance_count
6522 instances_number
= instances_number
6524 if new_instance_count
> max_instance_count
:
6526 "reached the limit of {} (max-instance-count) "
6527 "scaling-out operations for the "
6528 "scaling-group-descriptor '{}'".format(
6529 nb_scale_op
, scaling_group
6532 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6534 # TODO Information of its own ip is not available because db_vnfr is not updated.
6535 additional_params
["OSM"] = get_osm_params(
6536 db_vnfr
, vdu_delta
["id"], vdu_index
+ x
6538 cloud_init_list
.append(
6539 self
._parse
_cloud
_init
(
6546 vca_scaling_info
.append(
6548 "osm_vdu_id": vdu_delta
["id"],
6549 "member-vnf-index": vnf_index
,
6551 "vdu_index": vdu_index
+ x
,
6554 scaling_info
["vdu-create"][vdu_delta
["id"]] = instances_number
6555 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6556 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6557 kdu_name
= kdu_profile
["kdu-name"]
6558 resource_name
= kdu_profile
.get("resource-name", "")
6560 # Might have different kdus in the same delta
6561 # Should have list for each kdu
6562 if not scaling_info
["kdu-create"].get(kdu_name
, None):
6563 scaling_info
["kdu-create"][kdu_name
] = []
6565 kdur
= get_kdur(db_vnfr
, kdu_name
)
6566 if kdur
.get("helm-chart"):
6567 k8s_cluster_type
= "helm-chart-v3"
6568 self
.logger
.debug("kdur: {}".format(kdur
))
6569 elif kdur
.get("juju-bundle"):
6570 k8s_cluster_type
= "juju-bundle"
6573 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6574 "juju-bundle. Maybe an old NBI version is running".format(
6575 db_vnfr
["member-vnf-index-ref"], kdu_name
6579 max_instance_count
= 10
6580 if kdu_profile
and "max-number-of-instances" in kdu_profile
:
6581 max_instance_count
= kdu_profile
.get(
6582 "max-number-of-instances", 10
6585 nb_scale_op
+= kdu_delta
.get("number-of-instances", 1)
6586 deployed_kdu
, _
= get_deployed_kdu(
6587 nsr_deployed
, kdu_name
, vnf_index
6589 if deployed_kdu
is None:
6591 "KDU '{}' for vnf '{}' not deployed".format(
6595 kdu_instance
= deployed_kdu
.get("kdu-instance")
6596 instance_num
= await self
.k8scluster_map
[
6602 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6603 kdu_model
=deployed_kdu
.get("kdu-model"),
6605 kdu_replica_count
= instance_num
+ kdu_delta
.get(
6606 "number-of-instances", 1
6609 # Control if new count is over max and instance_num is less than max.
6610 # Then assign max instance number to kdu replica count
6611 if kdu_replica_count
> max_instance_count
> instance_num
:
6612 kdu_replica_count
= max_instance_count
6613 if kdu_replica_count
> max_instance_count
:
6615 "reached the limit of {} (max-instance-count) "
6616 "scaling-out operations for the "
6617 "scaling-group-descriptor '{}'".format(
6618 instance_num
, scaling_group
6622 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6623 vca_scaling_info
.append(
6625 "osm_kdu_id": kdu_name
,
6626 "member-vnf-index": vnf_index
,
6628 "kdu_index": instance_num
+ x
- 1,
6631 scaling_info
["kdu-create"][kdu_name
].append(
6633 "member-vnf-index": vnf_index
,
6635 "k8s-cluster-type": k8s_cluster_type
,
6636 "resource-name": resource_name
,
6637 "scale": kdu_replica_count
,
6640 elif scaling_type
== "SCALE_IN":
6641 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6643 scaling_info
["scaling_direction"] = "IN"
6644 scaling_info
["vdu-delete"] = {}
6645 scaling_info
["kdu-delete"] = {}
6647 for delta
in deltas
:
6648 for vdu_delta
in delta
.get("vdu-delta", {}):
6649 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6650 min_instance_count
= 0
6651 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6652 if vdu_profile
and "min-number-of-instances" in vdu_profile
:
6653 min_instance_count
= vdu_profile
["min-number-of-instances"]
6655 default_instance_num
= get_number_of_instances(
6656 db_vnfd
, vdu_delta
["id"]
6658 instance_num
= vdu_delta
.get("number-of-instances", 1)
6659 nb_scale_op
-= instance_num
6661 new_instance_count
= nb_scale_op
+ default_instance_num
6663 if new_instance_count
< min_instance_count
< vdu_count
:
6664 instances_number
= min_instance_count
- new_instance_count
6666 instances_number
= instance_num
6668 if new_instance_count
< min_instance_count
:
6670 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6671 "scaling-group-descriptor '{}'".format(
6672 nb_scale_op
, scaling_group
6675 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6676 vca_scaling_info
.append(
6678 "osm_vdu_id": vdu_delta
["id"],
6679 "member-vnf-index": vnf_index
,
6681 "vdu_index": vdu_index
- 1 - x
,
6684 scaling_info
["vdu-delete"][vdu_delta
["id"]] = instances_number
6685 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6686 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6687 kdu_name
= kdu_profile
["kdu-name"]
6688 resource_name
= kdu_profile
.get("resource-name", "")
6690 if not scaling_info
["kdu-delete"].get(kdu_name
, None):
6691 scaling_info
["kdu-delete"][kdu_name
] = []
6693 kdur
= get_kdur(db_vnfr
, kdu_name
)
6694 if kdur
.get("helm-chart"):
6695 k8s_cluster_type
= "helm-chart-v3"
6696 self
.logger
.debug("kdur: {}".format(kdur
))
6697 elif kdur
.get("juju-bundle"):
6698 k8s_cluster_type
= "juju-bundle"
6701 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6702 "juju-bundle. Maybe an old NBI version is running".format(
6703 db_vnfr
["member-vnf-index-ref"], kdur
["kdu-name"]
6707 min_instance_count
= 0
6708 if kdu_profile
and "min-number-of-instances" in kdu_profile
:
6709 min_instance_count
= kdu_profile
["min-number-of-instances"]
6711 nb_scale_op
-= kdu_delta
.get("number-of-instances", 1)
6712 deployed_kdu
, _
= get_deployed_kdu(
6713 nsr_deployed
, kdu_name
, vnf_index
6715 if deployed_kdu
is None:
6717 "KDU '{}' for vnf '{}' not deployed".format(
6721 kdu_instance
= deployed_kdu
.get("kdu-instance")
6722 instance_num
= await self
.k8scluster_map
[
6728 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6729 kdu_model
=deployed_kdu
.get("kdu-model"),
6731 kdu_replica_count
= instance_num
- kdu_delta
.get(
6732 "number-of-instances", 1
6735 if kdu_replica_count
< min_instance_count
< instance_num
:
6736 kdu_replica_count
= min_instance_count
6737 if kdu_replica_count
< min_instance_count
:
6739 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6740 "scaling-group-descriptor '{}'".format(
6741 instance_num
, scaling_group
6745 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6746 vca_scaling_info
.append(
6748 "osm_kdu_id": kdu_name
,
6749 "member-vnf-index": vnf_index
,
6751 "kdu_index": instance_num
- x
- 1,
6754 scaling_info
["kdu-delete"][kdu_name
].append(
6756 "member-vnf-index": vnf_index
,
6758 "k8s-cluster-type": k8s_cluster_type
,
6759 "resource-name": resource_name
,
6760 "scale": kdu_replica_count
,
6764 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6765 vdu_delete
= copy(scaling_info
.get("vdu-delete"))
6766 if scaling_info
["scaling_direction"] == "IN":
6767 for vdur
in reversed(db_vnfr
["vdur"]):
6768 if vdu_delete
.get(vdur
["vdu-id-ref"]):
6769 vdu_delete
[vdur
["vdu-id-ref"]] -= 1
6770 scaling_info
["vdu"].append(
6772 "name": vdur
.get("name") or vdur
.get("vdu-name"),
6773 "vdu_id": vdur
["vdu-id-ref"],
6777 for interface
in vdur
["interfaces"]:
6778 scaling_info
["vdu"][-1]["interface"].append(
6780 "name": interface
["name"],
6781 "ip_address": interface
["ip-address"],
6782 "mac_address": interface
.get("mac-address"),
6785 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6788 step
= "Executing pre-scale vnf-config-primitive"
6789 if scaling_descriptor
.get("scaling-config-action"):
6790 for scaling_config_action
in scaling_descriptor
[
6791 "scaling-config-action"
6794 scaling_config_action
.get("trigger") == "pre-scale-in"
6795 and scaling_type
== "SCALE_IN"
6797 scaling_config_action
.get("trigger") == "pre-scale-out"
6798 and scaling_type
== "SCALE_OUT"
6800 vnf_config_primitive
= scaling_config_action
[
6801 "vnf-config-primitive-name-ref"
6803 step
= db_nslcmop_update
[
6805 ] = "executing pre-scale scaling-config-action '{}'".format(
6806 vnf_config_primitive
6809 # look for primitive
6810 for config_primitive
in (
6811 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
6812 ).get("config-primitive", ()):
6813 if config_primitive
["name"] == vnf_config_primitive
:
6817 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6818 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6819 "primitive".format(scaling_group
, vnf_config_primitive
)
6822 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
6823 if db_vnfr
.get("additionalParamsForVnf"):
6824 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
6826 scale_process
= "VCA"
6827 db_nsr_update
["config-status"] = "configuring pre-scaling"
6828 primitive_params
= self
._map
_primitive
_params
(
6829 config_primitive
, {}, vnfr_params
6832 # Pre-scale retry check: Check if this sub-operation has been executed before
6833 op_index
= self
._check
_or
_add
_scale
_suboperation
(
6836 vnf_config_primitive
,
6840 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
6841 # Skip sub-operation
6842 result
= "COMPLETED"
6843 result_detail
= "Done"
6846 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6847 vnf_config_primitive
, result
, result_detail
6851 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
6852 # New sub-operation: Get index of this sub-operation
6854 len(db_nslcmop
.get("_admin", {}).get("operations"))
6859 + "vnf_config_primitive={} New sub-operation".format(
6860 vnf_config_primitive
6864 # retry: Get registered params for this existing sub-operation
6865 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
6868 vnf_index
= op
.get("member_vnf_index")
6869 vnf_config_primitive
= op
.get("primitive")
6870 primitive_params
= op
.get("primitive_params")
6873 + "vnf_config_primitive={} Sub-operation retry".format(
6874 vnf_config_primitive
6877 # Execute the primitive, either with new (first-time) or registered (reintent) args
6878 ee_descriptor_id
= config_primitive
.get(
6879 "execution-environment-ref"
6881 primitive_name
= config_primitive
.get(
6882 "execution-environment-primitive", vnf_config_primitive
6884 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
6885 nsr_deployed
["VCA"],
6886 member_vnf_index
=vnf_index
,
6888 vdu_count_index
=None,
6889 ee_descriptor_id
=ee_descriptor_id
,
6891 result
, result_detail
= await self
._ns
_execute
_primitive
(
6900 + "vnf_config_primitive={} Done with result {} {}".format(
6901 vnf_config_primitive
, result
, result_detail
6904 # Update operationState = COMPLETED | FAILED
6905 self
._update
_suboperation
_status
(
6906 db_nslcmop
, op_index
, result
, result_detail
6909 if result
== "FAILED":
6910 raise LcmException(result_detail
)
6911 db_nsr_update
["config-status"] = old_config_status
6912 scale_process
= None
6916 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index
)
6919 "_admin.scaling-group.{}.time".format(admin_scale_index
)
6922 # SCALE-IN VCA - BEGIN
6923 if vca_scaling_info
:
6924 step
= db_nslcmop_update
[
6926 ] = "Deleting the execution environments"
6927 scale_process
= "VCA"
6928 for vca_info
in vca_scaling_info
:
6929 if vca_info
["type"] == "delete" and not vca_info
.get("osm_kdu_id"):
6930 member_vnf_index
= str(vca_info
["member-vnf-index"])
6932 logging_text
+ "vdu info: {}".format(vca_info
)
6934 if vca_info
.get("osm_vdu_id"):
6935 vdu_id
= vca_info
["osm_vdu_id"]
6936 vdu_index
= int(vca_info
["vdu_index"])
6939 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6940 member_vnf_index
, vdu_id
, vdu_index
6942 stage
[2] = step
= "Scaling in VCA"
6943 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
6944 vca_update
= db_nsr
["_admin"]["deployed"]["VCA"]
6945 config_update
= db_nsr
["configurationStatus"]
6946 for vca_index
, vca
in enumerate(vca_update
):
6948 (vca
or vca
.get("ee_id"))
6949 and vca
["member-vnf-index"] == member_vnf_index
6950 and vca
["vdu_count_index"] == vdu_index
6952 if vca
.get("vdu_id"):
6953 config_descriptor
= get_configuration(
6954 db_vnfd
, vca
.get("vdu_id")
6956 elif vca
.get("kdu_name"):
6957 config_descriptor
= get_configuration(
6958 db_vnfd
, vca
.get("kdu_name")
6961 config_descriptor
= get_configuration(
6962 db_vnfd
, db_vnfd
["id"]
6964 operation_params
= (
6965 db_nslcmop
.get("operationParams") or {}
6967 exec_terminate_primitives
= not operation_params
.get(
6968 "skip_terminate_primitives"
6969 ) and vca
.get("needed_terminate")
6970 task
= asyncio
.ensure_future(
6979 exec_primitives
=exec_terminate_primitives
,
6983 timeout
=self
.timeout
.charm_delete
,
6986 tasks_dict_info
[task
] = "Terminating VCA {}".format(
6989 del vca_update
[vca_index
]
6990 del config_update
[vca_index
]
6991 # wait for pending tasks of terminate primitives
6995 + "Waiting for tasks {}".format(
6996 list(tasks_dict_info
.keys())
6999 error_list
= await self
._wait
_for
_tasks
(
7003 self
.timeout
.charm_delete
, self
.timeout
.ns_terminate
7008 tasks_dict_info
.clear()
7010 raise LcmException("; ".join(error_list
))
7012 db_vca_and_config_update
= {
7013 "_admin.deployed.VCA": vca_update
,
7014 "configurationStatus": config_update
,
7017 "nsrs", db_nsr
["_id"], db_vca_and_config_update
7019 scale_process
= None
7020 # SCALE-IN VCA - END
7023 if scaling_info
.get("vdu-create") or scaling_info
.get("vdu-delete"):
7024 scale_process
= "RO"
7025 if self
.ro_config
.ng
:
7026 await self
._scale
_ng
_ro
(
7027 logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, scaling_info
, stage
7029 scaling_info
.pop("vdu-create", None)
7030 scaling_info
.pop("vdu-delete", None)
7032 scale_process
= None
7036 if scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete"):
7037 scale_process
= "KDU"
7038 await self
._scale
_kdu
(
7039 logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7041 scaling_info
.pop("kdu-create", None)
7042 scaling_info
.pop("kdu-delete", None)
7044 scale_process
= None
7048 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7050 # SCALE-UP VCA - BEGIN
7051 if vca_scaling_info
:
7052 step
= db_nslcmop_update
[
7054 ] = "Creating new execution environments"
7055 scale_process
= "VCA"
7056 for vca_info
in vca_scaling_info
:
7057 if vca_info
["type"] == "create" and not vca_info
.get("osm_kdu_id"):
7058 member_vnf_index
= str(vca_info
["member-vnf-index"])
7060 logging_text
+ "vdu info: {}".format(vca_info
)
7062 vnfd_id
= db_vnfr
["vnfd-ref"]
7063 if vca_info
.get("osm_vdu_id"):
7064 vdu_index
= int(vca_info
["vdu_index"])
7065 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
7066 if db_vnfr
.get("additionalParamsForVnf"):
7067 deploy_params
.update(
7069 db_vnfr
["additionalParamsForVnf"].copy()
7072 descriptor_config
= get_configuration(
7073 db_vnfd
, db_vnfd
["id"]
7075 if descriptor_config
:
7081 logging_text
=logging_text
7082 + "member_vnf_index={} ".format(member_vnf_index
),
7085 nslcmop_id
=nslcmop_id
,
7091 kdu_index
=kdu_index
,
7092 member_vnf_index
=member_vnf_index
,
7093 vdu_index
=vdu_index
,
7095 deploy_params
=deploy_params
,
7096 descriptor_config
=descriptor_config
,
7097 base_folder
=base_folder
,
7098 task_instantiation_info
=tasks_dict_info
,
7101 vdu_id
= vca_info
["osm_vdu_id"]
7102 vdur
= find_in_list(
7103 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
7105 descriptor_config
= get_configuration(db_vnfd
, vdu_id
)
7106 if vdur
.get("additionalParams"):
7107 deploy_params_vdu
= parse_yaml_strings(
7108 vdur
["additionalParams"]
7111 deploy_params_vdu
= deploy_params
7112 deploy_params_vdu
["OSM"] = get_osm_params(
7113 db_vnfr
, vdu_id
, vdu_count_index
=vdu_index
7115 if descriptor_config
:
7121 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7122 member_vnf_index
, vdu_id
, vdu_index
7124 stage
[2] = step
= "Scaling out VCA"
7125 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7127 logging_text
=logging_text
7128 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7129 member_vnf_index
, vdu_id
, vdu_index
7133 nslcmop_id
=nslcmop_id
,
7139 member_vnf_index
=member_vnf_index
,
7140 vdu_index
=vdu_index
,
7141 kdu_index
=kdu_index
,
7143 deploy_params
=deploy_params_vdu
,
7144 descriptor_config
=descriptor_config
,
7145 base_folder
=base_folder
,
7146 task_instantiation_info
=tasks_dict_info
,
7149 # SCALE-UP VCA - END
7150 scale_process
= None
7153 # execute primitive service POST-SCALING
7154 step
= "Executing post-scale vnf-config-primitive"
7155 if scaling_descriptor
.get("scaling-config-action"):
7156 for scaling_config_action
in scaling_descriptor
[
7157 "scaling-config-action"
7160 scaling_config_action
.get("trigger") == "post-scale-in"
7161 and scaling_type
== "SCALE_IN"
7163 scaling_config_action
.get("trigger") == "post-scale-out"
7164 and scaling_type
== "SCALE_OUT"
7166 vnf_config_primitive
= scaling_config_action
[
7167 "vnf-config-primitive-name-ref"
7169 step
= db_nslcmop_update
[
7171 ] = "executing post-scale scaling-config-action '{}'".format(
7172 vnf_config_primitive
7175 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
7176 if db_vnfr
.get("additionalParamsForVnf"):
7177 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
7179 # look for primitive
7180 for config_primitive
in (
7181 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
7182 ).get("config-primitive", ()):
7183 if config_primitive
["name"] == vnf_config_primitive
:
7187 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7188 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7189 "config-primitive".format(
7190 scaling_group
, vnf_config_primitive
7193 scale_process
= "VCA"
7194 db_nsr_update
["config-status"] = "configuring post-scaling"
7195 primitive_params
= self
._map
_primitive
_params
(
7196 config_primitive
, {}, vnfr_params
7199 # Post-scale retry check: Check if this sub-operation has been executed before
7200 op_index
= self
._check
_or
_add
_scale
_suboperation
(
7203 vnf_config_primitive
,
7207 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
7208 # Skip sub-operation
7209 result
= "COMPLETED"
7210 result_detail
= "Done"
7213 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7214 vnf_config_primitive
, result
, result_detail
7218 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
7219 # New sub-operation: Get index of this sub-operation
7221 len(db_nslcmop
.get("_admin", {}).get("operations"))
7226 + "vnf_config_primitive={} New sub-operation".format(
7227 vnf_config_primitive
7231 # retry: Get registered params for this existing sub-operation
7232 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
7235 vnf_index
= op
.get("member_vnf_index")
7236 vnf_config_primitive
= op
.get("primitive")
7237 primitive_params
= op
.get("primitive_params")
7240 + "vnf_config_primitive={} Sub-operation retry".format(
7241 vnf_config_primitive
7244 # Execute the primitive, either with new (first-time) or registered (reintent) args
7245 ee_descriptor_id
= config_primitive
.get(
7246 "execution-environment-ref"
7248 primitive_name
= config_primitive
.get(
7249 "execution-environment-primitive", vnf_config_primitive
7251 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
7252 nsr_deployed
["VCA"],
7253 member_vnf_index
=vnf_index
,
7255 vdu_count_index
=None,
7256 ee_descriptor_id
=ee_descriptor_id
,
7258 result
, result_detail
= await self
._ns
_execute
_primitive
(
7267 + "vnf_config_primitive={} Done with result {} {}".format(
7268 vnf_config_primitive
, result
, result_detail
7271 # Update operationState = COMPLETED | FAILED
7272 self
._update
_suboperation
_status
(
7273 db_nslcmop
, op_index
, result
, result_detail
7276 if result
== "FAILED":
7277 raise LcmException(result_detail
)
7278 db_nsr_update
["config-status"] = old_config_status
7279 scale_process
= None
7281 # Check if each vnf has exporter for metric collection if so update prometheus job records
7282 if scaling_type
== "SCALE_OUT":
7283 if "exporters-endpoints" in db_vnfd
.get("df")[0]:
7284 vnfr_id
= db_vnfr
["id"]
7285 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7286 exporter_config
= db_vnfd
.get("df")[0].get("exporters-endpoints")
7287 self
.logger
.debug("exporter config :{}".format(exporter_config
))
7288 artifact_path
= "{}/{}/{}".format(
7289 base_folder
["folder"],
7290 base_folder
["pkg-dir"],
7291 "exporter-endpoint",
7294 ee_config_descriptor
= exporter_config
7295 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
7299 vdu_id
=db_vnfr
["vdur"][-1]["vdu-id-ref"],
7300 vdu_index
=db_vnfr
["vdur"][-1]["count-index"],
7304 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
7305 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
7306 vdu_id_for_prom
= None
7307 vdu_index_for_prom
= None
7308 for x
in get_iterable(db_vnfr
, "vdur"):
7309 vdu_id_for_prom
= x
.get("vdu-id-ref")
7310 vdu_index_for_prom
= x
.get("count-index")
7311 vnfr_id
= vnfr_id
+ vdu_id
+ str(vdu_index
)
7312 vnfr_id
= vnfr_id
.replace("_", "")
7313 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
7315 artifact_path
=artifact_path
,
7316 ee_config_descriptor
=ee_config_descriptor
,
7319 target_ip
=rw_mgmt_ip
,
7321 vdu_id
=vdu_id_for_prom
,
7322 vdu_index
=vdu_index_for_prom
,
7325 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
7328 "_admin.deployed.prometheus_jobs"
7336 for job
in prometheus_jobs
:
7342 fail_on_empty
=False,
7346 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7347 db_nsr_update
["operational-status"] = (
7349 if old_operational_status
== "failed"
7350 else old_operational_status
7352 db_nsr_update
["config-status"] = old_config_status
7355 ROclient
.ROClientException
,
7360 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7362 except asyncio
.CancelledError
:
7364 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7366 exc
= "Operation was cancelled"
7367 except Exception as e
:
7368 exc
= traceback
.format_exc()
7369 self
.logger
.critical(
7370 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
7376 error_list
.append(str(exc
))
7377 self
._write
_ns
_status
(
7380 current_operation
="IDLE",
7381 current_operation_id
=None,
7385 stage
[1] = "Waiting for instantiate pending tasks."
7386 self
.logger
.debug(logging_text
+ stage
[1])
7387 exc
= await self
._wait
_for
_tasks
(
7390 self
.timeout
.ns_deploy
,
7395 except asyncio
.CancelledError
:
7396 error_list
.append("Cancelled")
7397 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
7398 await self
._wait
_for
_tasks
(
7401 self
.timeout
.ns_deploy
,
7407 error_detail
= "; ".join(error_list
)
7410 ] = error_description_nslcmop
= "FAILED {}: {}".format(
7413 nslcmop_operation_state
= "FAILED"
7415 db_nsr_update
["operational-status"] = old_operational_status
7416 db_nsr_update
["config-status"] = old_config_status
7417 db_nsr_update
["detailed-status"] = ""
7419 if "VCA" in scale_process
:
7420 db_nsr_update
["config-status"] = "failed"
7421 if "RO" in scale_process
:
7422 db_nsr_update
["operational-status"] = "failed"
7425 ] = "FAILED scaling nslcmop={} {}: {}".format(
7426 nslcmop_id
, step
, error_detail
7429 error_description_nslcmop
= None
7430 nslcmop_operation_state
= "COMPLETED"
7431 db_nslcmop_update
["detailed-status"] = "Done"
7432 if scaling_type
== "SCALE_IN" and prom_job_name
is not None:
7435 {"job_name": prom_job_name
},
7436 fail_on_empty
=False,
7439 self
._write
_op
_status
(
7442 error_message
=error_description_nslcmop
,
7443 operation_state
=nslcmop_operation_state
,
7444 other_update
=db_nslcmop_update
,
7447 self
._write
_ns
_status
(
7450 current_operation
="IDLE",
7451 current_operation_id
=None,
7452 other_update
=db_nsr_update
,
7455 if nslcmop_operation_state
:
7459 "nslcmop_id": nslcmop_id
,
7460 "operationState": nslcmop_operation_state
,
7462 await self
.msg
.aiowrite("ns", "scaled", msg
)
7463 except Exception as e
:
7465 logging_text
+ "kafka_write notification Exception {}".format(e
)
7467 self
.logger
.debug(logging_text
+ "Exit")
7468 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_scale")
7470 async def _scale_kdu(
7471 self
, logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7473 _scaling_info
= scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete")
7474 for kdu_name
in _scaling_info
:
7475 for kdu_scaling_info
in _scaling_info
[kdu_name
]:
7476 deployed_kdu
, index
= get_deployed_kdu(
7477 nsr_deployed
, kdu_name
, kdu_scaling_info
["member-vnf-index"]
7479 cluster_uuid
= deployed_kdu
["k8scluster-uuid"]
7480 kdu_instance
= deployed_kdu
["kdu-instance"]
7481 kdu_model
= deployed_kdu
.get("kdu-model")
7482 scale
= int(kdu_scaling_info
["scale"])
7483 k8s_cluster_type
= kdu_scaling_info
["k8s-cluster-type"]
7486 "collection": "nsrs",
7487 "filter": {"_id": nsr_id
},
7488 "path": "_admin.deployed.K8s.{}".format(index
),
7491 step
= "scaling application {}".format(
7492 kdu_scaling_info
["resource-name"]
7494 self
.logger
.debug(logging_text
+ step
)
7496 if kdu_scaling_info
["type"] == "delete":
7497 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7500 and kdu_config
.get("terminate-config-primitive")
7501 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7503 terminate_config_primitive_list
= kdu_config
.get(
7504 "terminate-config-primitive"
7506 terminate_config_primitive_list
.sort(
7507 key
=lambda val
: int(val
["seq"])
7511 terminate_config_primitive
7512 ) in terminate_config_primitive_list
:
7513 primitive_params_
= self
._map
_primitive
_params
(
7514 terminate_config_primitive
, {}, {}
7516 step
= "execute terminate config primitive"
7517 self
.logger
.debug(logging_text
+ step
)
7518 await asyncio
.wait_for(
7519 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7520 cluster_uuid
=cluster_uuid
,
7521 kdu_instance
=kdu_instance
,
7522 primitive_name
=terminate_config_primitive
["name"],
7523 params
=primitive_params_
,
7525 total_timeout
=self
.timeout
.primitive
,
7528 timeout
=self
.timeout
.primitive
7529 * self
.timeout
.primitive_outer_factor
,
7532 await asyncio
.wait_for(
7533 self
.k8scluster_map
[k8s_cluster_type
].scale(
7534 kdu_instance
=kdu_instance
,
7536 resource_name
=kdu_scaling_info
["resource-name"],
7537 total_timeout
=self
.timeout
.scale_on_error
,
7539 cluster_uuid
=cluster_uuid
,
7540 kdu_model
=kdu_model
,
7544 timeout
=self
.timeout
.scale_on_error
7545 * self
.timeout
.scale_on_error_outer_factor
,
7548 if kdu_scaling_info
["type"] == "create":
7549 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7552 and kdu_config
.get("initial-config-primitive")
7553 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7555 initial_config_primitive_list
= kdu_config
.get(
7556 "initial-config-primitive"
7558 initial_config_primitive_list
.sort(
7559 key
=lambda val
: int(val
["seq"])
7562 for initial_config_primitive
in initial_config_primitive_list
:
7563 primitive_params_
= self
._map
_primitive
_params
(
7564 initial_config_primitive
, {}, {}
7566 step
= "execute initial config primitive"
7567 self
.logger
.debug(logging_text
+ step
)
7568 await asyncio
.wait_for(
7569 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7570 cluster_uuid
=cluster_uuid
,
7571 kdu_instance
=kdu_instance
,
7572 primitive_name
=initial_config_primitive
["name"],
7573 params
=primitive_params_
,
7580 async def _scale_ng_ro(
7581 self
, logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, vdu_scaling_info
, stage
7583 nsr_id
= db_nslcmop
["nsInstanceId"]
7584 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7587 # read from db: vnfd's for every vnf
7590 # for each vnf in ns, read vnfd
7591 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
7592 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
7593 vnfd_id
= vnfr
["vnfd-id"] # vnfd uuid for this vnf
7594 # if we haven't this vnfd, read it from db
7595 if not find_in_list(db_vnfds
, lambda a_vnfd
: a_vnfd
["id"] == vnfd_id
):
7597 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7598 db_vnfds
.append(vnfd
)
7599 n2vc_key
= self
.n2vc
.get_public_key()
7600 n2vc_key_list
= [n2vc_key
]
7603 vdu_scaling_info
.get("vdu-create"),
7604 vdu_scaling_info
.get("vdu-delete"),
7607 # db_vnfr has been updated, update db_vnfrs to use it
7608 db_vnfrs
[db_vnfr
["member-vnf-index-ref"]] = db_vnfr
7609 await self
._instantiate
_ng
_ro
(
7619 start_deploy
=time(),
7620 timeout_ns_deploy
=self
.timeout
.ns_deploy
,
7622 if vdu_scaling_info
.get("vdu-delete"):
7624 db_vnfr
, None, vdu_scaling_info
["vdu-delete"], mark_delete
=False
7627 async def extract_prometheus_scrape_jobs(
7631 ee_config_descriptor
: dict,
7636 vnf_member_index
: str = "",
7638 vdu_index
: int = None,
7640 kdu_index
: int = None,
7642 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7643 This method will wait until the corresponding VDU or KDU is fully instantiated
7646 ee_id (str): Execution Environment ID
7647 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7648 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7649 vnfr_id (str): VNFR ID where this EE applies
7650 nsr_id (str): NSR ID where this EE applies
7651 target_ip (str): VDU/KDU instance IP address
7652 element_type (str): NS or VNF or VDU or KDU
7653 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7654 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7655 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7656 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7657 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7660 LcmException: When the VDU or KDU instance was not found in an hour
7663 _type_: Prometheus jobs
7665 # default the vdur and kdur names to an empty string, to avoid any later
7666 # problem with Prometheus when the element type is not VDU or KDU
7670 # look if exist a file called 'prometheus*.j2' and
7671 artifact_content
= self
.fs
.dir_ls(artifact_path
)
7675 for f
in artifact_content
7676 if f
.startswith("prometheus") and f
.endswith(".j2")
7682 self
.logger
.debug("Artifact path{}".format(artifact_path
))
7683 self
.logger
.debug("job file{}".format(job_file
))
7684 with self
.fs
.file_open((artifact_path
, job_file
), "r") as f
:
7687 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7688 if element_type
in ("VDU", "KDU"):
7689 for _
in range(360):
7690 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7691 if vdu_id
and vdu_index
is not None:
7695 for x
in get_iterable(db_vnfr
, "vdur")
7697 x
.get("vdu-id-ref") == vdu_id
7698 and x
.get("count-index") == vdu_index
7703 if vdur
.get("name"):
7704 vdur_name
= vdur
.get("name")
7706 if kdu_name
and kdu_index
is not None:
7710 for x
in get_iterable(db_vnfr
, "kdur")
7712 x
.get("kdu-name") == kdu_name
7713 and x
.get("count-index") == kdu_index
7718 if kdur
.get("name"):
7719 kdur_name
= kdur
.get("name")
7722 await asyncio
.sleep(10)
7724 if vdu_id
and vdu_index
is not None:
7726 f
"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7728 if kdu_name
and kdu_index
is not None:
7730 f
"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7733 if ee_id
is not None:
7734 _
, namespace
, helm_id
= get_ee_id_parts(
7736 ) # get namespace and EE gRPC service name
7737 host_name
= f
'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7739 vnfr_id
= vnfr_id
.replace("-", "")
7741 "JOB_NAME": vnfr_id
,
7742 "TARGET_IP": target_ip
,
7743 "EXPORTER_POD_IP": host_name
,
7744 "EXPORTER_POD_PORT": host_port
,
7746 "VNF_MEMBER_INDEX": vnf_member_index
,
7747 "VDUR_NAME": vdur_name
,
7748 "KDUR_NAME": kdur_name
,
7749 "ELEMENT_TYPE": element_type
,
7752 metric_path
= ee_config_descriptor
["metric-path"]
7753 target_port
= ee_config_descriptor
["metric-port"]
7754 vnfr_id
= vnfr_id
.replace("-", "")
7756 "JOB_NAME": vnfr_id
,
7757 "TARGET_IP": target_ip
,
7758 "TARGET_PORT": target_port
,
7759 "METRIC_PATH": metric_path
,
7762 job_list
= parse_job(job_data
, variables
)
7763 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7764 for job
in job_list
:
7766 not isinstance(job
.get("job_name"), str)
7767 or vnfr_id
not in job
["job_name"]
7769 job
["job_name"] = vnfr_id
+ "_" + str(SystemRandom().randint(1, 10000))
7770 job
["nsr_id"] = nsr_id
7771 job
["vnfr_id"] = vnfr_id
7774 async def rebuild_start_stop(
7775 self
, nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
7777 logging_text
= "Task ns={} {}={} ".format(nsr_id
, operation_type
, nslcmop_id
)
7778 self
.logger
.info(logging_text
+ "Enter")
7779 stage
= ["Preparing the environment", ""]
7780 # database nsrs record
7784 # in case of error, indicates what part of scale was failed to put nsr at error status
7785 start_deploy
= time()
7787 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_id
})
7788 vim_account_id
= db_vnfr
.get("vim-account-id")
7789 vim_info_key
= "vim:" + vim_account_id
7790 vdu_id
= additional_param
["vdu_id"]
7791 vdurs
= [item
for item
in db_vnfr
["vdur"] if item
["vdu-id-ref"] == vdu_id
]
7792 vdur
= find_in_list(
7793 vdurs
, lambda vdu
: vdu
["count-index"] == additional_param
["count-index"]
7796 vdu_vim_name
= vdur
["name"]
7797 vim_vm_id
= vdur
["vim_info"][vim_info_key
]["vim_id"]
7798 target_vim
, _
= next(k_v
for k_v
in vdur
["vim_info"].items())
7800 raise LcmException("Target vdu is not found")
7801 self
.logger
.info("vdu_vim_name >> {} ".format(vdu_vim_name
))
7802 # wait for any previous tasks in process
7803 stage
[1] = "Waiting for previous operations to terminate"
7804 self
.logger
.info(stage
[1])
7805 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7807 stage
[1] = "Reading from database."
7808 self
.logger
.info(stage
[1])
7809 self
._write
_ns
_status
(
7812 current_operation
=operation_type
.upper(),
7813 current_operation_id
=nslcmop_id
,
7815 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7818 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
7819 db_nsr_update
["operational-status"] = operation_type
7820 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7824 "vim_vm_id": vim_vm_id
,
7826 "vdu_index": additional_param
["count-index"],
7827 "vdu_id": vdur
["id"],
7828 "target_vim": target_vim
,
7829 "vim_account_id": vim_account_id
,
7832 stage
[1] = "Sending rebuild request to RO... {}".format(desc
)
7833 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7834 self
.logger
.info("ro nsr id: {}".format(nsr_id
))
7835 result_dict
= await self
.RO
.operate(nsr_id
, desc
, operation_type
)
7836 self
.logger
.info("response from RO: {}".format(result_dict
))
7837 action_id
= result_dict
["action_id"]
7838 await self
._wait
_ng
_ro
(
7843 self
.timeout
.operate
,
7845 "start_stop_rebuild",
7847 return "COMPLETED", "Done"
7848 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7849 self
.logger
.error("Exit Exception {}".format(e
))
7851 except asyncio
.CancelledError
:
7852 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
7853 exc
= "Operation was cancelled"
7854 except Exception as e
:
7855 exc
= traceback
.format_exc()
7856 self
.logger
.critical(
7857 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7859 return "FAILED", "Error in operate VNF {}".format(exc
)
7861 async def migrate(self
, nsr_id
, nslcmop_id
):
7863 Migrate VNFs and VDUs instances in a NS
7865 :param: nsr_id: NS Instance ID
7866 :param: nslcmop_id: nslcmop ID of migrate
7869 # Try to lock HA task here
7870 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7871 if not task_is_locked_by_me
:
7873 logging_text
= "Task ns={} migrate ".format(nsr_id
)
7874 self
.logger
.debug(logging_text
+ "Enter")
7875 # get all needed from database
7877 db_nslcmop_update
= {}
7878 nslcmop_operation_state
= None
7882 # in case of error, indicates what part of scale was failed to put nsr at error status
7883 start_deploy
= time()
7886 # wait for any previous tasks in process
7887 step
= "Waiting for previous operations to terminate"
7888 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7890 self
._write
_ns
_status
(
7893 current_operation
="MIGRATING",
7894 current_operation_id
=nslcmop_id
,
7896 step
= "Getting nslcmop from database"
7898 step
+ " after having waited for previous tasks to be completed"
7900 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7901 migrate_params
= db_nslcmop
.get("operationParams")
7904 target
.update(migrate_params
)
7905 desc
= await self
.RO
.migrate(nsr_id
, target
)
7906 self
.logger
.debug("RO return > {}".format(desc
))
7907 action_id
= desc
["action_id"]
7908 await self
._wait
_ng
_ro
(
7913 self
.timeout
.migrate
,
7914 operation
="migrate",
7916 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7917 self
.logger
.error("Exit Exception {}".format(e
))
7919 except asyncio
.CancelledError
:
7920 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
7921 exc
= "Operation was cancelled"
7922 except Exception as e
:
7923 exc
= traceback
.format_exc()
7924 self
.logger
.critical(
7925 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7928 self
._write
_ns
_status
(
7931 current_operation
="IDLE",
7932 current_operation_id
=None,
7935 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
7936 nslcmop_operation_state
= "FAILED"
7938 nslcmop_operation_state
= "COMPLETED"
7939 db_nslcmop_update
["detailed-status"] = "Done"
7940 db_nsr_update
["detailed-status"] = "Done"
7942 self
._write
_op
_status
(
7946 operation_state
=nslcmop_operation_state
,
7947 other_update
=db_nslcmop_update
,
7949 if nslcmop_operation_state
:
7953 "nslcmop_id": nslcmop_id
,
7954 "operationState": nslcmop_operation_state
,
7956 await self
.msg
.aiowrite("ns", "migrated", msg
)
7957 except Exception as e
:
7959 logging_text
+ "kafka_write notification Exception {}".format(e
)
7961 self
.logger
.debug(logging_text
+ "Exit")
7962 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_migrate")
7964 async def heal(self
, nsr_id
, nslcmop_id
):
7968 :param nsr_id: ns instance to heal
7969 :param nslcmop_id: operation to run
7973 # Try to lock HA task here
7974 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7975 if not task_is_locked_by_me
:
7978 logging_text
= "Task ns={} heal={} ".format(nsr_id
, nslcmop_id
)
7979 stage
= ["", "", ""]
7980 tasks_dict_info
= {}
7981 # ^ stage, step, VIM progress
7982 self
.logger
.debug(logging_text
+ "Enter")
7983 # get all needed from database
7985 db_nslcmop_update
= {}
7987 db_vnfrs
= {} # vnf's info indexed by _id
7989 old_operational_status
= ""
7990 old_config_status
= ""
7993 # wait for any previous tasks in process
7994 step
= "Waiting for previous operations to terminate"
7995 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7996 self
._write
_ns
_status
(
7999 current_operation
="HEALING",
8000 current_operation_id
=nslcmop_id
,
8003 step
= "Getting nslcmop from database"
8005 step
+ " after having waited for previous tasks to be completed"
8007 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8009 step
= "Getting nsr from database"
8010 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8011 old_operational_status
= db_nsr
["operational-status"]
8012 old_config_status
= db_nsr
["config-status"]
8015 "operational-status": "healing",
8016 "_admin.deployed.RO.operational-status": "healing",
8018 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8020 step
= "Sending heal order to VIM"
8022 logging_text
=logging_text
,
8024 db_nslcmop
=db_nslcmop
,
8029 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
8030 self
.logger
.debug(logging_text
+ stage
[1])
8031 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
8032 self
.fs
.sync(db_nsr
["nsd-id"])
8034 # read from db: vnfr's of this ns
8035 step
= "Getting vnfrs from db"
8036 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
8037 for vnfr
in db_vnfrs_list
:
8038 db_vnfrs
[vnfr
["_id"]] = vnfr
8039 self
.logger
.debug("ns.heal db_vnfrs={}".format(db_vnfrs
))
8041 # Check for each target VNF
8042 target_list
= db_nslcmop
.get("operationParams", {}).get("healVnfData", {})
8043 for target_vnf
in target_list
:
8044 # Find this VNF in the list from DB
8045 vnfr_id
= target_vnf
.get("vnfInstanceId", None)
8047 db_vnfr
= db_vnfrs
[vnfr_id
]
8048 vnfd_id
= db_vnfr
.get("vnfd-id")
8049 vnfd_ref
= db_vnfr
.get("vnfd-ref")
8050 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
8051 base_folder
= vnfd
["_admin"]["storage"]
8056 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
8057 member_vnf_index
= db_vnfr
.get("member-vnf-index-ref")
8059 # Check each target VDU and deploy N2VC
8060 target_vdu_list
= target_vnf
.get("additionalParams", {}).get(
8063 if not target_vdu_list
:
8064 # Codigo nuevo para crear diccionario
8065 target_vdu_list
= []
8066 for existing_vdu
in db_vnfr
.get("vdur"):
8067 vdu_name
= existing_vdu
.get("vdu-name", None)
8068 vdu_index
= existing_vdu
.get("count-index", 0)
8069 vdu_run_day1
= target_vnf
.get("additionalParams", {}).get(
8072 vdu_to_be_healed
= {
8074 "count-index": vdu_index
,
8075 "run-day1": vdu_run_day1
,
8077 target_vdu_list
.append(vdu_to_be_healed
)
8078 for target_vdu
in target_vdu_list
:
8079 deploy_params_vdu
= target_vdu
8080 # Set run-day1 vnf level value if not vdu level value exists
8081 if not deploy_params_vdu
.get("run-day1") and target_vnf
.get(
8082 "additionalParams", {}
8084 deploy_params_vdu
["run-day1"] = target_vnf
[
8087 vdu_name
= target_vdu
.get("vdu-id", None)
8088 # TODO: Get vdu_id from vdud.
8090 # For multi instance VDU count-index is mandatory
8091 # For single session VDU count-indes is 0
8092 vdu_index
= target_vdu
.get("count-index", 0)
8094 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8095 stage
[1] = "Deploying Execution Environments."
8096 self
.logger
.debug(logging_text
+ stage
[1])
8098 # VNF Level charm. Normal case when proxy charms.
8099 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
8100 descriptor_config
= get_configuration(vnfd
, vnfd_ref
)
8101 if descriptor_config
:
8102 # Continue if healed machine is management machine
8103 vnf_ip_address
= db_vnfr
.get("ip-address")
8104 target_instance
= None
8105 for instance
in db_vnfr
.get("vdur", None):
8107 instance
["vdu-name"] == vdu_name
8108 and instance
["count-index"] == vdu_index
8110 target_instance
= instance
8112 if vnf_ip_address
== target_instance
.get("ip-address"):
8114 logging_text
=logging_text
8115 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8116 member_vnf_index
, vdu_name
, vdu_index
8120 nslcmop_id
=nslcmop_id
,
8126 member_vnf_index
=member_vnf_index
,
8129 deploy_params
=deploy_params_vdu
,
8130 descriptor_config
=descriptor_config
,
8131 base_folder
=base_folder
,
8132 task_instantiation_info
=tasks_dict_info
,
8136 # VDU Level charm. Normal case with native charms.
8137 descriptor_config
= get_configuration(vnfd
, vdu_name
)
8138 if descriptor_config
:
8140 logging_text
=logging_text
8141 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8142 member_vnf_index
, vdu_name
, vdu_index
8146 nslcmop_id
=nslcmop_id
,
8152 member_vnf_index
=member_vnf_index
,
8153 vdu_index
=vdu_index
,
8155 deploy_params
=deploy_params_vdu
,
8156 descriptor_config
=descriptor_config
,
8157 base_folder
=base_folder
,
8158 task_instantiation_info
=tasks_dict_info
,
8162 ROclient
.ROClientException
,
8167 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
8169 except asyncio
.CancelledError
:
8171 logging_text
+ "Cancelled Exception while '{}'".format(step
)
8173 exc
= "Operation was cancelled"
8174 except Exception as e
:
8175 exc
= traceback
.format_exc()
8176 self
.logger
.critical(
8177 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
8182 if db_vnfrs_list
and target_list
:
8183 for vnfrs
in db_vnfrs_list
:
8184 for vnf_instance
in target_list
:
8185 if vnfrs
["_id"] == vnf_instance
.get("vnfInstanceId"):
8188 {"_id": vnfrs
["_id"]},
8189 {"_admin.modified": time()},
8192 error_list
.append(str(exc
))
8195 stage
[1] = "Waiting for healing pending tasks."
8196 self
.logger
.debug(logging_text
+ stage
[1])
8197 exc
= await self
._wait
_for
_tasks
(
8200 self
.timeout
.ns_deploy
,
8205 except asyncio
.CancelledError
:
8206 error_list
.append("Cancelled")
8207 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
8208 await self
._wait
_for
_tasks
(
8211 self
.timeout
.ns_deploy
,
8217 error_detail
= "; ".join(error_list
)
8220 ] = error_description_nslcmop
= "FAILED {}: {}".format(
8223 nslcmop_operation_state
= "FAILED"
8225 db_nsr_update
["operational-status"] = old_operational_status
8226 db_nsr_update
["config-status"] = old_config_status
8229 ] = "FAILED healing nslcmop={} {}: {}".format(
8230 nslcmop_id
, step
, error_detail
8232 for task
, task_name
in tasks_dict_info
.items():
8233 if not task
.done() or task
.cancelled() or task
.exception():
8234 if task_name
.startswith(self
.task_name_deploy_vca
):
8235 # A N2VC task is pending
8236 db_nsr_update
["config-status"] = "failed"
8238 # RO task is pending
8239 db_nsr_update
["operational-status"] = "failed"
8241 error_description_nslcmop
= None
8242 nslcmop_operation_state
= "COMPLETED"
8243 db_nslcmop_update
["detailed-status"] = "Done"
8244 db_nsr_update
["detailed-status"] = "Done"
8245 db_nsr_update
["operational-status"] = "running"
8246 db_nsr_update
["config-status"] = "configured"
8248 self
._write
_op
_status
(
8251 error_message
=error_description_nslcmop
,
8252 operation_state
=nslcmop_operation_state
,
8253 other_update
=db_nslcmop_update
,
8256 self
._write
_ns
_status
(
8259 current_operation
="IDLE",
8260 current_operation_id
=None,
8261 other_update
=db_nsr_update
,
8264 if nslcmop_operation_state
:
8268 "nslcmop_id": nslcmop_id
,
8269 "operationState": nslcmop_operation_state
,
8271 await self
.msg
.aiowrite("ns", "healed", msg
)
8272 except Exception as e
:
8274 logging_text
+ "kafka_write notification Exception {}".format(e
)
8276 self
.logger
.debug(logging_text
+ "Exit")
8277 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_heal")
8288 :param logging_text: preffix text to use at logging
8289 :param nsr_id: nsr identity
8290 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8291 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8292 :return: None or exception
8295 def get_vim_account(vim_account_id
):
8297 if vim_account_id
in db_vims
:
8298 return db_vims
[vim_account_id
]
8299 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
8300 db_vims
[vim_account_id
] = db_vim
8305 ns_params
= db_nslcmop
.get("operationParams")
8306 if ns_params
and ns_params
.get("timeout_ns_heal"):
8307 timeout_ns_heal
= ns_params
["timeout_ns_heal"]
8309 timeout_ns_heal
= self
.timeout
.ns_heal
8313 nslcmop_id
= db_nslcmop
["_id"]
8315 "action_id": nslcmop_id
,
8317 self
.logger
.warning(
8318 "db_nslcmop={} and timeout_ns_heal={}".format(
8319 db_nslcmop
, timeout_ns_heal
8322 target
.update(db_nslcmop
.get("operationParams", {}))
8324 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
8325 desc
= await self
.RO
.recreate(nsr_id
, target
)
8326 self
.logger
.debug("RO return > {}".format(desc
))
8327 action_id
= desc
["action_id"]
8328 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8329 await self
._wait
_ng
_ro
(
8336 operation
="healing",
8341 "_admin.deployed.RO.operational-status": "running",
8342 "detailed-status": " ".join(stage
),
8344 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8345 self
._write
_op
_status
(nslcmop_id
, stage
)
8347 logging_text
+ "ns healed at RO. RO_id={}".format(action_id
)
8350 except Exception as e
:
8351 stage
[2] = "ERROR healing at VIM"
8352 # self.set_vnfr_at_error(db_vnfrs, str(e))
8354 "Error healing at VIM {}".format(e
),
8355 exc_info
=not isinstance(
8358 ROclient
.ROClientException
,
8384 task_instantiation_info
,
8387 # launch instantiate_N2VC in a asyncio task and register task object
8388 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8389 # if not found, create one entry and update database
8390 # fill db_nsr._admin.deployed.VCA.<index>
8393 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
8397 get_charm_name
= False
8398 if "execution-environment-list" in descriptor_config
:
8399 ee_list
= descriptor_config
.get("execution-environment-list", [])
8400 elif "juju" in descriptor_config
:
8401 ee_list
= [descriptor_config
] # ns charms
8402 if "execution-environment-list" not in descriptor_config
:
8403 # charm name is only required for ns charms
8404 get_charm_name
= True
8405 else: # other types as script are not supported
8408 for ee_item
in ee_list
:
8411 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8412 ee_item
.get("juju"), ee_item
.get("helm-chart")
8415 ee_descriptor_id
= ee_item
.get("id")
8416 vca_name
, charm_name
, vca_type
= self
.get_vca_info(
8417 ee_item
, db_nsr
, get_charm_name
8421 logging_text
+ "skipping, non juju/charm/helm configuration"
8426 for vca_index
, vca_deployed
in enumerate(
8427 db_nsr
["_admin"]["deployed"]["VCA"]
8429 if not vca_deployed
:
8432 vca_deployed
.get("member-vnf-index") == member_vnf_index
8433 and vca_deployed
.get("vdu_id") == vdu_id
8434 and vca_deployed
.get("kdu_name") == kdu_name
8435 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
8436 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
8440 # not found, create one.
8442 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
8445 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
8447 target
+= "/kdu/{}".format(kdu_name
)
8449 "target_element": target
,
8450 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8451 "member-vnf-index": member_vnf_index
,
8453 "kdu_name": kdu_name
,
8454 "vdu_count_index": vdu_index
,
8455 "operational-status": "init", # TODO revise
8456 "detailed-status": "", # TODO revise
8457 "step": "initial-deploy", # TODO revise
8459 "vdu_name": vdu_name
,
8461 "ee_descriptor_id": ee_descriptor_id
,
8462 "charm_name": charm_name
,
8466 # create VCA and configurationStatus in db
8468 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
8469 "configurationStatus.{}".format(vca_index
): dict(),
8471 self
.update_db_2("nsrs", nsr_id
, db_dict
)
8473 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
8475 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
8476 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
8477 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
8480 task_n2vc
= asyncio
.ensure_future(
8482 logging_text
=logging_text
,
8483 vca_index
=vca_index
,
8489 vdu_index
=vdu_index
,
8490 deploy_params
=deploy_params
,
8491 config_descriptor
=descriptor_config
,
8492 base_folder
=base_folder
,
8493 nslcmop_id
=nslcmop_id
,
8497 ee_config_descriptor
=ee_item
,
8500 self
.lcm_tasks
.register(
8504 "instantiate_N2VC-{}".format(vca_index
),
8507 task_instantiation_info
[
8509 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
8510 member_vnf_index
or "", vdu_id
or ""
8513 async def heal_N2VC(
8530 ee_config_descriptor
,
8532 nsr_id
= db_nsr
["_id"]
8533 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
8534 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
8535 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
8536 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
8538 "collection": "nsrs",
8539 "filter": {"_id": nsr_id
},
8540 "path": db_update_entry
,
8545 element_under_configuration
= nsr_id
8549 vnfr_id
= db_vnfr
["_id"]
8550 osm_config
["osm"]["vnf_id"] = vnfr_id
8552 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
8554 if vca_type
== "native_charm":
8557 index_number
= vdu_index
or 0
8560 element_type
= "VNF"
8561 element_under_configuration
= vnfr_id
8562 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
8564 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
8565 element_type
= "VDU"
8566 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
8567 osm_config
["osm"]["vdu_id"] = vdu_id
8569 namespace
+= ".{}".format(kdu_name
)
8570 element_type
= "KDU"
8571 element_under_configuration
= kdu_name
8572 osm_config
["osm"]["kdu_name"] = kdu_name
8575 if base_folder
["pkg-dir"]:
8576 artifact_path
= "{}/{}/{}/{}".format(
8577 base_folder
["folder"],
8578 base_folder
["pkg-dir"],
8581 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8586 artifact_path
= "{}/Scripts/{}/{}/".format(
8587 base_folder
["folder"],
8590 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8595 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
8597 # get initial_config_primitive_list that applies to this element
8598 initial_config_primitive_list
= config_descriptor
.get(
8599 "initial-config-primitive"
8603 "Initial config primitive list > {}".format(
8604 initial_config_primitive_list
8608 # add config if not present for NS charm
8609 ee_descriptor_id
= ee_config_descriptor
.get("id")
8610 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
8611 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
8612 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
8616 "Initial config primitive list #2 > {}".format(
8617 initial_config_primitive_list
8620 # n2vc_redesign STEP 3.1
8621 # find old ee_id if exists
8622 ee_id
= vca_deployed
.get("ee_id")
8624 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
8625 # create or register execution environment in VCA. Only for native charms when healing
8626 if vca_type
== "native_charm":
8627 step
= "Waiting to VM being up and getting IP address"
8628 self
.logger
.debug(logging_text
+ step
)
8629 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8638 credentials
= {"hostname": rw_mgmt_ip
}
8640 username
= deep_get(
8641 config_descriptor
, ("config-access", "ssh-access", "default-user")
8643 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
8644 # merged. Meanwhile let's get username from initial-config-primitive
8645 if not username
and initial_config_primitive_list
:
8646 for config_primitive
in initial_config_primitive_list
:
8647 for param
in config_primitive
.get("parameter", ()):
8648 if param
["name"] == "ssh-username":
8649 username
= param
["value"]
8653 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8654 "'config-access.ssh-access.default-user'"
8656 credentials
["username"] = username
8658 # n2vc_redesign STEP 3.2
8659 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8660 self
._write
_configuration
_status
(
8662 vca_index
=vca_index
,
8663 status
="REGISTERING",
8664 element_under_configuration
=element_under_configuration
,
8665 element_type
=element_type
,
8668 step
= "register execution environment {}".format(credentials
)
8669 self
.logger
.debug(logging_text
+ step
)
8670 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
8671 credentials
=credentials
,
8672 namespace
=namespace
,
8677 # update ee_id en db
8679 "_admin.deployed.VCA.{}.ee_id".format(vca_index
): ee_id
,
8681 self
.update_db_2("nsrs", nsr_id
, db_dict_ee_id
)
8683 # for compatibility with MON/POL modules, the need model and application name at database
8684 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
8685 # Not sure if this need to be done when healing
8687 ee_id_parts = ee_id.split(".")
8688 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8689 if len(ee_id_parts) >= 2:
8690 model_name = ee_id_parts[0]
8691 application_name = ee_id_parts[1]
8692 db_nsr_update[db_update_entry + "model"] = model_name
8693 db_nsr_update[db_update_entry + "application"] = application_name
8696 # n2vc_redesign STEP 3.3
8697 # Install configuration software. Only for native charms.
8698 step
= "Install configuration Software"
8700 self
._write
_configuration
_status
(
8702 vca_index
=vca_index
,
8703 status
="INSTALLING SW",
8704 element_under_configuration
=element_under_configuration
,
8705 element_type
=element_type
,
8706 # other_update=db_nsr_update,
8710 # TODO check if already done
8711 self
.logger
.debug(logging_text
+ step
)
8713 if vca_type
== "native_charm":
8714 config_primitive
= next(
8715 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
8718 if config_primitive
:
8719 config
= self
._map
_primitive
_params
(
8720 config_primitive
, {}, deploy_params
8722 await self
.vca_map
[vca_type
].install_configuration_sw(
8724 artifact_path
=artifact_path
,
8732 # write in db flag of configuration_sw already installed
8734 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
8737 # Not sure if this need to be done when healing
8739 # add relations for this VCA (wait for other peers related with this VCA)
8740 await self._add_vca_relations(
8741 logging_text=logging_text,
8744 vca_index=vca_index,
8748 # if SSH access is required, then get execution environment SSH public
8749 # if native charm we have waited already to VM be UP
8750 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
8753 # self.logger.debug("get ssh key block")
8755 config_descriptor
, ("config-access", "ssh-access", "required")
8757 # self.logger.debug("ssh key needed")
8758 # Needed to inject a ssh key
8761 ("config-access", "ssh-access", "default-user"),
8763 step
= "Install configuration Software, getting public ssh key"
8764 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
8765 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
8768 step
= "Insert public key into VM user={} ssh_key={}".format(
8772 # self.logger.debug("no need to get ssh key")
8773 step
= "Waiting to VM being up and getting IP address"
8774 self
.logger
.debug(logging_text
+ step
)
8776 # n2vc_redesign STEP 5.1
8777 # wait for RO (ip-address) Insert pub_key into VM
8778 # IMPORTANT: We need do wait for RO to complete healing operation.
8779 await self
._wait
_heal
_ro
(nsr_id
, self
.timeout
.ns_heal
)
8782 rw_mgmt_ip
= await self
.wait_kdu_up(
8783 logging_text
, nsr_id
, vnfr_id
, kdu_name
8786 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8796 rw_mgmt_ip
= None # This is for a NS configuration
8798 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
8800 # store rw_mgmt_ip in deploy params for later replacement
8801 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
8804 # get run-day1 operation parameter
8805 runDay1
= deploy_params
.get("run-day1", False)
8807 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id
, vdu_id
, runDay1
)
8810 # n2vc_redesign STEP 6 Execute initial config primitive
8811 step
= "execute initial config primitive"
8813 # wait for dependent primitives execution (NS -> VNF -> VDU)
8814 if initial_config_primitive_list
:
8815 await self
._wait
_dependent
_n
2vc
(
8816 nsr_id
, vca_deployed_list
, vca_index
8819 # stage, in function of element type: vdu, kdu, vnf or ns
8820 my_vca
= vca_deployed_list
[vca_index
]
8821 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
8823 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
8824 elif my_vca
.get("member-vnf-index"):
8826 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
8829 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
8831 self
._write
_configuration
_status
(
8832 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
8835 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
8837 check_if_terminated_needed
= True
8838 for initial_config_primitive
in initial_config_primitive_list
:
8839 # adding information on the vca_deployed if it is a NS execution environment
8840 if not vca_deployed
["member-vnf-index"]:
8841 deploy_params
["ns_config_info"] = json
.dumps(
8842 self
._get
_ns
_config
_info
(nsr_id
)
8844 # TODO check if already done
8845 primitive_params_
= self
._map
_primitive
_params
(
8846 initial_config_primitive
, {}, deploy_params
8849 step
= "execute primitive '{}' params '{}'".format(
8850 initial_config_primitive
["name"], primitive_params_
8852 self
.logger
.debug(logging_text
+ step
)
8853 await self
.vca_map
[vca_type
].exec_primitive(
8855 primitive_name
=initial_config_primitive
["name"],
8856 params_dict
=primitive_params_
,
8861 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
8862 if check_if_terminated_needed
:
8863 if config_descriptor
.get("terminate-config-primitive"):
8867 {db_update_entry
+ "needed_terminate": True},
8869 check_if_terminated_needed
= False
8871 # TODO register in database that primitive is done
8873 # STEP 7 Configure metrics
8874 # Not sure if this need to be done when healing
8876 if vca_type == "helm" or vca_type == "helm-v3":
8877 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8879 artifact_path=artifact_path,
8880 ee_config_descriptor=ee_config_descriptor,
8883 target_ip=rw_mgmt_ip,
8889 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8892 for job in prometheus_jobs:
8895 {"job_name": job["job_name"]},
8898 fail_on_empty=False,
8902 step
= "instantiated at VCA"
8903 self
.logger
.debug(logging_text
+ step
)
8905 self
._write
_configuration
_status
(
8906 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
8909 except Exception as e
: # TODO not use Exception but N2VC exception
8910 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8912 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
8915 "Exception while {} : {}".format(step
, e
), exc_info
=True
8917 self
._write
_configuration
_status
(
8918 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
8920 raise LcmException("{} {}".format(step
, e
)) from e
async def _wait_heal_ro(
    self,
    nsr_id,
    timeout=600,
):
    """Poll the NS record until RO finishes the healing operation.

    Re-reads the nsrs document every 15 seconds and returns as soon as
    ``_admin.deployed.RO.operational-status`` leaves the "healing" state.

    :param nsr_id: NS instance id whose RO status is polled
    :param timeout: maximum number of seconds to wait (default 600)
    :raises NgRoException: if RO is still "healing" when the timeout expires
    """
    start_time = time()
    while time() <= start_time + timeout:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
            "operational-status"
        ]
        self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
        if operational_status_ro != "healing":
            break
        await asyncio.sleep(15)
    else:  # while loop exhausted: RO never left the "healing" state
        # Fix: the original message said "deploy" (copy-paste from the
        # instantiate waiter); this helper is only used by the heal flow.
        raise NgRoException("Timeout waiting ns to heal")
8940 async def vertical_scale(self
, nsr_id
, nslcmop_id
):
8942 Vertical Scale the VDUs in a NS
8944 :param: nsr_id: NS Instance ID
8945 :param: nslcmop_id: nslcmop ID of the vertical scale operation
8948 logging_text
= "Task ns={} vertical scale ".format(nsr_id
)
8949 self
.logger
.info(logging_text
+ "Enter")
8950 stage
= ["Preparing the environment", ""]
8951 # get all needed from database
8956 # in case of error, indicates what part of scale was failed to put nsr at error status
8957 start_deploy
= time()
8960 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8961 operationParams
= db_nslcmop
.get("operationParams")
8962 vertical_scale_data
= operationParams
["verticalScaleVnf"]
8963 vnfd_id
= vertical_scale_data
["vnfdId"]
8964 count_index
= vertical_scale_data
["countIndex"]
8965 vdu_id_ref
= vertical_scale_data
["vduId"]
8966 vnfr_id
= vertical_scale_data
["vnfInstanceId"]
8967 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8968 db_flavor
= db_nsr
.get("flavor")
8969 db_flavor_index
= str(len(db_flavor
))
8971 def set_flavor_refrence_to_vdur(diff
=0):
8973 Utility function to add and remove the
8974 ref to new ns-flavor-id to vdurs
8975 :param: diff: default 0
8978 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
8979 for vdu_index
, vdur
in enumerate(db_vnfr
.get("vdur", ())):
8981 vdur
.get("count-index") == count_index
8982 and vdur
.get("vdu-id-ref") == vdu_id_ref
8986 "vdur.count-index": count_index
,
8987 "vdur.vdu-id-ref": vdu_id_ref
,
8989 q_filter
.update(filter_text
)
8991 db_update
["vdur.{}.ns-flavor-id".format(vdu_index
)] = str(
8992 int(db_flavor_index
) - diff
8997 update_dict
=db_update
,
9001 # wait for any previous tasks in process
9002 stage
[1] = "Waiting for previous operations to terminate"
9003 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
9005 self
._write
_ns
_status
(
9008 current_operation
="VERTICALSCALE",
9009 current_operation_id
=nslcmop_id
,
9011 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
9013 stage
[1] + " after having waited for previous tasks to be completed"
9015 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
9016 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
9017 virtual_compute
= vnfd
["virtual-compute-desc"][0]
9018 virtual_memory
= round(
9019 float(virtual_compute
["virtual-memory"]["size"]) * 1024
9021 virtual_cpu
= virtual_compute
["virtual-cpu"]["num-virtual-cpu"]
9022 virtual_storage
= vnfd
["virtual-storage-desc"][0]["size-of-storage"]
9023 flavor_dict_update
= {
9024 "id": db_flavor_index
,
9025 "memory-mb": virtual_memory
,
9026 "name": f
"{vdu_id_ref}-{count_index}-flv",
9027 "storage-gb": str(virtual_storage
),
9028 "vcpu-count": virtual_cpu
,
9030 db_flavor
.append(flavor_dict_update
)
9032 db_update
["flavor"] = db_flavor
9036 # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
9040 update_dict
=db_update
,
9043 set_flavor_refrence_to_vdur()
9045 new_operationParams
= {
9046 "lcmOperationType": "verticalscale",
9047 "verticalScale": "CHANGE_VNFFLAVOR",
9048 "nsInstanceId": nsr_id
,
9049 "changeVnfFlavorData": {
9050 "vnfInstanceId": vnfr_id
,
9051 "additionalParams": {
9052 "vduid": vdu_id_ref
,
9053 "vduCountIndex": count_index
,
9054 "virtualMemory": virtual_memory
,
9055 "numVirtualCpu": int(virtual_cpu
),
9056 "sizeOfStorage": int(virtual_storage
),
9060 target
.update(new_operationParams
)
9062 stage
[1] = "Sending vertical scale request to RO... {}".format(target
)
9063 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
9064 self
.logger
.info("RO target > {}".format(target
))
9065 desc
= await self
.RO
.vertical_scale(nsr_id
, target
)
9066 self
.logger
.info("RO.vertical_scale return value - {}".format(desc
))
9067 action_id
= desc
["action_id"]
9068 await self
._wait
_ng
_ro
(
9073 self
.timeout
.verticalscale
,
9074 operation
="verticalscale",
9078 ROclient
.ROClientException
,
9082 self
.logger
.error("Exit Exception {}".format(e
))
9084 except asyncio
.CancelledError
:
9085 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
9086 exc
= "Operation was cancelled"
9087 except Exception as e
:
9088 exc
= traceback
.format_exc()
9089 self
.logger
.critical(
9090 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
9094 self
.logger
.critical(
9095 "Vertical-Scale operation Failed, cleaning up nsrs and vnfrs flavor detail"
9101 pull
={"flavor": {"id": db_flavor_index
}},
9103 set_flavor_refrence_to_vdur(diff
=1)
9104 return "FAILED", "Error in verticalscale VNF {}".format(exc
)
9106 return "COMPLETED", "Done"