1 # -*- coding: utf-8 -*-
4 # Copyright 2018 Telefonica S.A.
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
21 from typing
import Any
, Dict
, List
24 import logging
.handlers
37 from osm_lcm
import ROclient
38 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
39 from osm_lcm
.data_utils
.nsr
import (
42 get_deployed_vca_list
,
45 from osm_lcm
.data_utils
.vca
import (
54 from osm_lcm
.ng_ro
import NgRoClient
, NgRoException
55 from osm_lcm
.lcm_utils
import (
61 check_juju_bundle_existence
,
62 get_charm_artifact_path
,
66 from osm_lcm
.data_utils
.nsd
import (
67 get_ns_configuration_relation_list
,
71 from osm_lcm
.data_utils
.vnfd
import (
77 get_ee_sorted_initial_config_primitive_list
,
78 get_ee_sorted_terminate_config_primitive_list
,
80 get_virtual_link_profiles
,
85 get_number_of_instances
,
87 get_kdu_resource_profile
,
88 find_software_version
,
91 from osm_lcm
.data_utils
.list_utils
import find_in_list
92 from osm_lcm
.data_utils
.vnfr
import (
96 get_volumes_from_instantiation_params
,
98 from osm_lcm
.data_utils
.dict_utils
import parse_yaml_strings
99 from osm_lcm
.data_utils
.database
.vim_account
import VimAccountDB
100 from n2vc
.definitions
import RelationEndpoint
101 from n2vc
.k8s_helm3_conn
import K8sHelm3Connector
102 from n2vc
.k8s_juju_conn
import K8sJujuConnector
104 from osm_common
.dbbase
import DbException
105 from osm_common
.fsbase
import FsException
107 from osm_lcm
.data_utils
.database
.database
import Database
108 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
109 from osm_lcm
.data_utils
.wim
import (
111 get_target_wim_attrs
,
112 select_feasible_wim_account
,
115 from n2vc
.n2vc_juju_conn
import N2VCJujuConnector
116 from n2vc
.exceptions
import N2VCException
, N2VCNotFound
, K8sException
118 from osm_lcm
.lcm_helm_conn
import LCMHelmConn
119 from osm_lcm
.osm_config
import OsmConfigBuilder
120 from osm_lcm
.prometheus
import parse_job
122 from copy
import copy
, deepcopy
123 from time
import time
124 from uuid
import uuid4
126 from random
import SystemRandom
128 __author__
= "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
131 class NsLcm(LcmBase
):
132 SUBOPERATION_STATUS_NOT_FOUND
= -1
133 SUBOPERATION_STATUS_NEW
= -2
134 SUBOPERATION_STATUS_SKIP
= -3
135 EE_TLS_NAME
= "ee-tls"
136 task_name_deploy_vca
= "Deploying VCA"
137 rel_operation_types
= {
146 def __init__(self
, msg
, lcm_tasks
, config
: LcmCfg
):
148 Init, Connect to database, filesystem storage, and messaging
149 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
152 super().__init
__(msg
=msg
, logger
=logging
.getLogger("lcm.ns"))
154 self
.db
= Database().instance
.db
155 self
.fs
= Filesystem().instance
.fs
156 self
.lcm_tasks
= lcm_tasks
157 self
.timeout
= config
.timeout
158 self
.ro_config
= config
.RO
159 self
.vca_config
= config
.VCA
161 # create N2VC connector
162 self
.n2vc
= N2VCJujuConnector(
164 on_update_db
=self
._on
_update
_n
2vc
_db
,
169 self
.conn_helm_ee
= LCMHelmConn(
171 vca_config
=self
.vca_config
,
172 on_update_db
=self
._on
_update
_n
2vc
_db
,
175 self
.k8sclusterhelm3
= K8sHelm3Connector(
176 kubectl_command
=self
.vca_config
.kubectlpath
,
177 helm_command
=self
.vca_config
.helm3path
,
184 self
.k8sclusterjuju
= K8sJujuConnector(
185 kubectl_command
=self
.vca_config
.kubectlpath
,
186 juju_command
=self
.vca_config
.jujupath
,
188 on_update_db
=self
._on
_update
_k
8s
_db
,
193 self
.k8scluster_map
= {
194 "helm-chart-v3": self
.k8sclusterhelm3
,
195 "chart": self
.k8sclusterhelm3
,
196 "juju-bundle": self
.k8sclusterjuju
,
197 "juju": self
.k8sclusterjuju
,
201 "lxc_proxy_charm": self
.n2vc
,
202 "native_charm": self
.n2vc
,
203 "k8s_proxy_charm": self
.n2vc
,
204 "helm": self
.conn_helm_ee
,
205 "helm-v3": self
.conn_helm_ee
,
209 self
.RO
= NgRoClient(**self
.ro_config
.to_dict())
211 self
.op_status_map
= {
212 "instantiation": self
.RO
.status
,
213 "termination": self
.RO
.status
,
214 "migrate": self
.RO
.status
,
215 "healing": self
.RO
.recreate_status
,
216 "verticalscale": self
.RO
.status
,
217 "start_stop_rebuild": self
.RO
.status
,
221 def increment_ip_mac(ip_mac
, vm_index
=1):
222 if not isinstance(ip_mac
, str):
227 dual_ip
= ip_mac
.split(";")
228 if len(dual_ip
) == 2:
230 if ipaddress
.ip_address(ip
).version
== 6:
231 ipv6
= ipaddress
.IPv6Address(ip
)
232 next_ipv6
= str(ipaddress
.IPv6Address(int(ipv6
) + 1))
233 elif ipaddress
.ip_address(ip
).version
== 4:
234 ipv4
= ipaddress
.IPv4Address(ip
)
235 next_ipv4
= str(ipaddress
.IPv4Address(int(ipv4
) + 1))
236 return [next_ipv4
, next_ipv6
]
237 # try with ipv4 look for last dot
238 i
= ip_mac
.rfind(".")
241 return "{}{}".format(ip_mac
[:i
], int(ip_mac
[i
:]) + vm_index
)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i
= ip_mac
.rfind(":")
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac
) - i
) + "x}").format(
248 ip_mac
[:i
], int(ip_mac
[i
:], 16) + vm_index
254 async def _on_update_n2vc_db(self
, table
, filter, path
, updated_data
, vca_id
=None):
255 # remove last dot from path (if exists)
256 if path
.endswith("."):
259 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
260 # .format(table, filter, path, updated_data))
262 nsr_id
= filter.get("_id")
264 # read ns record from database
265 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
266 current_ns_status
= nsr
.get("nsState")
268 # First, we need to verify if the current vcaStatus is null, because if that is the case,
269 # MongoDB will not be able to create the fields used within the update key in the database
270 if not nsr
.get("vcaStatus"):
271 # Write an empty dictionary to the vcaStatus field, it its value is null
272 self
.update_db_2("nsrs", nsr_id
, {"vcaStatus": dict()})
274 # Get vca status for NS
275 status_dict
= await self
.n2vc
.get_status(
276 namespace
="." + nsr_id
, yaml_format
=False, vca_id
=vca_id
279 # Update the vcaStatus
280 db_key
= f
"vcaStatus.{nsr_id}.VNF"
283 db_dict
[db_key
] = status_dict
[nsr_id
]
284 await self
.n2vc
.update_vca_status(db_dict
[db_key
], vca_id
=vca_id
)
286 # update configurationStatus for this VCA
288 vca_index
= int(path
[path
.rfind(".") + 1 :])
291 target_dict
=nsr
, key_list
=("_admin", "deployed", "VCA")
293 vca_status
= vca_list
[vca_index
].get("status")
295 configuration_status_list
= nsr
.get("configurationStatus")
296 config_status
= configuration_status_list
[vca_index
].get("status")
298 if config_status
== "BROKEN" and vca_status
!= "failed":
299 db_dict
["configurationStatus"][vca_index
] = "READY"
300 elif config_status
!= "BROKEN" and vca_status
== "failed":
301 db_dict
["configurationStatus"][vca_index
] = "BROKEN"
302 except Exception as e
:
303 # not update configurationStatus
304 self
.logger
.debug("Error updating vca_index (ignore): {}".format(e
))
306 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
307 # if nsState = 'DEGRADED' check if all is OK
309 if current_ns_status
in ("READY", "DEGRADED"):
310 error_description
= ""
312 if status_dict
.get("machines"):
313 for machine_id
in status_dict
.get("machines"):
314 machine
= status_dict
.get("machines").get(machine_id
)
315 # check machine agent-status
316 if machine
.get("agent-status"):
317 s
= machine
.get("agent-status").get("status")
320 error_description
+= (
321 "machine {} agent-status={} ; ".format(
325 # check machine instance status
326 if machine
.get("instance-status"):
327 s
= machine
.get("instance-status").get("status")
330 error_description
+= (
331 "machine {} instance-status={} ; ".format(
336 if status_dict
.get("applications"):
337 for app_id
in status_dict
.get("applications"):
338 app
= status_dict
.get("applications").get(app_id
)
339 # check application status
340 if app
.get("status"):
341 s
= app
.get("status").get("status")
344 error_description
+= (
345 "application {} status={} ; ".format(app_id
, s
)
348 if error_description
:
349 db_dict
["errorDescription"] = error_description
350 if current_ns_status
== "READY" and is_degraded
:
351 db_dict
["nsState"] = "DEGRADED"
352 if current_ns_status
== "DEGRADED" and not is_degraded
:
353 db_dict
["nsState"] = "READY"
356 self
.update_db_2("nsrs", nsr_id
, db_dict
)
358 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
360 except Exception as e
:
361 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
363 async def _on_update_k8s_db(
364 self
, cluster_uuid
, kdu_instance
, filter=None, vca_id
=None, cluster_type
="juju"
367 Updating vca status in NSR record
368 :param cluster_uuid: UUID of a k8s cluster
369 :param kdu_instance: The unique name of the KDU instance
370 :param filter: To get nsr_id
371 :cluster_type: The cluster type (juju, k8s)
375 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
376 # .format(cluster_uuid, kdu_instance, filter))
378 nsr_id
= filter.get("_id")
380 vca_status
= await self
.k8scluster_map
[cluster_type
].status_kdu(
381 cluster_uuid
=cluster_uuid
,
382 kdu_instance
=kdu_instance
,
384 complete_status
=True,
388 # First, we need to verify if the current vcaStatus is null, because if that is the case,
389 # MongoDB will not be able to create the fields used within the update key in the database
390 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
391 if not nsr
.get("vcaStatus"):
392 # Write an empty dictionary to the vcaStatus field, it its value is null
393 self
.update_db_2("nsrs", nsr_id
, {"vcaStatus": dict()})
395 # Update the vcaStatus
396 db_key
= f
"vcaStatus.{nsr_id}.KNF"
399 db_dict
[db_key
] = vca_status
401 if cluster_type
in ("juju-bundle", "juju"):
402 # TODO -> this should be done in a more uniform way, I think in N2VC, in order to update the K8s VCA
403 # status in a similar way between Juju Bundles and Helm Charts on this side
404 await self
.k8sclusterjuju
.update_vca_status(
411 f
"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
415 self
.update_db_2("nsrs", nsr_id
, db_dict
)
416 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
418 except Exception as e
:
419 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
422 def _parse_cloud_init(cloud_init_text
, additional_params
, vnfd_id
, vdu_id
):
425 undefined
=StrictUndefined
,
426 autoescape
=select_autoescape(default_for_string
=True, default
=True),
428 template
= env
.from_string(cloud_init_text
)
429 return template
.render(additional_params
or {})
430 except UndefinedError
as e
:
432 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
433 "file, must be provided in the instantiation parameters inside the "
434 "'additionalParamsForVnf/Vdu' block".format(e
, vnfd_id
, vdu_id
)
436 except (TemplateError
, TemplateNotFound
) as e
:
438 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
443 def _get_vdu_cloud_init_content(self
, vdu
, vnfd
):
444 cloud_init_content
= cloud_init_file
= None
446 if vdu
.get("cloud-init-file"):
447 base_folder
= vnfd
["_admin"]["storage"]
448 if base_folder
["pkg-dir"]:
449 cloud_init_file
= "{}/{}/cloud_init/{}".format(
450 base_folder
["folder"],
451 base_folder
["pkg-dir"],
452 vdu
["cloud-init-file"],
455 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
456 base_folder
["folder"],
457 vdu
["cloud-init-file"],
459 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
460 cloud_init_content
= ci_file
.read()
461 elif vdu
.get("cloud-init"):
462 cloud_init_content
= vdu
["cloud-init"]
464 return cloud_init_content
465 except FsException
as e
:
467 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
468 vnfd
["id"], vdu
["id"], cloud_init_file
, e
472 def _get_vdu_additional_params(self
, db_vnfr
, vdu_id
):
474 (vdur
for vdur
in db_vnfr
.get("vdur") if vdu_id
== vdur
["vdu-id-ref"]), {}
476 additional_params
= vdur
.get("additionalParams")
477 return parse_yaml_strings(additional_params
)
480 def ip_profile_2_RO(ip_profile
):
481 RO_ip_profile
= deepcopy(ip_profile
)
482 if "dns-server" in RO_ip_profile
:
483 if isinstance(RO_ip_profile
["dns-server"], list):
484 RO_ip_profile
["dns-address"] = []
485 for ds
in RO_ip_profile
.pop("dns-server"):
486 RO_ip_profile
["dns-address"].append(ds
["address"])
488 RO_ip_profile
["dns-address"] = RO_ip_profile
.pop("dns-server")
489 if RO_ip_profile
.get("ip-version") == "ipv4":
490 RO_ip_profile
["ip-version"] = "IPv4"
491 if RO_ip_profile
.get("ip-version") == "ipv6":
492 RO_ip_profile
["ip-version"] = "IPv6"
493 if "dhcp-params" in RO_ip_profile
:
494 RO_ip_profile
["dhcp"] = RO_ip_profile
.pop("dhcp-params")
497 def scale_vnfr(self
, db_vnfr
, vdu_create
=None, vdu_delete
=None, mark_delete
=False):
498 db_vdu_push_list
= []
500 db_update
= {"_admin.modified": time()}
502 for vdu_id
, vdu_count
in vdu_create
.items():
506 for vdur
in reversed(db_vnfr
["vdur"])
507 if vdur
["vdu-id-ref"] == vdu_id
512 # Read the template saved in the db:
514 "No vdur in the database. Using the vdur-template to scale"
516 vdur_template
= db_vnfr
.get("vdur-template")
517 if not vdur_template
:
519 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
523 vdur
= vdur_template
[0]
524 # Delete a template from the database after using it
527 {"_id": db_vnfr
["_id"]},
529 pull
={"vdur-template": {"_id": vdur
["_id"]}},
531 for count
in range(vdu_count
):
532 vdur_copy
= deepcopy(vdur
)
533 vdur_copy
["status"] = "BUILD"
534 vdur_copy
["status-detailed"] = None
535 vdur_copy
["ip-address"] = None
536 vdur_copy
["_id"] = str(uuid4())
537 vdur_copy
["count-index"] += count
+ 1
538 vdur_copy
["id"] = "{}-{}".format(
539 vdur_copy
["vdu-id-ref"], vdur_copy
["count-index"]
541 vdur_copy
.pop("vim_info", None)
542 for iface
in vdur_copy
["interfaces"]:
543 if iface
.get("fixed-ip"):
544 iface
["ip-address"] = self
.increment_ip_mac(
545 iface
["ip-address"], count
+ 1
548 iface
.pop("ip-address", None)
549 if iface
.get("fixed-mac"):
550 iface
["mac-address"] = self
.increment_ip_mac(
551 iface
["mac-address"], count
+ 1
554 iface
.pop("mac-address", None)
558 ) # only first vdu can be managment of vnf
559 db_vdu_push_list
.append(vdur_copy
)
560 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
562 if len(db_vnfr
["vdur"]) == 1:
563 # The scale will move to 0 instances
565 "Scaling to 0 !, creating the template with the last vdur"
567 template_vdur
= [db_vnfr
["vdur"][0]]
568 for vdu_id
, vdu_count
in vdu_delete
.items():
570 indexes_to_delete
= [
572 for iv
in enumerate(db_vnfr
["vdur"])
573 if iv
[1]["vdu-id-ref"] == vdu_id
577 "vdur.{}.status".format(i
): "DELETING"
578 for i
in indexes_to_delete
[-vdu_count
:]
582 # it must be deleted one by one because common.db does not allow otherwise
585 for v
in reversed(db_vnfr
["vdur"])
586 if v
["vdu-id-ref"] == vdu_id
588 for vdu
in vdus_to_delete
[:vdu_count
]:
591 {"_id": db_vnfr
["_id"]},
593 pull
={"vdur": {"_id": vdu
["_id"]}},
597 db_push
["vdur"] = db_vdu_push_list
599 db_push
["vdur-template"] = template_vdur
602 db_vnfr
["vdur-template"] = template_vdur
603 self
.db
.set_one("vnfrs", {"_id": db_vnfr
["_id"]}, db_update
, push_list
=db_push
)
604 # modify passed dictionary db_vnfr
605 db_vnfr_
= self
.db
.get_one("vnfrs", {"_id": db_vnfr
["_id"]})
606 db_vnfr
["vdur"] = db_vnfr_
["vdur"]
608 def ns_update_nsr(self
, ns_update_nsr
, db_nsr
, nsr_desc_RO
):
610 Updates database nsr with the RO info for the created vld
611 :param ns_update_nsr: dictionary to be filled with the updated info
612 :param db_nsr: content of db_nsr. This is also modified
613 :param nsr_desc_RO: nsr descriptor from RO
614 :return: Nothing, LcmException is raised on errors
617 for vld_index
, vld
in enumerate(get_iterable(db_nsr
, "vld")):
618 for net_RO
in get_iterable(nsr_desc_RO
, "nets"):
619 if vld
["id"] != net_RO
.get("ns_net_osm_id"):
621 vld
["vim-id"] = net_RO
.get("vim_net_id")
622 vld
["name"] = net_RO
.get("vim_name")
623 vld
["status"] = net_RO
.get("status")
624 vld
["status-detailed"] = net_RO
.get("error_msg")
625 ns_update_nsr
["vld.{}".format(vld_index
)] = vld
629 "ns_update_nsr: Not found vld={} at RO info".format(vld
["id"])
632 def set_vnfr_at_error(self
, db_vnfrs
, error_text
):
634 for db_vnfr
in db_vnfrs
.values():
635 vnfr_update
= {"status": "ERROR"}
636 for vdu_index
, vdur
in enumerate(get_iterable(db_vnfr
, "vdur")):
637 if "status" not in vdur
:
638 vdur
["status"] = "ERROR"
639 vnfr_update
["vdur.{}.status".format(vdu_index
)] = "ERROR"
641 vdur
["status-detailed"] = str(error_text
)
643 "vdur.{}.status-detailed".format(vdu_index
)
645 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
646 except DbException
as e
:
647 self
.logger
.error("Cannot update vnf. {}".format(e
))
649 def _get_ns_config_info(self
, nsr_id
):
651 Generates a mapping between vnf,vdu elements and the N2VC id
652 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
653 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
654 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
655 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
657 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
658 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
660 ns_config_info
= {"osm-config-mapping": mapping
}
661 for vca
in vca_deployed_list
:
662 if not vca
["member-vnf-index"]:
664 if not vca
["vdu_id"]:
665 mapping
[vca
["member-vnf-index"]] = vca
["application"]
669 vca
["member-vnf-index"], vca
["vdu_id"], vca
["vdu_count_index"]
671 ] = vca
["application"]
672 return ns_config_info
674 async def _instantiate_ng_ro(
690 def get_vim_account(vim_account_id
):
692 if vim_account_id
in db_vims
:
693 return db_vims
[vim_account_id
]
694 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
695 db_vims
[vim_account_id
] = db_vim
698 # modify target_vld info with instantiation parameters
699 def parse_vld_instantiation_params(
700 target_vim
, target_vld
, vld_params
, target_sdn
702 if vld_params
.get("ip-profile"):
703 target_vld
["vim_info"][target_vim
]["ip_profile"] = vld_to_ro_ip_profile(
704 vld_params
["ip-profile"]
706 if vld_params
.get("provider-network"):
707 target_vld
["vim_info"][target_vim
]["provider_network"] = vld_params
[
710 if "sdn-ports" in vld_params
["provider-network"] and target_sdn
:
711 target_vld
["vim_info"][target_sdn
]["sdn-ports"] = vld_params
[
715 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
716 # if wim_account_id is specified in vld_params, validate if it is feasible.
717 wim_account_id
, db_wim
= select_feasible_wim_account(
718 db_nsr
, db_vnfrs
, target_vld
, vld_params
, self
.logger
722 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
723 self
.logger
.info("WIM selected: {:s}".format(str(wim_account_id
)))
724 # update vld_params with correct WIM account Id
725 vld_params
["wimAccountId"] = wim_account_id
727 target_wim
= "wim:{}".format(wim_account_id
)
728 target_wim_attrs
= get_target_wim_attrs(nsr_id
, target_vld
, vld_params
)
729 sdn_ports
= get_sdn_ports(vld_params
, db_wim
)
730 if len(sdn_ports
) > 0:
731 target_vld
["vim_info"][target_wim
] = target_wim_attrs
732 target_vld
["vim_info"][target_wim
]["sdn-ports"] = sdn_ports
735 "Target VLD with WIM data: {:s}".format(str(target_vld
))
738 for param
in ("vim-network-name", "vim-network-id"):
739 if vld_params
.get(param
):
740 if isinstance(vld_params
[param
], dict):
741 for vim
, vim_net
in vld_params
[param
].items():
742 other_target_vim
= "vim:" + vim
744 target_vld
["vim_info"],
745 (other_target_vim
, param
.replace("-", "_")),
748 else: # isinstance str
749 target_vld
["vim_info"][target_vim
][
750 param
.replace("-", "_")
751 ] = vld_params
[param
]
752 if vld_params
.get("common_id"):
753 target_vld
["common_id"] = vld_params
.get("common_id")
755 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
756 def update_ns_vld_target(target
, ns_params
):
757 for vnf_params
in ns_params
.get("vnf", ()):
758 if vnf_params
.get("vimAccountId"):
762 for vnfr
in db_vnfrs
.values()
763 if vnf_params
["member-vnf-index"]
764 == vnfr
["member-vnf-index-ref"]
768 vdur
= next((vdur
for vdur
in target_vnf
.get("vdur", ())), None)
771 for a_index
, a_vld
in enumerate(target
["ns"]["vld"]):
772 target_vld
= find_in_list(
773 get_iterable(vdur
, "interfaces"),
774 lambda iface
: iface
.get("ns-vld-id") == a_vld
["name"],
777 vld_params
= find_in_list(
778 get_iterable(ns_params
, "vld"),
779 lambda v_vld
: v_vld
["name"] in (a_vld
["name"], a_vld
["id"]),
782 if vnf_params
.get("vimAccountId") not in a_vld
.get(
785 target_vim_network_list
= [
786 v
for _
, v
in a_vld
.get("vim_info").items()
788 target_vim_network_name
= next(
790 item
.get("vim_network_name", "")
791 for item
in target_vim_network_list
796 target
["ns"]["vld"][a_index
].get("vim_info").update(
798 "vim:{}".format(vnf_params
["vimAccountId"]): {
799 "vim_network_name": target_vim_network_name
,
805 for param
in ("vim-network-name", "vim-network-id"):
806 if vld_params
.get(param
) and isinstance(
807 vld_params
[param
], dict
809 for vim
, vim_net
in vld_params
[
812 other_target_vim
= "vim:" + vim
814 target
["ns"]["vld"][a_index
].get(
819 param
.replace("-", "_"),
824 nslcmop_id
= db_nslcmop
["_id"]
826 "name": db_nsr
["name"],
829 "image": deepcopy(db_nsr
["image"]),
830 "flavor": deepcopy(db_nsr
["flavor"]),
831 "action_id": nslcmop_id
,
832 "cloud_init_content": {},
834 for image
in target
["image"]:
835 image
["vim_info"] = {}
836 for flavor
in target
["flavor"]:
837 flavor
["vim_info"] = {}
838 if db_nsr
.get("shared-volumes"):
839 target
["shared-volumes"] = deepcopy(db_nsr
["shared-volumes"])
840 for shared_volumes
in target
["shared-volumes"]:
841 shared_volumes
["vim_info"] = {}
842 if db_nsr
.get("affinity-or-anti-affinity-group"):
843 target
["affinity-or-anti-affinity-group"] = deepcopy(
844 db_nsr
["affinity-or-anti-affinity-group"]
846 for affinity_or_anti_affinity_group
in target
[
847 "affinity-or-anti-affinity-group"
849 affinity_or_anti_affinity_group
["vim_info"] = {}
851 if db_nslcmop
.get("lcmOperationType") != "instantiate":
852 # get parameters of instantiation:
853 db_nslcmop_instantiate
= self
.db
.get_list(
856 "nsInstanceId": db_nslcmop
["nsInstanceId"],
857 "lcmOperationType": "instantiate",
860 ns_params
= db_nslcmop_instantiate
.get("operationParams")
862 ns_params
= db_nslcmop
.get("operationParams")
863 ssh_keys_instantiation
= ns_params
.get("ssh_keys") or []
864 ssh_keys_all
= ssh_keys_instantiation
+ (n2vc_key_list
or [])
867 for vld_index
, vld
in enumerate(db_nsr
.get("vld")):
868 target_vim
= "vim:{}".format(ns_params
["vimAccountId"])
872 "mgmt-network": vld
.get("mgmt-network", False),
873 "type": vld
.get("type"),
876 "vim_network_name": vld
.get("vim-network-name"),
877 "vim_account_id": ns_params
["vimAccountId"],
881 # check if this network needs SDN assist
882 if vld
.get("pci-interfaces"):
883 db_vim
= get_vim_account(ns_params
["vimAccountId"])
884 if vim_config
:= db_vim
.get("config"):
885 if sdnc_id
:= vim_config
.get("sdn-controller"):
886 sdn_vld
= "nsrs:{}:vld.{}".format(nsr_id
, vld
["id"])
887 target_sdn
= "sdn:{}".format(sdnc_id
)
888 target_vld
["vim_info"][target_sdn
] = {
890 "target_vim": target_vim
,
892 "type": vld
.get("type"),
895 nsd_vnf_profiles
= get_vnf_profiles(nsd
)
896 for nsd_vnf_profile
in nsd_vnf_profiles
:
897 for cp
in nsd_vnf_profile
["virtual-link-connectivity"]:
898 if cp
["virtual-link-profile-id"] == vld
["id"]:
900 "member_vnf:{}.{}".format(
901 cp
["constituent-cpd-id"][0][
902 "constituent-base-element-id"
904 cp
["constituent-cpd-id"][0]["constituent-cpd-id"],
906 ] = "nsrs:{}:vld.{}".format(nsr_id
, vld_index
)
908 # check at nsd descriptor, if there is an ip-profile
910 nsd_vlp
= find_in_list(
911 get_virtual_link_profiles(nsd
),
912 lambda a_link_profile
: a_link_profile
["virtual-link-desc-id"]
917 and nsd_vlp
.get("virtual-link-protocol-data")
918 and nsd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
920 vld_params
["ip-profile"] = nsd_vlp
["virtual-link-protocol-data"][
924 # update vld_params with instantiation params
925 vld_instantiation_params
= find_in_list(
926 get_iterable(ns_params
, "vld"),
927 lambda a_vld
: a_vld
["name"] in (vld
["name"], vld
["id"]),
929 if vld_instantiation_params
:
930 vld_params
.update(vld_instantiation_params
)
931 parse_vld_instantiation_params(target_vim
, target_vld
, vld_params
, None)
932 target
["ns"]["vld"].append(target_vld
)
933 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
934 update_ns_vld_target(target
, ns_params
)
936 for vnfr
in db_vnfrs
.values():
938 db_vnfds
, lambda db_vnf
: db_vnf
["id"] == vnfr
["vnfd-ref"]
940 vnf_params
= find_in_list(
941 get_iterable(ns_params
, "vnf"),
942 lambda a_vnf
: a_vnf
["member-vnf-index"] == vnfr
["member-vnf-index-ref"],
944 target_vnf
= deepcopy(vnfr
)
945 target_vim
= "vim:{}".format(vnfr
["vim-account-id"])
946 for vld
in target_vnf
.get("vld", ()):
947 # check if connected to a ns.vld, to fill target'
948 vnf_cp
= find_in_list(
949 vnfd
.get("int-virtual-link-desc", ()),
950 lambda cpd
: cpd
.get("id") == vld
["id"],
953 ns_cp
= "member_vnf:{}.{}".format(
954 vnfr
["member-vnf-index-ref"], vnf_cp
["id"]
956 if cp2target
.get(ns_cp
):
957 vld
["target"] = cp2target
[ns_cp
]
960 target_vim
: {"vim_network_name": vld
.get("vim-network-name")}
962 # check if this network needs SDN assist
964 if vld
.get("pci-interfaces"):
965 db_vim
= get_vim_account(vnfr
["vim-account-id"])
966 sdnc_id
= db_vim
["config"].get("sdn-controller")
968 sdn_vld
= "vnfrs:{}:vld.{}".format(target_vnf
["_id"], vld
["id"])
969 target_sdn
= "sdn:{}".format(sdnc_id
)
970 vld
["vim_info"][target_sdn
] = {
972 "target_vim": target_vim
,
974 "type": vld
.get("type"),
977 # check at vnfd descriptor, if there is an ip-profile
979 vnfd_vlp
= find_in_list(
980 get_virtual_link_profiles(vnfd
),
981 lambda a_link_profile
: a_link_profile
["id"] == vld
["id"],
985 and vnfd_vlp
.get("virtual-link-protocol-data")
986 and vnfd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
988 vld_params
["ip-profile"] = vnfd_vlp
["virtual-link-protocol-data"][
991 # update vld_params with instantiation params
993 vld_instantiation_params
= find_in_list(
994 get_iterable(vnf_params
, "internal-vld"),
995 lambda i_vld
: i_vld
["name"] == vld
["id"],
997 if vld_instantiation_params
:
998 vld_params
.update(vld_instantiation_params
)
999 parse_vld_instantiation_params(target_vim
, vld
, vld_params
, target_sdn
)
1002 for vdur
in target_vnf
.get("vdur", ()):
1003 if vdur
.get("status") == "DELETING" or vdur
.get("pdu-type"):
1004 continue # This vdu must not be created
1005 vdur
["vim_info"] = {"vim_account_id": vnfr
["vim-account-id"]}
1007 self
.logger
.debug("NS > ssh_keys > {}".format(ssh_keys_all
))
1010 vdu_configuration
= get_configuration(vnfd
, vdur
["vdu-id-ref"])
1011 vnf_configuration
= get_configuration(vnfd
, vnfd
["id"])
1014 and vdu_configuration
.get("config-access")
1015 and vdu_configuration
.get("config-access").get("ssh-access")
1017 vdur
["ssh-keys"] = ssh_keys_all
1018 vdur
["ssh-access-required"] = vdu_configuration
[
1020 ]["ssh-access"]["required"]
1023 and vnf_configuration
.get("config-access")
1024 and vnf_configuration
.get("config-access").get("ssh-access")
1025 and any(iface
.get("mgmt-vnf") for iface
in vdur
["interfaces"])
1027 vdur
["ssh-keys"] = ssh_keys_all
1028 vdur
["ssh-access-required"] = vnf_configuration
[
1030 ]["ssh-access"]["required"]
1031 elif ssh_keys_instantiation
and find_in_list(
1032 vdur
["interfaces"], lambda iface
: iface
.get("mgmt-vnf")
1034 vdur
["ssh-keys"] = ssh_keys_instantiation
1036 self
.logger
.debug("NS > vdur > {}".format(vdur
))
1038 vdud
= get_vdu(vnfd
, vdur
["vdu-id-ref"])
1040 if vdud
.get("cloud-init-file"):
1041 vdur
["cloud-init"] = "{}:file:{}".format(
1042 vnfd
["_id"], vdud
.get("cloud-init-file")
1044 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1045 if vdur
["cloud-init"] not in target
["cloud_init_content"]:
1046 base_folder
= vnfd
["_admin"]["storage"]
1047 if base_folder
["pkg-dir"]:
1048 cloud_init_file
= "{}/{}/cloud_init/{}".format(
1049 base_folder
["folder"],
1050 base_folder
["pkg-dir"],
1051 vdud
.get("cloud-init-file"),
1054 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
1055 base_folder
["folder"],
1056 vdud
.get("cloud-init-file"),
1058 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
1059 target
["cloud_init_content"][
1062 elif vdud
.get("cloud-init"):
1063 vdur
["cloud-init"] = "{}:vdu:{}".format(
1064 vnfd
["_id"], get_vdu_index(vnfd
, vdur
["vdu-id-ref"])
1066 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1067 target
["cloud_init_content"][vdur
["cloud-init"]] = vdud
[
1070 vdur
["additionalParams"] = vdur
.get("additionalParams") or {}
1071 deploy_params_vdu
= self
._format
_additional
_params
(
1072 vdur
.get("additionalParams") or {}
1074 deploy_params_vdu
["OSM"] = get_osm_params(
1075 vnfr
, vdur
["vdu-id-ref"], vdur
["count-index"]
1077 vdur
["additionalParams"] = deploy_params_vdu
1080 ns_flavor
= target
["flavor"][int(vdur
["ns-flavor-id"])]
1081 if target_vim
not in ns_flavor
["vim_info"]:
1082 ns_flavor
["vim_info"][target_vim
] = {}
1085 # in case alternative images are provided we must check if they should be applied
1086 # for the vim_type, modify the vim_type taking into account
1087 ns_image_id
= int(vdur
["ns-image-id"])
1088 if vdur
.get("alt-image-ids"):
1089 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1090 vim_type
= db_vim
["vim_type"]
1091 for alt_image_id
in vdur
.get("alt-image-ids"):
1092 ns_alt_image
= target
["image"][int(alt_image_id
)]
1093 if vim_type
== ns_alt_image
.get("vim-type"):
1094 # must use alternative image
1096 "use alternative image id: {}".format(alt_image_id
)
1098 ns_image_id
= alt_image_id
1099 vdur
["ns-image-id"] = ns_image_id
1101 ns_image
= target
["image"][int(ns_image_id
)]
1102 if target_vim
not in ns_image
["vim_info"]:
1103 ns_image
["vim_info"][target_vim
] = {}
1106 if vdur
.get("affinity-or-anti-affinity-group-id"):
1107 for ags_id
in vdur
["affinity-or-anti-affinity-group-id"]:
1108 ns_ags
= target
["affinity-or-anti-affinity-group"][int(ags_id
)]
1109 if target_vim
not in ns_ags
["vim_info"]:
1110 ns_ags
["vim_info"][target_vim
] = {}
1113 if vdur
.get("shared-volumes-id"):
1114 for sv_id
in vdur
["shared-volumes-id"]:
1115 ns_sv
= find_in_list(
1116 target
["shared-volumes"], lambda sv
: sv_id
in sv
["id"]
1119 ns_sv
["vim_info"][target_vim
] = {}
1121 vdur
["vim_info"] = {target_vim
: {}}
1122 # instantiation parameters
1124 vdu_instantiation_params
= find_in_list(
1125 get_iterable(vnf_params
, "vdu"),
1126 lambda i_vdu
: i_vdu
["id"] == vdud
["id"],
1128 if vdu_instantiation_params
:
1129 # Parse the vdu_volumes from the instantiation params
1130 vdu_volumes
= get_volumes_from_instantiation_params(
1131 vdu_instantiation_params
, vdud
1133 vdur
["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1134 vdur
["additionalParams"]["OSM"][
1136 ] = vdu_instantiation_params
.get("vim-flavor-id")
1137 vdur_list
.append(vdur
)
1138 target_vnf
["vdur"] = vdur_list
1139 target
["vnf"].append(target_vnf
)
1141 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
1142 desc
= await self
.RO
.deploy(nsr_id
, target
)
1143 self
.logger
.debug("RO return > {}".format(desc
))
1144 action_id
= desc
["action_id"]
1145 await self
._wait
_ng
_ro
(
1152 operation
="instantiation",
1157 "_admin.deployed.RO.operational-status": "running",
1158 "detailed-status": " ".join(stage
),
1160 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1161 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1162 self
._write
_op
_status
(nslcmop_id
, stage
)
1164 logging_text
+ "ns deployed at RO. RO_id={}".format(action_id
)
1168 async def _wait_ng_ro(
1178 detailed_status_old
= None
1180 start_time
= start_time
or time()
1181 while time() <= start_time
+ timeout
:
1182 desc_status
= await self
.op_status_map
[operation
](nsr_id
, action_id
)
1183 self
.logger
.debug("Wait NG RO > {}".format(desc_status
))
1184 if desc_status
["status"] == "FAILED":
1185 raise NgRoException(desc_status
["details"])
1186 elif desc_status
["status"] == "BUILD":
1188 stage
[2] = "VIM: ({})".format(desc_status
["details"])
1189 elif desc_status
["status"] == "DONE":
1191 stage
[2] = "Deployed at VIM"
1194 assert False, "ROclient.check_ns_status returns unknown {}".format(
1195 desc_status
["status"]
1197 if stage
and nslcmop_id
and stage
[2] != detailed_status_old
:
1198 detailed_status_old
= stage
[2]
1199 db_nsr_update
["detailed-status"] = " ".join(stage
)
1200 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1201 self
._write
_op
_status
(nslcmop_id
, stage
)
1202 await asyncio
.sleep(15)
1203 else: # timeout_ns_deploy
1204 raise NgRoException("Timeout waiting ns to deploy")
1206 async def _terminate_ng_ro(
1207 self
, logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
1212 start_deploy
= time()
1219 "action_id": nslcmop_id
,
1221 desc
= await self
.RO
.deploy(nsr_id
, target
)
1222 action_id
= desc
["action_id"]
1223 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETING"
1226 + "ns terminate action at RO. action_id={}".format(action_id
)
1230 delete_timeout
= 20 * 60 # 20 minutes
1231 await self
._wait
_ng
_ro
(
1238 operation
="termination",
1240 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1242 await self
.RO
.delete(nsr_id
)
1243 except NgRoException
as e
:
1244 if e
.http_code
== 404: # not found
1245 db_nsr_update
["_admin.deployed.RO.nsr_id"] = None
1246 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1248 logging_text
+ "RO_action_id={} already deleted".format(action_id
)
1250 elif e
.http_code
== 409: # conflict
1251 failed_detail
.append("delete conflict: {}".format(e
))
1254 + "RO_action_id={} delete conflict: {}".format(action_id
, e
)
1257 failed_detail
.append("delete error: {}".format(e
))
1260 + "RO_action_id={} delete error: {}".format(action_id
, e
)
1262 except Exception as e
:
1263 failed_detail
.append("delete error: {}".format(e
))
1265 logging_text
+ "RO_action_id={} delete error: {}".format(action_id
, e
)
1269 stage
[2] = "Error deleting from VIM"
1271 stage
[2] = "Deleted from VIM"
1272 db_nsr_update
["detailed-status"] = " ".join(stage
)
1273 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1274 self
._write
_op
_status
(nslcmop_id
, stage
)
1277 raise LcmException("; ".join(failed_detail
))
1280 async def instantiate_RO(
1294 :param logging_text: preffix text to use at logging
1295 :param nsr_id: nsr identity
1296 :param nsd: database content of ns descriptor
1297 :param db_nsr: database content of ns record
1298 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1300 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1301 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1302 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1303 :return: None or exception
1306 start_deploy
= time()
1307 ns_params
= db_nslcmop
.get("operationParams")
1308 if ns_params
and ns_params
.get("timeout_ns_deploy"):
1309 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
1311 timeout_ns_deploy
= self
.timeout
.ns_deploy
1313 # Check for and optionally request placement optimization. Database will be updated if placement activated
1314 stage
[2] = "Waiting for Placement."
1315 if await self
._do
_placement
(logging_text
, db_nslcmop
, db_vnfrs
):
1316 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1317 for vnfr
in db_vnfrs
.values():
1318 if ns_params
["vimAccountId"] == vnfr
["vim-account-id"]:
1321 ns_params
["vimAccountId"] == vnfr
["vim-account-id"]
1323 return await self
._instantiate
_ng
_ro
(
1336 except Exception as e
:
1337 stage
[2] = "ERROR deploying at VIM"
1338 self
.set_vnfr_at_error(db_vnfrs
, str(e
))
1340 "Error deploying at VIM {}".format(e
),
1341 exc_info
=not isinstance(
1344 ROclient
.ROClientException
,
1353 async def wait_kdu_up(self
, logging_text
, nsr_id
, vnfr_id
, kdu_name
):
1355 Wait for kdu to be up, get ip address
1356 :param logging_text: prefix use for logging
1360 :return: IP address, K8s services
1363 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1366 while nb_tries
< 360:
1367 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1371 for x
in get_iterable(db_vnfr
, "kdur")
1372 if x
.get("kdu-name") == kdu_name
1378 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id
, kdu_name
)
1380 if kdur
.get("status"):
1381 if kdur
["status"] in ("READY", "ENABLED"):
1382 return kdur
.get("ip-address"), kdur
.get("services")
1385 "target KDU={} is in error state".format(kdu_name
)
1388 await asyncio
.sleep(10)
1390 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name
))
1392 async def wait_vm_up_insert_key_ro(
1393 self
, logging_text
, nsr_id
, vnfr_id
, vdu_id
, vdu_index
, pub_key
=None, user
=None
1396 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1397 :param logging_text: prefix use for logging
1402 :param pub_key: public ssh key to inject, None to skip
1403 :param user: user to apply the public ssh key
1407 self
.logger
.debug(logging_text
+ "Starting wait_vm_up_insert_key_ro")
1409 target_vdu_id
= None
1414 if ro_retries
>= 360: # 1 hour
1416 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id
)
1419 await asyncio
.sleep(10)
1422 if not target_vdu_id
:
1423 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1425 if not vdu_id
: # for the VNF case
1426 if db_vnfr
.get("status") == "ERROR":
1428 "Cannot inject ssh-key because target VNF is in error state"
1430 ip_address
= db_vnfr
.get("ip-address")
1436 for x
in get_iterable(db_vnfr
, "vdur")
1437 if x
.get("ip-address") == ip_address
1445 for x
in get_iterable(db_vnfr
, "vdur")
1446 if x
.get("vdu-id-ref") == vdu_id
1447 and x
.get("count-index") == vdu_index
1453 not vdur
and len(db_vnfr
.get("vdur", ())) == 1
1454 ): # If only one, this should be the target vdu
1455 vdur
= db_vnfr
["vdur"][0]
1458 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1459 vnfr_id
, vdu_id
, vdu_index
1462 # New generation RO stores information at "vim_info"
1465 if vdur
.get("vim_info"):
1467 t
for t
in vdur
["vim_info"]
1468 ) # there should be only one key
1469 ng_ro_status
= vdur
["vim_info"][target_vim
].get("vim_status")
1471 vdur
.get("pdu-type")
1472 or vdur
.get("status") == "ACTIVE"
1473 or ng_ro_status
== "ACTIVE"
1475 ip_address
= vdur
.get("ip-address")
1478 target_vdu_id
= vdur
["vdu-id-ref"]
1479 elif vdur
.get("status") == "ERROR" or ng_ro_status
== "ERROR":
1481 "Cannot inject ssh-key because target VM is in error state"
1484 if not target_vdu_id
:
1487 # inject public key into machine
1488 if pub_key
and user
:
1489 self
.logger
.debug(logging_text
+ "Inserting RO key")
1490 self
.logger
.debug("SSH > PubKey > {}".format(pub_key
))
1491 if vdur
.get("pdu-type"):
1492 self
.logger
.error(logging_text
+ "Cannot inject ssh-ky to a PDU")
1497 "action": "inject_ssh_key",
1501 "vnf": [{"_id": vnfr_id
, "vdur": [{"id": vdur
["id"]}]}],
1503 desc
= await self
.RO
.deploy(nsr_id
, target
)
1504 action_id
= desc
["action_id"]
1505 await self
._wait
_ng
_ro
(
1506 nsr_id
, action_id
, timeout
=600, operation
="instantiation"
1509 except NgRoException
as e
:
1511 "Reaching max tries injecting key. Error: {}".format(e
)
1518 async def _wait_dependent_n2vc(self
, nsr_id
, vca_deployed_list
, vca_index
):
1520 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1522 my_vca
= vca_deployed_list
[vca_index
]
1523 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1524 # vdu or kdu: no dependencies
1528 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
1529 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1530 configuration_status_list
= db_nsr
["configurationStatus"]
1531 for index
, vca_deployed
in enumerate(configuration_status_list
):
1532 if index
== vca_index
:
1535 if not my_vca
.get("member-vnf-index") or (
1536 vca_deployed
.get("member-vnf-index")
1537 == my_vca
.get("member-vnf-index")
1539 internal_status
= configuration_status_list
[index
].get("status")
1540 if internal_status
== "READY":
1542 elif internal_status
== "BROKEN":
1544 "Configuration aborted because dependent charm/s has failed"
1549 # no dependencies, return
1551 await asyncio
.sleep(10)
1554 raise LcmException("Configuration aborted because dependent charm/s timeout")
1556 def get_vca_id(self
, db_vnfr
: dict, db_nsr
: dict):
1559 vca_id
= deep_get(db_vnfr
, ("vca-id",))
1561 vim_account_id
= deep_get(db_nsr
, ("instantiate_params", "vimAccountId"))
1562 vca_id
= VimAccountDB
.get_vim_account_with_id(vim_account_id
).get("vca")
1565 async def instantiate_N2VC(
1583 ee_config_descriptor
,
1585 nsr_id
= db_nsr
["_id"]
1586 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
1587 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1588 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
1589 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
1591 "collection": "nsrs",
1592 "filter": {"_id": nsr_id
},
1593 "path": db_update_entry
,
1598 element_under_configuration
= nsr_id
1602 vnfr_id
= db_vnfr
["_id"]
1603 osm_config
["osm"]["vnf_id"] = vnfr_id
1605 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
1607 if vca_type
== "native_charm":
1610 index_number
= vdu_index
or 0
1613 element_type
= "VNF"
1614 element_under_configuration
= vnfr_id
1615 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
1617 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
1618 element_type
= "VDU"
1619 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
1620 osm_config
["osm"]["vdu_id"] = vdu_id
1622 namespace
+= ".{}".format(kdu_name
)
1623 element_type
= "KDU"
1624 element_under_configuration
= kdu_name
1625 osm_config
["osm"]["kdu_name"] = kdu_name
1628 if base_folder
["pkg-dir"]:
1629 artifact_path
= "{}/{}/{}/{}".format(
1630 base_folder
["folder"],
1631 base_folder
["pkg-dir"],
1634 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1639 artifact_path
= "{}/Scripts/{}/{}/".format(
1640 base_folder
["folder"],
1643 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1648 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
1650 # get initial_config_primitive_list that applies to this element
1651 initial_config_primitive_list
= config_descriptor
.get(
1652 "initial-config-primitive"
1656 "Initial config primitive list > {}".format(
1657 initial_config_primitive_list
1661 # add config if not present for NS charm
1662 ee_descriptor_id
= ee_config_descriptor
.get("id")
1663 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
1664 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
1665 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
1669 "Initial config primitive list #2 > {}".format(
1670 initial_config_primitive_list
1673 # n2vc_redesign STEP 3.1
1674 # find old ee_id if exists
1675 ee_id
= vca_deployed
.get("ee_id")
1677 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
1678 # create or register execution environment in VCA
1679 if vca_type
in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1680 self
._write
_configuration
_status
(
1682 vca_index
=vca_index
,
1684 element_under_configuration
=element_under_configuration
,
1685 element_type
=element_type
,
1688 step
= "create execution environment"
1689 self
.logger
.debug(logging_text
+ step
)
1693 if vca_type
== "k8s_proxy_charm":
1694 ee_id
= await self
.vca_map
[vca_type
].install_k8s_proxy_charm(
1695 charm_name
=artifact_path
[artifact_path
.rfind("/") + 1 :],
1696 namespace
=namespace
,
1697 artifact_path
=artifact_path
,
1701 elif vca_type
== "helm-v3":
1702 ee_id
, credentials
= await self
.vca_map
[
1704 ].create_execution_environment(
1709 artifact_path
=artifact_path
,
1710 chart_model
=vca_name
,
1714 ee_id
, credentials
= await self
.vca_map
[
1716 ].create_execution_environment(
1717 namespace
=namespace
,
1723 elif vca_type
== "native_charm":
1724 step
= "Waiting to VM being up and getting IP address"
1725 self
.logger
.debug(logging_text
+ step
)
1726 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1735 credentials
= {"hostname": rw_mgmt_ip
}
1737 username
= deep_get(
1738 config_descriptor
, ("config-access", "ssh-access", "default-user")
1740 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1741 # merged. Meanwhile let's get username from initial-config-primitive
1742 if not username
and initial_config_primitive_list
:
1743 for config_primitive
in initial_config_primitive_list
:
1744 for param
in config_primitive
.get("parameter", ()):
1745 if param
["name"] == "ssh-username":
1746 username
= param
["value"]
1750 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1751 "'config-access.ssh-access.default-user'"
1753 credentials
["username"] = username
1754 # n2vc_redesign STEP 3.2
1756 self
._write
_configuration
_status
(
1758 vca_index
=vca_index
,
1759 status
="REGISTERING",
1760 element_under_configuration
=element_under_configuration
,
1761 element_type
=element_type
,
1764 step
= "register execution environment {}".format(credentials
)
1765 self
.logger
.debug(logging_text
+ step
)
1766 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
1767 credentials
=credentials
,
1768 namespace
=namespace
,
1773 # for compatibility with MON/POL modules, the need model and application name at database
1774 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1775 ee_id_parts
= ee_id
.split(".")
1776 db_nsr_update
= {db_update_entry
+ "ee_id": ee_id
}
1777 if len(ee_id_parts
) >= 2:
1778 model_name
= ee_id_parts
[0]
1779 application_name
= ee_id_parts
[1]
1780 db_nsr_update
[db_update_entry
+ "model"] = model_name
1781 db_nsr_update
[db_update_entry
+ "application"] = application_name
1783 # n2vc_redesign STEP 3.3
1784 step
= "Install configuration Software"
1786 self
._write
_configuration
_status
(
1788 vca_index
=vca_index
,
1789 status
="INSTALLING SW",
1790 element_under_configuration
=element_under_configuration
,
1791 element_type
=element_type
,
1792 other_update
=db_nsr_update
,
1795 # TODO check if already done
1796 self
.logger
.debug(logging_text
+ step
)
1798 if vca_type
== "native_charm":
1799 config_primitive
= next(
1800 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
1803 if config_primitive
:
1804 config
= self
._map
_primitive
_params
(
1805 config_primitive
, {}, deploy_params
1808 if vca_type
== "lxc_proxy_charm":
1809 if element_type
== "NS":
1810 num_units
= db_nsr
.get("config-units") or 1
1811 elif element_type
== "VNF":
1812 num_units
= db_vnfr
.get("config-units") or 1
1813 elif element_type
== "VDU":
1814 for v
in db_vnfr
["vdur"]:
1815 if vdu_id
== v
["vdu-id-ref"]:
1816 num_units
= v
.get("config-units") or 1
1818 if vca_type
!= "k8s_proxy_charm":
1819 await self
.vca_map
[vca_type
].install_configuration_sw(
1821 artifact_path
=artifact_path
,
1824 num_units
=num_units
,
1829 # write in db flag of configuration_sw already installed
1831 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
1834 # add relations for this VCA (wait for other peers related with this VCA)
1835 is_relation_added
= await self
._add
_vca
_relations
(
1836 logging_text
=logging_text
,
1839 vca_index
=vca_index
,
1842 if not is_relation_added
:
1843 raise LcmException("Relations could not be added to VCA.")
1845 # if SSH access is required, then get execution environment SSH public
1846 # if native charm we have waited already to VM be UP
1847 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1850 # self.logger.debug("get ssh key block")
1852 config_descriptor
, ("config-access", "ssh-access", "required")
1854 # self.logger.debug("ssh key needed")
1855 # Needed to inject a ssh key
1858 ("config-access", "ssh-access", "default-user"),
1860 step
= "Install configuration Software, getting public ssh key"
1861 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
1862 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
1865 step
= "Insert public key into VM user={} ssh_key={}".format(
1869 # self.logger.debug("no need to get ssh key")
1870 step
= "Waiting to VM being up and getting IP address"
1871 self
.logger
.debug(logging_text
+ step
)
1873 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1876 # n2vc_redesign STEP 5.1
1877 # wait for RO (ip-address) Insert pub_key into VM
1880 rw_mgmt_ip
, services
= await self
.wait_kdu_up(
1881 logging_text
, nsr_id
, vnfr_id
, kdu_name
1883 vnfd
= self
.db
.get_one(
1885 {"_id": f
'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
1887 kdu
= get_kdu(vnfd
, kdu_name
)
1889 service
["name"] for service
in get_kdu_services(kdu
)
1891 exposed_services
= []
1892 for service
in services
:
1893 if any(s
in service
["name"] for s
in kdu_services
):
1894 exposed_services
.append(service
)
1895 await self
.vca_map
[vca_type
].exec_primitive(
1897 primitive_name
="config",
1899 "osm-config": json
.dumps(
1901 k8s
={"services": exposed_services
}
1908 # This verification is needed in order to avoid trying to add a public key
1909 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
1910 # for a KNF and not for its KDUs, the previous verification gives False, and the code
1911 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
1913 elif db_vnfr
.get("vdur"):
1914 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1924 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
1926 # store rw_mgmt_ip in deploy params for later replacement
1927 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
1929 # n2vc_redesign STEP 6 Execute initial config primitive
1930 step
= "execute initial config primitive"
1932 # wait for dependent primitives execution (NS -> VNF -> VDU)
1933 if initial_config_primitive_list
:
1934 await self
._wait
_dependent
_n
2vc
(nsr_id
, vca_deployed_list
, vca_index
)
1936 # stage, in function of element type: vdu, kdu, vnf or ns
1937 my_vca
= vca_deployed_list
[vca_index
]
1938 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1940 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
1941 elif my_vca
.get("member-vnf-index"):
1943 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
1946 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
1948 self
._write
_configuration
_status
(
1949 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
1952 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
1954 check_if_terminated_needed
= True
1955 for initial_config_primitive
in initial_config_primitive_list
:
1956 # adding information on the vca_deployed if it is a NS execution environment
1957 if not vca_deployed
["member-vnf-index"]:
1958 deploy_params
["ns_config_info"] = json
.dumps(
1959 self
._get
_ns
_config
_info
(nsr_id
)
1961 # TODO check if already done
1962 primitive_params_
= self
._map
_primitive
_params
(
1963 initial_config_primitive
, {}, deploy_params
1966 step
= "execute primitive '{}' params '{}'".format(
1967 initial_config_primitive
["name"], primitive_params_
1969 self
.logger
.debug(logging_text
+ step
)
1970 await self
.vca_map
[vca_type
].exec_primitive(
1972 primitive_name
=initial_config_primitive
["name"],
1973 params_dict
=primitive_params_
,
1978 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1979 if check_if_terminated_needed
:
1980 if config_descriptor
.get("terminate-config-primitive"):
1982 "nsrs", nsr_id
, {db_update_entry
+ "needed_terminate": True}
1984 check_if_terminated_needed
= False
1986 # TODO register in database that primitive is done
1988 # STEP 7 Configure metrics
1989 if vca_type
== "helm-v3":
1990 # TODO: review for those cases where the helm chart is a reference and
1991 # is not part of the NF package
1992 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
1994 artifact_path
=artifact_path
,
1995 ee_config_descriptor
=ee_config_descriptor
,
1998 target_ip
=rw_mgmt_ip
,
1999 element_type
=element_type
,
2000 vnf_member_index
=db_vnfr
.get("member-vnf-index-ref", ""),
2002 vdu_index
=vdu_index
,
2004 kdu_index
=kdu_index
,
2010 {db_update_entry
+ "prometheus_jobs": prometheus_jobs
},
2013 for job
in prometheus_jobs
:
2016 {"job_name": job
["job_name"]},
2019 fail_on_empty
=False,
2022 step
= "instantiated at VCA"
2023 self
.logger
.debug(logging_text
+ step
)
2025 self
._write
_configuration
_status
(
2026 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
2029 except Exception as e
: # TODO not use Exception but N2VC exception
2030 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2032 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
2035 "Exception while {} : {}".format(step
, e
), exc_info
=True
2037 self
._write
_configuration
_status
(
2038 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
2040 raise LcmException("{}. {}".format(step
, e
)) from e
2042 def _write_ns_status(
2046 current_operation
: str,
2047 current_operation_id
: str,
2048 error_description
: str = None,
2049 error_detail
: str = None,
2050 other_update
: dict = None,
2053 Update db_nsr fields.
2056 :param current_operation:
2057 :param current_operation_id:
2058 :param error_description:
2059 :param error_detail:
2060 :param other_update: Other required changes at database if provided, will be cleared
2064 db_dict
= other_update
or {}
2067 ] = current_operation_id
# for backward compatibility
2068 db_dict
["_admin.current-operation"] = current_operation_id
2069 db_dict
["_admin.operation-type"] = (
2070 current_operation
if current_operation
!= "IDLE" else None
2072 db_dict
["currentOperation"] = current_operation
2073 db_dict
["currentOperationID"] = current_operation_id
2074 db_dict
["errorDescription"] = error_description
2075 db_dict
["errorDetail"] = error_detail
2078 db_dict
["nsState"] = ns_state
2079 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2080 except DbException
as e
:
2081 self
.logger
.warn("Error writing NS status, ns={}: {}".format(nsr_id
, e
))
2083 def _write_op_status(
2087 error_message
: str = None,
2088 queuePosition
: int = 0,
2089 operation_state
: str = None,
2090 other_update
: dict = None,
2093 db_dict
= other_update
or {}
2094 db_dict
["queuePosition"] = queuePosition
2095 if isinstance(stage
, list):
2096 db_dict
["stage"] = stage
[0]
2097 db_dict
["detailed-status"] = " ".join(stage
)
2098 elif stage
is not None:
2099 db_dict
["stage"] = str(stage
)
2101 if error_message
is not None:
2102 db_dict
["errorMessage"] = error_message
2103 if operation_state
is not None:
2104 db_dict
["operationState"] = operation_state
2105 db_dict
["statusEnteredTime"] = time()
2106 self
.update_db_2("nslcmops", op_id
, db_dict
)
2107 except DbException
as e
:
2109 "Error writing OPERATION status for op_id: {} -> {}".format(op_id
, e
)
2112 def _write_all_config_status(self
, db_nsr
: dict, status
: str):
2114 nsr_id
= db_nsr
["_id"]
2115 # configurationStatus
2116 config_status
= db_nsr
.get("configurationStatus")
2119 "configurationStatus.{}.status".format(index
): status
2120 for index
, v
in enumerate(config_status
)
2124 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2126 except DbException
as e
:
2128 "Error writing all configuration status, ns={}: {}".format(nsr_id
, e
)
2131 def _write_configuration_status(
2136 element_under_configuration
: str = None,
2137 element_type
: str = None,
2138 other_update
: dict = None,
2140 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2141 # .format(vca_index, status))
2144 db_path
= "configurationStatus.{}.".format(vca_index
)
2145 db_dict
= other_update
or {}
2147 db_dict
[db_path
+ "status"] = status
2148 if element_under_configuration
:
2150 db_path
+ "elementUnderConfiguration"
2151 ] = element_under_configuration
2153 db_dict
[db_path
+ "elementType"] = element_type
2154 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2155 except DbException
as e
:
2157 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2158 status
, nsr_id
, vca_index
, e
2162 async def _do_placement(self
, logging_text
, db_nslcmop
, db_vnfrs
):
2164 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2165 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2166 Database is used because the result can be obtained from a different LCM worker in case of HA.
2167 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2168 :param db_nslcmop: database content of nslcmop
2169 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2170 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2171 computed 'vim-account-id'
2174 nslcmop_id
= db_nslcmop
["_id"]
2175 placement_engine
= deep_get(db_nslcmop
, ("operationParams", "placement-engine"))
2176 if placement_engine
== "PLA":
2178 logging_text
+ "Invoke and wait for placement optimization"
2180 await self
.msg
.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id
})
2181 db_poll_interval
= 5
2182 wait
= db_poll_interval
* 10
2184 while not pla_result
and wait
>= 0:
2185 await asyncio
.sleep(db_poll_interval
)
2186 wait
-= db_poll_interval
2187 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2188 pla_result
= deep_get(db_nslcmop
, ("_admin", "pla"))
2192 "Placement timeout for nslcmopId={}".format(nslcmop_id
)
2195 for pla_vnf
in pla_result
["vnf"]:
2196 vnfr
= db_vnfrs
.get(pla_vnf
["member-vnf-index"])
2197 if not pla_vnf
.get("vimAccountId") or not vnfr
:
2202 {"_id": vnfr
["_id"]},
2203 {"vim-account-id": pla_vnf
["vimAccountId"]},
2206 vnfr
["vim-account-id"] = pla_vnf
["vimAccountId"]
2209 def _gather_vnfr_healing_alerts(self
, vnfr
, vnfd
):
2211 nsr_id
= vnfr
["nsr-id-ref"]
2212 df
= vnfd
.get("df", [{}])[0]
2213 # Checking for auto-healing configuration
2214 if "healing-aspect" in df
:
2215 healing_aspects
= df
["healing-aspect"]
2216 for healing
in healing_aspects
:
2217 for healing_policy
in healing
.get("healing-policy", ()):
2218 vdu_id
= healing_policy
["vdu-id"]
2220 (vdur
for vdur
in vnfr
["vdur"] if vdu_id
== vdur
["vdu-id-ref"]),
2225 metric_name
= "vm_status"
2226 vdu_name
= vdur
.get("name")
2227 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2229 name
= f
"healing_{uuid}"
2230 action
= healing_policy
2231 # action_on_recovery = healing.get("action-on-recovery")
2232 # cooldown_time = healing.get("cooldown-time")
2233 # day1 = healing.get("day1")
2237 "metric": metric_name
,
2240 "vnf_member_index": vnf_member_index
,
2241 "vdu_name": vdu_name
,
2243 "alarm_status": "ok",
2244 "action_type": "healing",
2247 alerts
.append(alert
)
2250 def _gather_vnfr_scaling_alerts(self
, vnfr
, vnfd
):
2252 nsr_id
= vnfr
["nsr-id-ref"]
2253 df
= vnfd
.get("df", [{}])[0]
2254 # Checking for auto-scaling configuration
2255 if "scaling-aspect" in df
:
2256 scaling_aspects
= df
["scaling-aspect"]
2257 all_vnfd_monitoring_params
= {}
2258 for ivld
in vnfd
.get("int-virtual-link-desc", ()):
2259 for mp
in ivld
.get("monitoring-parameters", ()):
2260 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2261 for vdu
in vnfd
.get("vdu", ()):
2262 for mp
in vdu
.get("monitoring-parameter", ()):
2263 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2264 for df
in vnfd
.get("df", ()):
2265 for mp
in df
.get("monitoring-parameter", ()):
2266 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2267 for scaling_aspect
in scaling_aspects
:
2268 scaling_group_name
= scaling_aspect
.get("name", "")
2269 # Get monitored VDUs
2270 all_monitored_vdus
= set()
2271 for delta
in scaling_aspect
.get("aspect-delta-details", {}).get(
2274 for vdu_delta
in delta
.get("vdu-delta", ()):
2275 all_monitored_vdus
.add(vdu_delta
.get("id"))
2276 monitored_vdurs
= list(
2278 lambda vdur
: vdur
["vdu-id-ref"] in all_monitored_vdus
,
2282 if not monitored_vdurs
:
2284 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2287 for scaling_policy
in scaling_aspect
.get("scaling-policy", ()):
2288 if scaling_policy
["scaling-type"] != "automatic":
2290 threshold_time
= scaling_policy
.get("threshold-time", "1")
2291 cooldown_time
= scaling_policy
.get("cooldown-time", "0")
2292 for scaling_criteria
in scaling_policy
["scaling-criteria"]:
2293 monitoring_param_ref
= scaling_criteria
.get(
2294 "vnf-monitoring-param-ref"
2296 vnf_monitoring_param
= all_vnfd_monitoring_params
[
2297 monitoring_param_ref
2299 for vdur
in monitored_vdurs
:
2300 vdu_id
= vdur
["vdu-id-ref"]
2301 metric_name
= vnf_monitoring_param
.get("performance-metric")
2302 metric_name
= f
"osm_{metric_name}"
2303 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2304 scalein_threshold
= scaling_criteria
.get(
2305 "scale-in-threshold"
2307 scaleout_threshold
= scaling_criteria
.get(
2308 "scale-out-threshold"
2310 # Looking for min/max-number-of-instances
2311 instances_min_number
= 1
2312 instances_max_number
= 1
2313 vdu_profile
= df
["vdu-profile"]
2316 item
for item
in vdu_profile
if item
["id"] == vdu_id
2318 instances_min_number
= profile
.get(
2319 "min-number-of-instances", 1
2321 instances_max_number
= profile
.get(
2322 "max-number-of-instances", 1
2325 if scalein_threshold
:
2327 name
= f
"scalein_{uuid}"
2328 operation
= scaling_criteria
[
2329 "scale-in-relational-operation"
2331 rel_operator
= self
.rel_operation_types
.get(
2334 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2335 expression
= f
"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2338 "vnf_member_index": vnf_member_index
,
2344 "for": str(threshold_time
) + "m",
2347 action
= scaling_policy
2349 "scaling-group": scaling_group_name
,
2350 "cooldown-time": cooldown_time
,
2355 "metric": metric_name
,
2358 "vnf_member_index": vnf_member_index
,
2361 "alarm_status": "ok",
2362 "action_type": "scale_in",
2364 "prometheus_config": prom_cfg
,
2366 alerts
.append(alert
)
2368 if scaleout_threshold
:
2370 name
= f
"scaleout_{uuid}"
2371 operation
= scaling_criteria
[
2372 "scale-out-relational-operation"
2374 rel_operator
= self
.rel_operation_types
.get(
2377 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2378 expression
= f
"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2381 "vnf_member_index": vnf_member_index
,
2387 "for": str(threshold_time
) + "m",
2390 action
= scaling_policy
2392 "scaling-group": scaling_group_name
,
2393 "cooldown-time": cooldown_time
,
2398 "metric": metric_name
,
2401 "vnf_member_index": vnf_member_index
,
2404 "alarm_status": "ok",
2405 "action_type": "scale_out",
2407 "prometheus_config": prom_cfg
,
2409 alerts
.append(alert
)
2412 def _gather_vnfr_alarm_alerts(self
, vnfr
, vnfd
):
2414 nsr_id
= vnfr
["nsr-id-ref"]
2415 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2417 # Checking for VNF alarm configuration
2418 for vdur
in vnfr
["vdur"]:
2419 vdu_id
= vdur
["vdu-id-ref"]
2420 vdu
= next(filter(lambda vdu
: vdu
["id"] == vdu_id
, vnfd
["vdu"]))
2422 # Get VDU monitoring params, since alerts are based on them
2423 vdu_monitoring_params
= {}
2424 for mp
in vdu
.get("monitoring-parameter", []):
2425 vdu_monitoring_params
[mp
.get("id")] = mp
2426 if not vdu_monitoring_params
:
2428 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2431 # Get alarms in the VDU
2432 alarm_descriptors
= vdu
["alarm"]
2433 # Create VDU alarms for each alarm in the VDU
2434 for alarm_descriptor
in alarm_descriptors
:
2435 # Check that the VDU alarm refers to a proper monitoring param
2436 alarm_monitoring_param
= alarm_descriptor
.get(
2437 "vnf-monitoring-param-ref", ""
2439 vdu_specific_monitoring_param
= vdu_monitoring_params
.get(
2440 alarm_monitoring_param
, {}
2442 if not vdu_specific_monitoring_param
:
2444 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2447 metric_name
= vdu_specific_monitoring_param
.get(
2448 "performance-metric"
2452 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2455 # Set params of the alarm to be created in Prometheus
2456 metric_name
= f
"osm_{metric_name}"
2457 metric_threshold
= alarm_descriptor
.get("value")
2459 alert_name
= f
"vdu_alarm_{uuid}"
2460 operation
= alarm_descriptor
["operation"]
2461 rel_operator
= self
.rel_operation_types
.get(operation
, "<=")
2462 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2463 expression
= f
"{metric_selector} {rel_operator} {metric_threshold}"
2466 "vnf_member_index": vnf_member_index
,
2468 "vdu_name": "{{ $labels.vdu_name }}",
2471 "alert": alert_name
,
2473 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2476 alarm_action
= dict()
2477 for action_type
in ["ok", "insufficient-data", "alarm"]:
2479 "actions" in alarm_descriptor
2480 and action_type
in alarm_descriptor
["actions"]
2482 alarm_action
[action_type
] = alarm_descriptor
["actions"][
2488 "metric": metric_name
,
2491 "vnf_member_index": vnf_member_index
,
2494 "alarm_status": "ok",
2495 "action_type": "vdu_alarm",
2496 "action": alarm_action
,
2497 "prometheus_config": prom_cfg
,
2499 alerts
.append(alert
)
2502 def update_nsrs_with_pla_result(self
, params
):
2504 nslcmop_id
= deep_get(params
, ("placement", "nslcmopId"))
2506 "nslcmops", nslcmop_id
, {"_admin.pla": params
.get("placement")}
2508 except Exception as e
:
2509 self
.logger
.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id
, e
))
2511 async def instantiate(self
, nsr_id
, nslcmop_id
):
2514 :param nsr_id: ns instance to deploy
2515 :param nslcmop_id: operation to run
2519 # Try to lock HA task here
2520 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
2521 if not task_is_locked_by_me
:
2523 "instantiate() task is not locked by me, ns={}".format(nsr_id
)
2527 logging_text
= "Task ns={} instantiate={} ".format(nsr_id
, nslcmop_id
)
2528 self
.logger
.debug(logging_text
+ "Enter")
2530 # get all needed from database
2532 # database nsrs record
2535 # database nslcmops record
2538 # update operation on nsrs
2540 # update operation on nslcmops
2541 db_nslcmop_update
= {}
2543 timeout_ns_deploy
= self
.timeout
.ns_deploy
2545 nslcmop_operation_state
= None
2546 db_vnfrs
= {} # vnf's info indexed by member-index
2548 tasks_dict_info
= {} # from task to info text
2552 "Stage 1/5: preparation of the environment.",
2553 "Waiting for previous operations to terminate.",
2556 # ^ stage, step, VIM progress
2558 # wait for any previous tasks in process
2559 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
2561 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2562 stage
[1] = "Reading from database."
2563 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2564 db_nsr_update
["detailed-status"] = "creating"
2565 db_nsr_update
["operational-status"] = "init"
2566 self
._write
_ns
_status
(
2568 ns_state
="BUILDING",
2569 current_operation
="INSTANTIATING",
2570 current_operation_id
=nslcmop_id
,
2571 other_update
=db_nsr_update
,
2573 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
2575 # read from db: operation
2576 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
2577 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2578 if db_nslcmop
["operationParams"].get("additionalParamsForVnf"):
2579 db_nslcmop
["operationParams"]["additionalParamsForVnf"] = json
.loads(
2580 db_nslcmop
["operationParams"]["additionalParamsForVnf"]
2582 ns_params
= db_nslcmop
.get("operationParams")
2583 if ns_params
and ns_params
.get("timeout_ns_deploy"):
2584 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
2587 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
2588 self
.logger
.debug(logging_text
+ stage
[1])
2589 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
2590 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
2591 self
.logger
.debug(logging_text
+ stage
[1])
2592 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
2593 self
.fs
.sync(db_nsr
["nsd-id"])
2595 # nsr_name = db_nsr["name"] # TODO short-name??
2597 # read from db: vnf's of this ns
2598 stage
[1] = "Getting vnfrs from db."
2599 self
.logger
.debug(logging_text
+ stage
[1])
2600 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
2602 # read from db: vnfd's for every vnf
2603 db_vnfds
= [] # every vnfd data
2605 # for each vnf in ns, read vnfd
2606 for vnfr
in db_vnfrs_list
:
2607 if vnfr
.get("kdur"):
2609 for kdur
in vnfr
["kdur"]:
2610 if kdur
.get("additionalParams"):
2611 kdur
["additionalParams"] = json
.loads(
2612 kdur
["additionalParams"]
2614 kdur_list
.append(kdur
)
2615 vnfr
["kdur"] = kdur_list
2617 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
2618 vnfd_id
= vnfr
["vnfd-id"]
2619 vnfd_ref
= vnfr
["vnfd-ref"]
2620 self
.fs
.sync(vnfd_id
)
2622 # if we haven't this vnfd, read it from db
2623 if vnfd_id
not in db_vnfds
:
2625 stage
[1] = "Getting vnfd={} id='{}' from db.".format(
2628 self
.logger
.debug(logging_text
+ stage
[1])
2629 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
2632 db_vnfds
.append(vnfd
)
2634 # Get or generates the _admin.deployed.VCA list
2635 vca_deployed_list
= None
2636 if db_nsr
["_admin"].get("deployed"):
2637 vca_deployed_list
= db_nsr
["_admin"]["deployed"].get("VCA")
2638 if vca_deployed_list
is None:
2639 vca_deployed_list
= []
2640 configuration_status_list
= []
2641 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2642 db_nsr_update
["configurationStatus"] = configuration_status_list
2643 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2644 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2645 elif isinstance(vca_deployed_list
, dict):
2646 # maintain backward compatibility. Change a dict to list at database
2647 vca_deployed_list
= list(vca_deployed_list
.values())
2648 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2649 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2652 deep_get(db_nsr
, ("_admin", "deployed", "RO", "vnfd")), list
2654 populate_dict(db_nsr
, ("_admin", "deployed", "RO", "vnfd"), [])
2655 db_nsr_update
["_admin.deployed.RO.vnfd"] = []
2657 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2658 db_nsr_update
["_admin.nsState"] = "INSTANTIATED"
2659 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2661 "vnfrs", {"nsr-id-ref": nsr_id
}, {"_admin.nsState": "INSTANTIATED"}
2664 # n2vc_redesign STEP 2 Deploy Network Scenario
2665 stage
[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2666 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2668 stage
[1] = "Deploying KDUs."
2669 # self.logger.debug(logging_text + "Before deploy_kdus")
2670 # Call to deploy_kdus in case exists the "vdu:kdu" param
2671 await self
.deploy_kdus(
2672 logging_text
=logging_text
,
2674 nslcmop_id
=nslcmop_id
,
2677 task_instantiation_info
=tasks_dict_info
,
2680 stage
[1] = "Getting VCA public key."
2681 # n2vc_redesign STEP 1 Get VCA public ssh-key
2682 # feature 1429. Add n2vc public key to needed VMs
2683 n2vc_key
= self
.n2vc
.get_public_key()
2684 n2vc_key_list
= [n2vc_key
]
2685 if self
.vca_config
.public_key
:
2686 n2vc_key_list
.append(self
.vca_config
.public_key
)
2688 stage
[1] = "Deploying NS at VIM."
2689 task_ro
= asyncio
.ensure_future(
2690 self
.instantiate_RO(
2691 logging_text
=logging_text
,
2695 db_nslcmop
=db_nslcmop
,
2698 n2vc_key_list
=n2vc_key_list
,
2702 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "instantiate_RO", task_ro
)
2703 tasks_dict_info
[task_ro
] = "Deploying at VIM"
2705 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2706 stage
[1] = "Deploying Execution Environments."
2707 self
.logger
.debug(logging_text
+ stage
[1])
2709 # create namespace and certificate if any helm based EE is present in the NS
2710 if check_helm_ee_in_ns(db_vnfds
):
2711 await self
.vca_map
["helm-v3"].setup_ns_namespace(
2714 # create TLS certificates
2715 await self
.vca_map
["helm-v3"].create_tls_certificate(
2716 secret_name
=self
.EE_TLS_NAME
,
2719 usage
="server auth",
2723 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
2724 for vnf_profile
in get_vnf_profiles(nsd
):
2725 vnfd_id
= vnf_profile
["vnfd-id"]
2726 vnfd
= find_in_list(db_vnfds
, lambda a_vnf
: a_vnf
["id"] == vnfd_id
)
2727 member_vnf_index
= str(vnf_profile
["id"])
2728 db_vnfr
= db_vnfrs
[member_vnf_index
]
2729 base_folder
= vnfd
["_admin"]["storage"]
2736 # Get additional parameters
2737 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
2738 if db_vnfr
.get("additionalParamsForVnf"):
2739 deploy_params
.update(
2740 parse_yaml_strings(db_vnfr
["additionalParamsForVnf"].copy())
2743 descriptor_config
= get_configuration(vnfd
, vnfd
["id"])
2744 if descriptor_config
:
2746 logging_text
=logging_text
2747 + "member_vnf_index={} ".format(member_vnf_index
),
2750 nslcmop_id
=nslcmop_id
,
2756 member_vnf_index
=member_vnf_index
,
2757 vdu_index
=vdu_index
,
2758 kdu_index
=kdu_index
,
2760 deploy_params
=deploy_params
,
2761 descriptor_config
=descriptor_config
,
2762 base_folder
=base_folder
,
2763 task_instantiation_info
=tasks_dict_info
,
2767 # Deploy charms for each VDU that supports one.
2768 for vdud
in get_vdu_list(vnfd
):
2770 descriptor_config
= get_configuration(vnfd
, vdu_id
)
2771 vdur
= find_in_list(
2772 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
2775 if vdur
.get("additionalParams"):
2776 deploy_params_vdu
= parse_yaml_strings(vdur
["additionalParams"])
2778 deploy_params_vdu
= deploy_params
2779 deploy_params_vdu
["OSM"] = get_osm_params(
2780 db_vnfr
, vdu_id
, vdu_count_index
=0
2782 vdud_count
= get_number_of_instances(vnfd
, vdu_id
)
2784 self
.logger
.debug("VDUD > {}".format(vdud
))
2786 "Descriptor config > {}".format(descriptor_config
)
2788 if descriptor_config
:
2792 for vdu_index
in range(vdud_count
):
2793 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2795 logging_text
=logging_text
2796 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2797 member_vnf_index
, vdu_id
, vdu_index
2801 nslcmop_id
=nslcmop_id
,
2807 kdu_index
=kdu_index
,
2808 member_vnf_index
=member_vnf_index
,
2809 vdu_index
=vdu_index
,
2811 deploy_params
=deploy_params_vdu
,
2812 descriptor_config
=descriptor_config
,
2813 base_folder
=base_folder
,
2814 task_instantiation_info
=tasks_dict_info
,
2817 for kdud
in get_kdu_list(vnfd
):
2818 kdu_name
= kdud
["name"]
2819 descriptor_config
= get_configuration(vnfd
, kdu_name
)
2820 if descriptor_config
:
2824 kdu_index
, kdur
= next(
2826 for x
in enumerate(db_vnfr
["kdur"])
2827 if x
[1]["kdu-name"] == kdu_name
2829 deploy_params_kdu
= {"OSM": get_osm_params(db_vnfr
)}
2830 if kdur
.get("additionalParams"):
2831 deploy_params_kdu
.update(
2832 parse_yaml_strings(kdur
["additionalParams"].copy())
2836 logging_text
=logging_text
,
2839 nslcmop_id
=nslcmop_id
,
2845 member_vnf_index
=member_vnf_index
,
2846 vdu_index
=vdu_index
,
2847 kdu_index
=kdu_index
,
2849 deploy_params
=deploy_params_kdu
,
2850 descriptor_config
=descriptor_config
,
2851 base_folder
=base_folder
,
2852 task_instantiation_info
=tasks_dict_info
,
2856 # Check if each vnf has exporter for metric collection if so update prometheus job records
2857 if "exporters-endpoints" in vnfd
.get("df")[0]:
2858 exporter_config
= vnfd
.get("df")[0].get("exporters-endpoints")
2859 self
.logger
.debug("exporter config :{}".format(exporter_config
))
2860 artifact_path
= "{}/{}/{}".format(
2861 base_folder
["folder"],
2862 base_folder
["pkg-dir"],
2863 "exporter-endpoint",
2866 ee_config_descriptor
= exporter_config
2867 vnfr_id
= db_vnfr
["id"]
2868 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
2877 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
2878 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
2879 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
2880 vdu_id_for_prom
= None
2881 vdu_index_for_prom
= None
2882 for x
in get_iterable(db_vnfr
, "vdur"):
2883 vdu_id_for_prom
= x
.get("vdu-id-ref")
2884 vdu_index_for_prom
= x
.get("count-index")
2885 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
2887 artifact_path
=artifact_path
,
2888 ee_config_descriptor
=ee_config_descriptor
,
2891 target_ip
=rw_mgmt_ip
,
2893 vdu_id
=vdu_id_for_prom
,
2894 vdu_index
=vdu_index_for_prom
,
2897 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
2899 db_nsr_update
["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2906 for job
in prometheus_jobs
:
2909 {"job_name": job
["job_name"]},
2912 fail_on_empty
=False,
2915 # Check if this NS has a charm configuration
2916 descriptor_config
= nsd
.get("ns-configuration")
2917 if descriptor_config
and descriptor_config
.get("juju"):
2920 member_vnf_index
= None
2927 # Get additional parameters
2928 deploy_params
= {"OSM": {"vim_account_id": ns_params
["vimAccountId"]}}
2929 if db_nsr
.get("additionalParamsForNs"):
2930 deploy_params
.update(
2931 parse_yaml_strings(db_nsr
["additionalParamsForNs"].copy())
2933 base_folder
= nsd
["_admin"]["storage"]
2935 logging_text
=logging_text
,
2938 nslcmop_id
=nslcmop_id
,
2944 member_vnf_index
=member_vnf_index
,
2945 vdu_index
=vdu_index
,
2946 kdu_index
=kdu_index
,
2948 deploy_params
=deploy_params
,
2949 descriptor_config
=descriptor_config
,
2950 base_folder
=base_folder
,
2951 task_instantiation_info
=tasks_dict_info
,
2955 # rest of staff will be done at finally
2958 ROclient
.ROClientException
,
2964 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
)
2967 except asyncio
.CancelledError
:
2969 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
2971 exc
= "Operation was cancelled"
2972 except Exception as e
:
2973 exc
= traceback
.format_exc()
2974 self
.logger
.critical(
2975 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
2980 error_list
.append(str(exc
))
2982 # wait for pending tasks
2984 stage
[1] = "Waiting for instantiate pending tasks."
2985 self
.logger
.debug(logging_text
+ stage
[1])
2986 error_list
+= await self
._wait
_for
_tasks
(
2994 stage
[1] = stage
[2] = ""
2995 except asyncio
.CancelledError
:
2996 error_list
.append("Cancelled")
2997 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
2998 await self
._wait
_for
_tasks
(
3006 except Exception as exc
:
3007 error_list
.append(str(exc
))
3009 # update operation-status
3010 db_nsr_update
["operational-status"] = "running"
3011 # let's begin with VCA 'configured' status (later we can change it)
3012 db_nsr_update
["config-status"] = "configured"
3013 for task
, task_name
in tasks_dict_info
.items():
3014 if not task
.done() or task
.cancelled() or task
.exception():
3015 if task_name
.startswith(self
.task_name_deploy_vca
):
3016 # A N2VC task is pending
3017 db_nsr_update
["config-status"] = "failed"
3019 # RO or KDU task is pending
3020 db_nsr_update
["operational-status"] = "failed"
3022 # update status at database
3024 error_detail
= ". ".join(error_list
)
3025 self
.logger
.error(logging_text
+ error_detail
)
3026 error_description_nslcmop
= "{} Detail: {}".format(
3027 stage
[0], error_detail
3029 error_description_nsr
= "Operation: INSTANTIATING.{}, {}".format(
3030 nslcmop_id
, stage
[0]
3033 db_nsr_update
["detailed-status"] = (
3034 error_description_nsr
+ " Detail: " + error_detail
3036 db_nslcmop_update
["detailed-status"] = error_detail
3037 nslcmop_operation_state
= "FAILED"
3041 error_description_nsr
= error_description_nslcmop
= None
3043 db_nsr_update
["detailed-status"] = "Done"
3044 db_nslcmop_update
["detailed-status"] = "Done"
3045 nslcmop_operation_state
= "COMPLETED"
3046 # Gather auto-healing and auto-scaling alerts for each vnfr
3049 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
3051 (sub
for sub
in db_vnfds
if sub
["_id"] == vnfr
["vnfd-id"]), None
3053 healing_alerts
= self
._gather
_vnfr
_healing
_alerts
(vnfr
, vnfd
)
3054 for alert
in healing_alerts
:
3055 self
.logger
.info(f
"Storing healing alert in MongoDB: {alert}")
3056 self
.db
.create("alerts", alert
)
3058 scaling_alerts
= self
._gather
_vnfr
_scaling
_alerts
(vnfr
, vnfd
)
3059 for alert
in scaling_alerts
:
3060 self
.logger
.info(f
"Storing scaling alert in MongoDB: {alert}")
3061 self
.db
.create("alerts", alert
)
3063 alarm_alerts
= self
._gather
_vnfr
_alarm
_alerts
(vnfr
, vnfd
)
3064 for alert
in alarm_alerts
:
3065 self
.logger
.info(f
"Storing VNF alarm alert in MongoDB: {alert}")
3066 self
.db
.create("alerts", alert
)
3068 self
._write
_ns
_status
(
3071 current_operation
="IDLE",
3072 current_operation_id
=None,
3073 error_description
=error_description_nsr
,
3074 error_detail
=error_detail
,
3075 other_update
=db_nsr_update
,
3077 self
._write
_op
_status
(
3080 error_message
=error_description_nslcmop
,
3081 operation_state
=nslcmop_operation_state
,
3082 other_update
=db_nslcmop_update
,
3085 if nslcmop_operation_state
:
3087 await self
.msg
.aiowrite(
3092 "nslcmop_id": nslcmop_id
,
3093 "operationState": nslcmop_operation_state
,
3094 "startTime": db_nslcmop
["startTime"],
3095 "links": db_nslcmop
["links"],
3096 "operationParams": {
3097 "nsInstanceId": nsr_id
,
3098 "nsdId": db_nsr
["nsd-id"],
3102 except Exception as e
:
3104 logging_text
+ "kafka_write notification Exception {}".format(e
)
3107 self
.logger
.debug(logging_text
+ "Exit")
3108 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_instantiate")
3110 def _get_vnfd(self
, vnfd_id
: str, projects_read
: str, cached_vnfds
: Dict
[str, Any
]):
3111 if vnfd_id
not in cached_vnfds
:
3112 cached_vnfds
[vnfd_id
] = self
.db
.get_one(
3113 "vnfds", {"id": vnfd_id
, "_admin.projects_read": projects_read
}
3115 return cached_vnfds
[vnfd_id
]
3117 def _get_vnfr(self
, nsr_id
: str, vnf_profile_id
: str, cached_vnfrs
: Dict
[str, Any
]):
3118 if vnf_profile_id
not in cached_vnfrs
:
3119 cached_vnfrs
[vnf_profile_id
] = self
.db
.get_one(
3122 "member-vnf-index-ref": vnf_profile_id
,
3123 "nsr-id-ref": nsr_id
,
3126 return cached_vnfrs
[vnf_profile_id
]
3128 def _is_deployed_vca_in_relation(
3129 self
, vca
: DeployedVCA
, relation
: Relation
3132 for endpoint
in (relation
.provider
, relation
.requirer
):
3133 if endpoint
["kdu-resource-profile-id"]:
3136 vca
.vnf_profile_id
== endpoint
.vnf_profile_id
3137 and vca
.vdu_profile_id
== endpoint
.vdu_profile_id
3138 and vca
.execution_environment_ref
== endpoint
.execution_environment_ref
3144 def _update_ee_relation_data_with_implicit_data(
3145 self
, nsr_id
, nsd
, ee_relation_data
, cached_vnfds
, vnf_profile_id
: str = None
3147 ee_relation_data
= safe_get_ee_relation(
3148 nsr_id
, ee_relation_data
, vnf_profile_id
=vnf_profile_id
3150 ee_relation_level
= EELevel
.get_level(ee_relation_data
)
3151 if (ee_relation_level
in (EELevel
.VNF
, EELevel
.VDU
)) and not ee_relation_data
[
3152 "execution-environment-ref"
3154 vnf_profile
= get_vnf_profile(nsd
, ee_relation_data
["vnf-profile-id"])
3155 vnfd_id
= vnf_profile
["vnfd-id"]
3156 project
= nsd
["_admin"]["projects_read"][0]
3157 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3160 if ee_relation_level
== EELevel
.VNF
3161 else ee_relation_data
["vdu-profile-id"]
3163 ee
= get_juju_ee_ref(db_vnfd
, entity_id
)
3166 f
"not execution environments found for ee_relation {ee_relation_data}"
3168 ee_relation_data
["execution-environment-ref"] = ee
["id"]
3169 return ee_relation_data
3171 def _get_ns_relations(
3174 nsd
: Dict
[str, Any
],
3176 cached_vnfds
: Dict
[str, Any
],
3177 ) -> List
[Relation
]:
3179 db_ns_relations
= get_ns_configuration_relation_list(nsd
)
3180 for r
in db_ns_relations
:
3181 provider_dict
= None
3182 requirer_dict
= None
3183 if all(key
in r
for key
in ("provider", "requirer")):
3184 provider_dict
= r
["provider"]
3185 requirer_dict
= r
["requirer"]
3186 elif "entities" in r
:
3187 provider_id
= r
["entities"][0]["id"]
3190 "endpoint": r
["entities"][0]["endpoint"],
3192 if provider_id
!= nsd
["id"]:
3193 provider_dict
["vnf-profile-id"] = provider_id
3194 requirer_id
= r
["entities"][1]["id"]
3197 "endpoint": r
["entities"][1]["endpoint"],
3199 if requirer_id
!= nsd
["id"]:
3200 requirer_dict
["vnf-profile-id"] = requirer_id
3203 "provider/requirer or entities must be included in the relation."
3205 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3206 nsr_id
, nsd
, provider_dict
, cached_vnfds
3208 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3209 nsr_id
, nsd
, requirer_dict
, cached_vnfds
3211 provider
= EERelation(relation_provider
)
3212 requirer
= EERelation(relation_requirer
)
3213 relation
= Relation(r
["name"], provider
, requirer
)
3214 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3216 relations
.append(relation
)
3219 def _get_vnf_relations(
3222 nsd
: Dict
[str, Any
],
3224 cached_vnfds
: Dict
[str, Any
],
3225 ) -> List
[Relation
]:
3227 if vca
.target_element
== "ns":
3228 self
.logger
.debug("VCA is a NS charm, not a VNF.")
3230 vnf_profile
= get_vnf_profile(nsd
, vca
.vnf_profile_id
)
3231 vnf_profile_id
= vnf_profile
["id"]
3232 vnfd_id
= vnf_profile
["vnfd-id"]
3233 project
= nsd
["_admin"]["projects_read"][0]
3234 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3235 db_vnf_relations
= get_relation_list(db_vnfd
, vnfd_id
)
3236 for r
in db_vnf_relations
:
3237 provider_dict
= None
3238 requirer_dict
= None
3239 if all(key
in r
for key
in ("provider", "requirer")):
3240 provider_dict
= r
["provider"]
3241 requirer_dict
= r
["requirer"]
3242 elif "entities" in r
:
3243 provider_id
= r
["entities"][0]["id"]
3246 "vnf-profile-id": vnf_profile_id
,
3247 "endpoint": r
["entities"][0]["endpoint"],
3249 if provider_id
!= vnfd_id
:
3250 provider_dict
["vdu-profile-id"] = provider_id
3251 requirer_id
= r
["entities"][1]["id"]
3254 "vnf-profile-id": vnf_profile_id
,
3255 "endpoint": r
["entities"][1]["endpoint"],
3257 if requirer_id
!= vnfd_id
:
3258 requirer_dict
["vdu-profile-id"] = requirer_id
3261 "provider/requirer or entities must be included in the relation."
3263 relation_provider
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3264 nsr_id
, nsd
, provider_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3266 relation_requirer
= self
._update
_ee
_relation
_data
_with
_implicit
_data
(
3267 nsr_id
, nsd
, requirer_dict
, cached_vnfds
, vnf_profile_id
=vnf_profile_id
3269 provider
= EERelation(relation_provider
)
3270 requirer
= EERelation(relation_requirer
)
3271 relation
= Relation(r
["name"], provider
, requirer
)
3272 vca_in_relation
= self
._is
_deployed
_vca
_in
_relation
(vca
, relation
)
3274 relations
.append(relation
)
3277 def _get_kdu_resource_data(
3279 ee_relation
: EERelation
,
3280 db_nsr
: Dict
[str, Any
],
3281 cached_vnfds
: Dict
[str, Any
],
3282 ) -> DeployedK8sResource
:
3283 nsd
= get_nsd(db_nsr
)
3284 vnf_profiles
= get_vnf_profiles(nsd
)
3285 vnfd_id
= find_in_list(
3287 lambda vnf_profile
: vnf_profile
["id"] == ee_relation
.vnf_profile_id
,
3289 project
= nsd
["_admin"]["projects_read"][0]
3290 db_vnfd
= self
._get
_vnfd
(vnfd_id
, project
, cached_vnfds
)
3291 kdu_resource_profile
= get_kdu_resource_profile(
3292 db_vnfd
, ee_relation
.kdu_resource_profile_id
3294 kdu_name
= kdu_resource_profile
["kdu-name"]
3295 deployed_kdu
, _
= get_deployed_kdu(
3296 db_nsr
.get("_admin", ()).get("deployed", ()),
3298 ee_relation
.vnf_profile_id
,
3300 deployed_kdu
.update({"resource-name": kdu_resource_profile
["resource-name"]})
3303 def _get_deployed_component(
3305 ee_relation
: EERelation
,
3306 db_nsr
: Dict
[str, Any
],
3307 cached_vnfds
: Dict
[str, Any
],
3308 ) -> DeployedComponent
:
3309 nsr_id
= db_nsr
["_id"]
3310 deployed_component
= None
3311 ee_level
= EELevel
.get_level(ee_relation
)
3312 if ee_level
== EELevel
.NS
:
3313 vca
= get_deployed_vca(db_nsr
, {"vdu_id": None, "member-vnf-index": None})
3315 deployed_component
= DeployedVCA(nsr_id
, vca
)
3316 elif ee_level
== EELevel
.VNF
:
3317 vca
= get_deployed_vca(
3321 "member-vnf-index": ee_relation
.vnf_profile_id
,
3322 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3326 deployed_component
= DeployedVCA(nsr_id
, vca
)
3327 elif ee_level
== EELevel
.VDU
:
3328 vca
= get_deployed_vca(
3331 "vdu_id": ee_relation
.vdu_profile_id
,
3332 "member-vnf-index": ee_relation
.vnf_profile_id
,
3333 "ee_descriptor_id": ee_relation
.execution_environment_ref
,
3337 deployed_component
= DeployedVCA(nsr_id
, vca
)
3338 elif ee_level
== EELevel
.KDU
:
3339 kdu_resource_data
= self
._get
_kdu
_resource
_data
(
3340 ee_relation
, db_nsr
, cached_vnfds
3342 if kdu_resource_data
:
3343 deployed_component
= DeployedK8sResource(kdu_resource_data
)
3344 return deployed_component
3346 async def _add_relation(
3350 db_nsr
: Dict
[str, Any
],
3351 cached_vnfds
: Dict
[str, Any
],
3352 cached_vnfrs
: Dict
[str, Any
],
3354 deployed_provider
= self
._get
_deployed
_component
(
3355 relation
.provider
, db_nsr
, cached_vnfds
3357 deployed_requirer
= self
._get
_deployed
_component
(
3358 relation
.requirer
, db_nsr
, cached_vnfds
3362 and deployed_requirer
3363 and deployed_provider
.config_sw_installed
3364 and deployed_requirer
.config_sw_installed
3366 provider_db_vnfr
= (
3368 relation
.provider
.nsr_id
,
3369 relation
.provider
.vnf_profile_id
,
3372 if relation
.provider
.vnf_profile_id
3375 requirer_db_vnfr
= (
3377 relation
.requirer
.nsr_id
,
3378 relation
.requirer
.vnf_profile_id
,
3381 if relation
.requirer
.vnf_profile_id
3384 provider_vca_id
= self
.get_vca_id(provider_db_vnfr
, db_nsr
)
3385 requirer_vca_id
= self
.get_vca_id(requirer_db_vnfr
, db_nsr
)
3386 provider_relation_endpoint
= RelationEndpoint(
3387 deployed_provider
.ee_id
,
3389 relation
.provider
.endpoint
,
3391 requirer_relation_endpoint
= RelationEndpoint(
3392 deployed_requirer
.ee_id
,
3394 relation
.requirer
.endpoint
,
3397 await self
.vca_map
[vca_type
].add_relation(
3398 provider
=provider_relation_endpoint
,
3399 requirer
=requirer_relation_endpoint
,
3401 except N2VCException
as exception
:
3402 self
.logger
.error(exception
)
3403 raise LcmException(exception
)
3407 async def _add_vca_relations(
3413 timeout
: int = 3600,
3416 # 1. find all relations for this VCA
3417 # 2. wait for other peers related
3421 # STEP 1: find all relations for this VCA
3424 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3425 nsd
= get_nsd(db_nsr
)
3428 deployed_vca_dict
= get_deployed_vca_list(db_nsr
)[vca_index
]
3429 my_vca
= DeployedVCA(nsr_id
, deployed_vca_dict
)
3434 relations
.extend(self
._get
_ns
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3435 relations
.extend(self
._get
_vnf
_relations
(nsr_id
, nsd
, my_vca
, cached_vnfds
))
3437 # if no relations, terminate
3439 self
.logger
.debug(logging_text
+ " No relations")
3442 self
.logger
.debug(logging_text
+ " adding relations {}".format(relations
))
3449 if now
- start
>= timeout
:
3450 self
.logger
.error(logging_text
+ " : timeout adding relations")
3453 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3454 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
3456 # for each relation, find the VCA's related
3457 for relation
in relations
.copy():
3458 added
= await self
._add
_relation
(
3466 relations
.remove(relation
)
3469 self
.logger
.debug("Relations added")
3471 await asyncio
.sleep(5.0)
3475 except Exception as e
:
3476 self
.logger
.warn(logging_text
+ " ERROR adding relations: {}".format(e
))
3479 async def _install_kdu(
3487 k8s_instance_info
: dict,
3488 k8params
: dict = None,
3493 k8sclustertype
= k8s_instance_info
["k8scluster-type"]
3496 "collection": "nsrs",
3497 "filter": {"_id": nsr_id
},
3498 "path": nsr_db_path
,
3501 if k8s_instance_info
.get("kdu-deployment-name"):
3502 kdu_instance
= k8s_instance_info
.get("kdu-deployment-name")
3504 kdu_instance
= self
.k8scluster_map
[
3506 ].generate_kdu_instance_name(
3507 db_dict
=db_dict_install
,
3508 kdu_model
=k8s_instance_info
["kdu-model"],
3509 kdu_name
=k8s_instance_info
["kdu-name"],
3512 # Update the nsrs table with the kdu-instance value
3516 _desc
={nsr_db_path
+ ".kdu-instance": kdu_instance
},
3519 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3520 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3521 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3522 # namespace, this first verification could be removed, and the next step would be done for any kind
3524 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3525 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3526 if k8sclustertype
in ("juju", "juju-bundle"):
3527 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3528 # that the user passed a namespace which he wants its KDU to be deployed in)
3534 "_admin.projects_write": k8s_instance_info
["namespace"],
3535 "_admin.projects_read": k8s_instance_info
["namespace"],
3541 f
"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3546 _desc
={f
"{nsr_db_path}.namespace": kdu_instance
},
3548 k8s_instance_info
["namespace"] = kdu_instance
3550 await self
.k8scluster_map
[k8sclustertype
].install(
3551 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3552 kdu_model
=k8s_instance_info
["kdu-model"],
3555 db_dict
=db_dict_install
,
3557 kdu_name
=k8s_instance_info
["kdu-name"],
3558 namespace
=k8s_instance_info
["namespace"],
3559 kdu_instance
=kdu_instance
,
3563 # Obtain services to obtain management service ip
3564 services
= await self
.k8scluster_map
[k8sclustertype
].get_services(
3565 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3566 kdu_instance
=kdu_instance
,
3567 namespace
=k8s_instance_info
["namespace"],
3570 # Obtain management service info (if exists)
3571 vnfr_update_dict
= {}
3572 kdu_config
= get_configuration(vnfd
, kdud
["name"])
3574 target_ee_list
= kdu_config
.get("execution-environment-list", [])
3579 vnfr_update_dict
["kdur.{}.services".format(kdu_index
)] = services
3582 for service
in kdud
.get("service", [])
3583 if service
.get("mgmt-service")
3585 for mgmt_service
in mgmt_services
:
3586 for service
in services
:
3587 if service
["name"].startswith(mgmt_service
["name"]):
3588 # Mgmt service found, Obtain service ip
3589 ip
= service
.get("external_ip", service
.get("cluster_ip"))
3590 if isinstance(ip
, list) and len(ip
) == 1:
3594 "kdur.{}.ip-address".format(kdu_index
)
3597 # Check if must update also mgmt ip at the vnf
3598 service_external_cp
= mgmt_service
.get(
3599 "external-connection-point-ref"
3601 if service_external_cp
:
3603 deep_get(vnfd
, ("mgmt-interface", "cp"))
3604 == service_external_cp
3606 vnfr_update_dict
["ip-address"] = ip
3611 "external-connection-point-ref", ""
3613 == service_external_cp
,
3616 "kdur.{}.ip-address".format(kdu_index
)
3621 "Mgmt service name: {} not found".format(
3622 mgmt_service
["name"]
3626 vnfr_update_dict
["kdur.{}.status".format(kdu_index
)] = "READY"
3627 self
.update_db_2("vnfrs", vnfr_data
.get("_id"), vnfr_update_dict
)
3629 kdu_config
= get_configuration(vnfd
, k8s_instance_info
["kdu-name"])
3632 and kdu_config
.get("initial-config-primitive")
3633 and get_juju_ee_ref(vnfd
, k8s_instance_info
["kdu-name"]) is None
3635 initial_config_primitive_list
= kdu_config
.get(
3636 "initial-config-primitive"
3638 initial_config_primitive_list
.sort(key
=lambda val
: int(val
["seq"]))
3640 for initial_config_primitive
in initial_config_primitive_list
:
3641 primitive_params_
= self
._map
_primitive
_params
(
3642 initial_config_primitive
, {}, {}
3645 await asyncio
.wait_for(
3646 self
.k8scluster_map
[k8sclustertype
].exec_primitive(
3647 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3648 kdu_instance
=kdu_instance
,
3649 primitive_name
=initial_config_primitive
["name"],
3650 params
=primitive_params_
,
3651 db_dict
=db_dict_install
,
3657 except Exception as e
:
3658 # Prepare update db with error and raise exception
3661 "nsrs", nsr_id
, {nsr_db_path
+ ".detailed-status": str(e
)}
3665 vnfr_data
.get("_id"),
3666 {"kdur.{}.status".format(kdu_index
): "ERROR"},
3668 except Exception as error
:
3669 # ignore to keep original exception
3670 self
.logger
.warning(
3671 f
"An exception occurred while updating DB: {str(error)}"
3673 # reraise original error
3678 async def deploy_kdus(
3685 task_instantiation_info
,
3687 # Launch kdus if present in the descriptor
3689 k8scluster_id_2_uuic
= {
3690 "helm-chart-v3": {},
3694 async def _get_cluster_id(cluster_id
, cluster_type
):
3695 nonlocal k8scluster_id_2_uuic
3696 if cluster_id
in k8scluster_id_2_uuic
[cluster_type
]:
3697 return k8scluster_id_2_uuic
[cluster_type
][cluster_id
]
3699 # check if K8scluster is creating and wait look if previous tasks in process
3700 task_name
, task_dependency
= self
.lcm_tasks
.lookfor_related(
3701 "k8scluster", cluster_id
3704 text
= "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3705 task_name
, cluster_id
3707 self
.logger
.debug(logging_text
+ text
)
3708 await asyncio
.wait(task_dependency
, timeout
=3600)
3710 db_k8scluster
= self
.db
.get_one(
3711 "k8sclusters", {"_id": cluster_id
}, fail_on_empty
=False
3713 if not db_k8scluster
:
3714 raise LcmException("K8s cluster {} cannot be found".format(cluster_id
))
3716 k8s_id
= deep_get(db_k8scluster
, ("_admin", cluster_type
, "id"))
3718 if cluster_type
== "helm-chart-v3":
3720 # backward compatibility for existing clusters that have not been initialized for helm v3
3721 k8s_credentials
= yaml
.safe_dump(
3722 db_k8scluster
.get("credentials")
3724 k8s_id
, uninstall_sw
= await self
.k8sclusterhelm3
.init_env(
3725 k8s_credentials
, reuse_cluster_uuid
=cluster_id
3727 db_k8scluster_update
= {}
3728 db_k8scluster_update
["_admin.helm-chart-v3.error_msg"] = None
3729 db_k8scluster_update
["_admin.helm-chart-v3.id"] = k8s_id
3730 db_k8scluster_update
[
3731 "_admin.helm-chart-v3.created"
3733 db_k8scluster_update
[
3734 "_admin.helm-chart-v3.operationalState"
3737 "k8sclusters", cluster_id
, db_k8scluster_update
3739 except Exception as e
:
3742 + "error initializing helm-v3 cluster: {}".format(str(e
))
3745 "K8s cluster '{}' has not been initialized for '{}'".format(
3746 cluster_id
, cluster_type
3751 "K8s cluster '{}' has not been initialized for '{}'".format(
3752 cluster_id
, cluster_type
3755 k8scluster_id_2_uuic
[cluster_type
][cluster_id
] = k8s_id
3758 logging_text
+= "Deploy kdus: "
3761 db_nsr_update
= {"_admin.deployed.K8s": []}
3762 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3765 updated_cluster_list
= []
3766 updated_v3_cluster_list
= []
3768 for vnfr_data
in db_vnfrs
.values():
3769 vca_id
= self
.get_vca_id(vnfr_data
, {})
3770 for kdu_index
, kdur
in enumerate(get_iterable(vnfr_data
, "kdur")):
3771 # Step 0: Prepare and set parameters
3772 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
3773 vnfd_id
= vnfr_data
.get("vnfd-id")
3774 vnfd_with_id
= find_in_list(
3775 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3779 for kdud
in vnfd_with_id
["kdu"]
3780 if kdud
["name"] == kdur
["kdu-name"]
3782 namespace
= kdur
.get("k8s-namespace")
3783 kdu_deployment_name
= kdur
.get("kdu-deployment-name")
3784 if kdur
.get("helm-chart"):
3785 kdumodel
= kdur
["helm-chart"]
3786 # Default version: helm3, if helm-version is v2 assign v2
3787 k8sclustertype
= "helm-chart-v3"
3788 self
.logger
.debug("kdur: {}".format(kdur
))
3789 elif kdur
.get("juju-bundle"):
3790 kdumodel
= kdur
["juju-bundle"]
3791 k8sclustertype
= "juju-bundle"
3794 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3795 "juju-bundle. Maybe an old NBI version is running".format(
3796 vnfr_data
["member-vnf-index-ref"], kdur
["kdu-name"]
3799 # check if kdumodel is a file and exists
3801 vnfd_with_id
= find_in_list(
3802 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3804 storage
= deep_get(vnfd_with_id
, ("_admin", "storage"))
3805 if storage
: # may be not present if vnfd has not artifacts
3806 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3807 if storage
["pkg-dir"]:
3808 filename
= "{}/{}/{}s/{}".format(
3815 filename
= "{}/Scripts/{}s/{}".format(
3820 if self
.fs
.file_exists(
3821 filename
, mode
="file"
3822 ) or self
.fs
.file_exists(filename
, mode
="dir"):
3823 kdumodel
= self
.fs
.path
+ filename
3824 except (asyncio
.TimeoutError
, asyncio
.CancelledError
):
3826 except Exception as e
: # it is not a file
3827 self
.logger
.warning(f
"An exception occurred: {str(e)}")
3829 k8s_cluster_id
= kdur
["k8s-cluster"]["id"]
3830 step
= "Synchronize repos for k8s cluster '{}'".format(
3833 cluster_uuid
= await _get_cluster_id(k8s_cluster_id
, k8sclustertype
)
3837 k8sclustertype
== "helm-chart"
3838 and cluster_uuid
not in updated_cluster_list
3840 k8sclustertype
== "helm-chart-v3"
3841 and cluster_uuid
not in updated_v3_cluster_list
3843 del_repo_list
, added_repo_dict
= await asyncio
.ensure_future(
3844 self
.k8scluster_map
[k8sclustertype
].synchronize_repos(
3845 cluster_uuid
=cluster_uuid
3848 if del_repo_list
or added_repo_dict
:
3849 if k8sclustertype
== "helm-chart":
3851 "_admin.helm_charts_added." + item
: None
3852 for item
in del_repo_list
3855 "_admin.helm_charts_added." + item
: name
3856 for item
, name
in added_repo_dict
.items()
3858 updated_cluster_list
.append(cluster_uuid
)
3859 elif k8sclustertype
== "helm-chart-v3":
3861 "_admin.helm_charts_v3_added." + item
: None
3862 for item
in del_repo_list
3865 "_admin.helm_charts_v3_added." + item
: name
3866 for item
, name
in added_repo_dict
.items()
3868 updated_v3_cluster_list
.append(cluster_uuid
)
3870 logging_text
+ "repos synchronized on k8s cluster "
3871 "'{}' to_delete: {}, to_add: {}".format(
3872 k8s_cluster_id
, del_repo_list
, added_repo_dict
3877 {"_id": k8s_cluster_id
},
3883 step
= "Instantiating KDU {}.{} in k8s cluster {}".format(
3884 vnfr_data
["member-vnf-index-ref"],
3888 k8s_instance_info
= {
3889 "kdu-instance": None,
3890 "k8scluster-uuid": cluster_uuid
,
3891 "k8scluster-type": k8sclustertype
,
3892 "member-vnf-index": vnfr_data
["member-vnf-index-ref"],
3893 "kdu-name": kdur
["kdu-name"],
3894 "kdu-model": kdumodel
,
3895 "namespace": namespace
,
3896 "kdu-deployment-name": kdu_deployment_name
,
3898 db_path
= "_admin.deployed.K8s.{}".format(index
)
3899 db_nsr_update
[db_path
] = k8s_instance_info
3900 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3901 vnfd_with_id
= find_in_list(
3902 db_vnfds
, lambda vnf
: vnf
["_id"] == vnfd_id
3904 task
= asyncio
.ensure_future(
3913 k8params
=desc_params
,
3918 self
.lcm_tasks
.register(
3922 "instantiate_KDU-{}".format(index
),
3925 task_instantiation_info
[task
] = "Deploying KDU {}".format(
3931 except (LcmException
, asyncio
.CancelledError
):
3933 except Exception as e
:
3934 msg
= "Exception {} while {}: {}".format(type(e
).__name
__, step
, e
)
3935 if isinstance(e
, (N2VCException
, DbException
)):
3936 self
.logger
.error(logging_text
+ msg
)
3938 self
.logger
.critical(logging_text
+ msg
, exc_info
=True)
3939 raise LcmException(msg
)
3942 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3962 task_instantiation_info
,
3965 # launch instantiate_N2VC in a asyncio task and register task object
3966 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
3967 # if not found, create one entry and update database
3968 # fill db_nsr._admin.deployed.VCA.<index>
3971 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
3975 get_charm_name
= False
3976 if "execution-environment-list" in descriptor_config
:
3977 ee_list
= descriptor_config
.get("execution-environment-list", [])
3978 elif "juju" in descriptor_config
:
3979 ee_list
= [descriptor_config
] # ns charms
3980 if "execution-environment-list" not in descriptor_config
:
3981 # charm name is only required for ns charms
3982 get_charm_name
= True
3983 else: # other types as script are not supported
3986 for ee_item
in ee_list
:
3989 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3990 ee_item
.get("juju"), ee_item
.get("helm-chart")
3993 ee_descriptor_id
= ee_item
.get("id")
3994 vca_name
, charm_name
, vca_type
= self
.get_vca_info(
3995 ee_item
, db_nsr
, get_charm_name
3999 logging_text
+ "skipping, non juju/charm/helm configuration"
4004 for vca_index
, vca_deployed
in enumerate(
4005 db_nsr
["_admin"]["deployed"]["VCA"]
4007 if not vca_deployed
:
4010 vca_deployed
.get("member-vnf-index") == member_vnf_index
4011 and vca_deployed
.get("vdu_id") == vdu_id
4012 and vca_deployed
.get("kdu_name") == kdu_name
4013 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
4014 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
4018 # not found, create one.
4020 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
4023 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
4025 target
+= "/kdu/{}".format(kdu_name
)
4027 "target_element": target
,
4028 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4029 "member-vnf-index": member_vnf_index
,
4031 "kdu_name": kdu_name
,
4032 "vdu_count_index": vdu_index
,
4033 "operational-status": "init", # TODO revise
4034 "detailed-status": "", # TODO revise
4035 "step": "initial-deploy", # TODO revise
4037 "vdu_name": vdu_name
,
4039 "ee_descriptor_id": ee_descriptor_id
,
4040 "charm_name": charm_name
,
4044 # create VCA and configurationStatus in db
4046 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
4047 "configurationStatus.{}".format(vca_index
): dict(),
4049 self
.update_db_2("nsrs", nsr_id
, db_dict
)
4051 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
4053 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
4054 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
4055 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
4058 task_n2vc
= asyncio
.ensure_future(
4059 self
.instantiate_N2VC(
4060 logging_text
=logging_text
,
4061 vca_index
=vca_index
,
4067 vdu_index
=vdu_index
,
4068 kdu_index
=kdu_index
,
4069 deploy_params
=deploy_params
,
4070 config_descriptor
=descriptor_config
,
4071 base_folder
=base_folder
,
4072 nslcmop_id
=nslcmop_id
,
4076 ee_config_descriptor
=ee_item
,
4079 self
.lcm_tasks
.register(
4083 "instantiate_N2VC-{}".format(vca_index
),
4086 task_instantiation_info
[
4088 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
4089 member_vnf_index
or "", vdu_id
or ""
4092 def _format_additional_params(self
, params
):
4093 params
= params
or {}
4094 for key
, value
in params
.items():
4095 if str(value
).startswith("!!yaml "):
4096 params
[key
] = yaml
.safe_load(value
[7:])
4099 def _get_terminate_primitive_params(self
, seq
, vnf_index
):
4100 primitive
= seq
.get("name")
4101 primitive_params
= {}
4103 "member_vnf_index": vnf_index
,
4104 "primitive": primitive
,
4105 "primitive_params": primitive_params
,
4108 return self
._map
_primitive
_params
(seq
, params
, desc_params
)
4112 def _retry_or_skip_suboperation(self
, db_nslcmop
, op_index
):
4113 op
= deep_get(db_nslcmop
, ("_admin", "operations"), [])[op_index
]
4114 if op
.get("operationState") == "COMPLETED":
4115 # b. Skip sub-operation
4116 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4117 return self
.SUBOPERATION_STATUS_SKIP
4119 # c. retry executing sub-operation
4120 # The sub-operation exists, and operationState != 'COMPLETED'
4121 # Update operationState = 'PROCESSING' to indicate a retry.
4122 operationState
= "PROCESSING"
4123 detailed_status
= "In progress"
4124 self
._update
_suboperation
_status
(
4125 db_nslcmop
, op_index
, operationState
, detailed_status
4127 # Return the sub-operation index
4128 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4129 # with arguments extracted from the sub-operation
4132 # Find a sub-operation where all keys in a matching dictionary must match
4133 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4134 def _find_suboperation(self
, db_nslcmop
, match
):
4135 if db_nslcmop
and match
:
4136 op_list
= db_nslcmop
.get("_admin", {}).get("operations", [])
4137 for i
, op
in enumerate(op_list
):
4138 if all(op
.get(k
) == match
[k
] for k
in match
):
4140 return self
.SUBOPERATION_STATUS_NOT_FOUND
4142 # Update status for a sub-operation given its index
4143 def _update_suboperation_status(
4144 self
, db_nslcmop
, op_index
, operationState
, detailed_status
4146 # Update DB for HA tasks
4147 q_filter
= {"_id": db_nslcmop
["_id"]}
4149 "_admin.operations.{}.operationState".format(op_index
): operationState
,
4150 "_admin.operations.{}.detailed-status".format(op_index
): detailed_status
,
4153 "nslcmops", q_filter
=q_filter
, update_dict
=update_dict
, fail_on_empty
=False
4156 # Add sub-operation, return the index of the added sub-operation
4157 # Optionally, set operationState, detailed-status, and operationType
4158 # Status and type are currently set for 'scale' sub-operations:
4159 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4160 # 'detailed-status' : status message
4161 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4162 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4163 def _add_suboperation(
4171 mapped_primitive_params
,
4172 operationState
=None,
4173 detailed_status
=None,
4176 RO_scaling_info
=None,
4179 return self
.SUBOPERATION_STATUS_NOT_FOUND
4180 # Get the "_admin.operations" list, if it exists
4181 db_nslcmop_admin
= db_nslcmop
.get("_admin", {})
4182 op_list
= db_nslcmop_admin
.get("operations")
4183 # Create or append to the "_admin.operations" list
4185 "member_vnf_index": vnf_index
,
4187 "vdu_count_index": vdu_count_index
,
4188 "primitive": primitive
,
4189 "primitive_params": mapped_primitive_params
,
4192 new_op
["operationState"] = operationState
4194 new_op
["detailed-status"] = detailed_status
4196 new_op
["lcmOperationType"] = operationType
4198 new_op
["RO_nsr_id"] = RO_nsr_id
4200 new_op
["RO_scaling_info"] = RO_scaling_info
4202 # No existing operations, create key 'operations' with current operation as first list element
4203 db_nslcmop_admin
.update({"operations": [new_op
]})
4204 op_list
= db_nslcmop_admin
.get("operations")
4206 # Existing operations, append operation to list
4207 op_list
.append(new_op
)
4209 db_nslcmop_update
= {"_admin.operations": op_list
}
4210 self
.update_db_2("nslcmops", db_nslcmop
["_id"], db_nslcmop_update
)
4211 op_index
= len(op_list
) - 1
4214 # Helper methods for scale() sub-operations
4216 # pre-scale/post-scale:
4217 # Check for 3 different cases:
4218 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4219 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4220 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4221 def _check_or_add_scale_suboperation(
4225 vnf_config_primitive
,
4229 RO_scaling_info
=None,
4231 # Find this sub-operation
4232 if RO_nsr_id
and RO_scaling_info
:
4233 operationType
= "SCALE-RO"
4235 "member_vnf_index": vnf_index
,
4236 "RO_nsr_id": RO_nsr_id
,
4237 "RO_scaling_info": RO_scaling_info
,
4241 "member_vnf_index": vnf_index
,
4242 "primitive": vnf_config_primitive
,
4243 "primitive_params": primitive_params
,
4244 "lcmOperationType": operationType
,
4246 op_index
= self
._find
_suboperation
(db_nslcmop
, match
)
4247 if op_index
== self
.SUBOPERATION_STATUS_NOT_FOUND
:
4248 # a. New sub-operation
4249 # The sub-operation does not exist, add it.
4250 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4251 # The following parameters are set to None for all kind of scaling:
4253 vdu_count_index
= None
4255 if RO_nsr_id
and RO_scaling_info
:
4256 vnf_config_primitive
= None
4257 primitive_params
= None
4260 RO_scaling_info
= None
4261 # Initial status for sub-operation
4262 operationState
= "PROCESSING"
4263 detailed_status
= "In progress"
4264 # Add sub-operation for pre/post-scaling (zero or more operations)
4265 self
._add
_suboperation
(
4271 vnf_config_primitive
,
4279 return self
.SUBOPERATION_STATUS_NEW
4281 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4282 # or op_index (operationState != 'COMPLETED')
4283 return self
._retry
_or
_skip
_suboperation
(db_nslcmop
, op_index
)
4285 # Function to return execution_environment id
4287 async def destroy_N2VC(
4295 exec_primitives
=True,
4300 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4301 :param logging_text:
4303 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4304 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4305 :param vca_index: index in the database _admin.deployed.VCA
4306 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4307 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4308 not executed properly
4309 :param scaling_in: True destroys the application, False destroys the model
4310 :return: None or exception
4315 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4316 vca_index
, vca_deployed
, config_descriptor
, destroy_ee
4320 vca_type
= vca_deployed
.get("type", "lxc_proxy_charm")
4322 # execute terminate_primitives
4324 terminate_primitives
= get_ee_sorted_terminate_config_primitive_list(
4325 config_descriptor
.get("terminate-config-primitive"),
4326 vca_deployed
.get("ee_descriptor_id"),
4328 vdu_id
= vca_deployed
.get("vdu_id")
4329 vdu_count_index
= vca_deployed
.get("vdu_count_index")
4330 vdu_name
= vca_deployed
.get("vdu_name")
4331 vnf_index
= vca_deployed
.get("member-vnf-index")
4332 if terminate_primitives
and vca_deployed
.get("needed_terminate"):
4333 for seq
in terminate_primitives
:
4334 # For each sequence in list, get primitive and call _ns_execute_primitive()
4335 step
= "Calling terminate action for vnf_member_index={} primitive={}".format(
4336 vnf_index
, seq
.get("name")
4338 self
.logger
.debug(logging_text
+ step
)
4339 # Create the primitive for each sequence, i.e. "primitive": "touch"
4340 primitive
= seq
.get("name")
4341 mapped_primitive_params
= self
._get
_terminate
_primitive
_params
(
4346 self
._add
_suboperation
(
4353 mapped_primitive_params
,
4355 # Sub-operations: Call _ns_execute_primitive() instead of action()
4357 result
, result_detail
= await self
._ns
_execute
_primitive
(
4358 vca_deployed
["ee_id"],
4360 mapped_primitive_params
,
4364 except LcmException
:
4365 # this happens when VCA is not deployed. In this case it is not needed to terminate
4367 result_ok
= ["COMPLETED", "PARTIALLY_COMPLETED"]
4368 if result
not in result_ok
:
4370 "terminate_primitive {} for vnf_member_index={} fails with "
4371 "error {}".format(seq
.get("name"), vnf_index
, result_detail
)
4373 # set that this VCA do not need terminated
4374 db_update_entry
= "_admin.deployed.VCA.{}.needed_terminate".format(
4378 "nsrs", db_nslcmop
["nsInstanceId"], {db_update_entry
: False}
4381 # Delete Prometheus Jobs if any
4382 # This uses NSR_ID, so it will destroy any jobs under this index
4383 self
.db
.del_list("prometheus_jobs", {"nsr_id": db_nslcmop
["nsInstanceId"]})
4386 await self
.vca_map
[vca_type
].delete_execution_environment(
4387 vca_deployed
["ee_id"],
4388 scaling_in
=scaling_in
,
4393 async def _delete_all_N2VC(self
, db_nsr
: dict, vca_id
: str = None):
4394 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="TERMINATING")
4395 namespace
= "." + db_nsr
["_id"]
4397 await self
.n2vc
.delete_namespace(
4398 namespace
=namespace
,
4399 total_timeout
=self
.timeout
.charm_delete
,
4402 except N2VCNotFound
: # already deleted. Skip
4404 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="DELETED")
4406 async def terminate(self
, nsr_id
, nslcmop_id
):
4407 # Try to lock HA task here
4408 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
4409 if not task_is_locked_by_me
:
4412 logging_text
= "Task ns={} terminate={} ".format(nsr_id
, nslcmop_id
)
4413 self
.logger
.debug(logging_text
+ "Enter")
4414 timeout_ns_terminate
= self
.timeout
.ns_terminate
4417 operation_params
= None
4419 error_list
= [] # annotates all failed error messages
4420 db_nslcmop_update
= {}
4421 autoremove
= False # autoremove after terminated
4422 tasks_dict_info
= {}
4425 "Stage 1/3: Preparing task.",
4426 "Waiting for previous operations to terminate.",
4429 # ^ contains [stage, step, VIM-status]
4431 # wait for any previous tasks in process
4432 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
4434 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
4435 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
4436 operation_params
= db_nslcmop
.get("operationParams") or {}
4437 if operation_params
.get("timeout_ns_terminate"):
4438 timeout_ns_terminate
= operation_params
["timeout_ns_terminate"]
4439 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
4440 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
4442 db_nsr_update
["operational-status"] = "terminating"
4443 db_nsr_update
["config-status"] = "terminating"
4444 self
._write
_ns
_status
(
4446 ns_state
="TERMINATING",
4447 current_operation
="TERMINATING",
4448 current_operation_id
=nslcmop_id
,
4449 other_update
=db_nsr_update
,
4451 self
._write
_op
_status
(op_id
=nslcmop_id
, queuePosition
=0, stage
=stage
)
4452 nsr_deployed
= deepcopy(db_nsr
["_admin"].get("deployed")) or {}
4453 if db_nsr
["_admin"]["nsState"] == "NOT_INSTANTIATED":
4456 stage
[1] = "Getting vnf descriptors from db."
4457 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
4459 db_vnfr
["member-vnf-index-ref"]: db_vnfr
for db_vnfr
in db_vnfrs_list
4461 db_vnfds_from_id
= {}
4462 db_vnfds_from_member_index
= {}
4464 for vnfr
in db_vnfrs_list
:
4465 vnfd_id
= vnfr
["vnfd-id"]
4466 if vnfd_id
not in db_vnfds_from_id
:
4467 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
4468 db_vnfds_from_id
[vnfd_id
] = vnfd
4469 db_vnfds_from_member_index
[
4470 vnfr
["member-vnf-index-ref"]
4471 ] = db_vnfds_from_id
[vnfd_id
]
4473 # Destroy individual execution environments when there are terminating primitives.
4474 # Rest of EE will be deleted at once
4475 # TODO - check before calling _destroy_N2VC
4476 # if not operation_params.get("skip_terminate_primitives"):#
4477 # or not vca.get("needed_terminate"):
4478 stage
[0] = "Stage 2/3 execute terminating primitives."
4479 self
.logger
.debug(logging_text
+ stage
[0])
4480 stage
[1] = "Looking execution environment that needs terminate."
4481 self
.logger
.debug(logging_text
+ stage
[1])
4483 for vca_index
, vca
in enumerate(get_iterable(nsr_deployed
, "VCA")):
4484 config_descriptor
= None
4485 vca_member_vnf_index
= vca
.get("member-vnf-index")
4486 vca_id
= self
.get_vca_id(
4487 db_vnfrs_dict
.get(vca_member_vnf_index
)
4488 if vca_member_vnf_index
4492 if not vca
or not vca
.get("ee_id"):
4494 if not vca
.get("member-vnf-index"):
4496 config_descriptor
= db_nsr
.get("ns-configuration")
4497 elif vca
.get("vdu_id"):
4498 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4499 config_descriptor
= get_configuration(db_vnfd
, vca
.get("vdu_id"))
4500 elif vca
.get("kdu_name"):
4501 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4502 config_descriptor
= get_configuration(db_vnfd
, vca
.get("kdu_name"))
4504 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4505 config_descriptor
= get_configuration(db_vnfd
, db_vnfd
["id"])
4506 vca_type
= vca
.get("type")
4507 exec_terminate_primitives
= not operation_params
.get(
4508 "skip_terminate_primitives"
4509 ) and vca
.get("needed_terminate")
4510 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4511 # pending native charms
4512 destroy_ee
= True if vca_type
in ("helm-v3", "native_charm") else False
4513 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4514 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4515 task
= asyncio
.ensure_future(
4523 exec_terminate_primitives
,
4527 tasks_dict_info
[task
] = "Terminating VCA {}".format(vca
.get("ee_id"))
4529 # wait for pending tasks of terminate primitives
4533 + "Waiting for tasks {}".format(list(tasks_dict_info
.keys()))
4535 error_list
= await self
._wait
_for
_tasks
(
4538 min(self
.timeout
.charm_delete
, timeout_ns_terminate
),
4542 tasks_dict_info
.clear()
4544 return # raise LcmException("; ".join(error_list))
4546 # remove All execution environments at once
4547 stage
[0] = "Stage 3/3 delete all."
4549 if nsr_deployed
.get("VCA"):
4550 stage
[1] = "Deleting all execution environments."
4551 self
.logger
.debug(logging_text
+ stage
[1])
4552 helm_vca_list
= get_deployed_vca(db_nsr
, {"type": "helm-v3"})
4554 # Delete Namespace and Certificates
4555 await self
.vca_map
["helm-v3"].delete_tls_certificate(
4556 namespace
=db_nslcmop
["nsInstanceId"],
4557 certificate_name
=self
.EE_TLS_NAME
,
4559 await self
.vca_map
["helm-v3"].delete_namespace(
4560 namespace
=db_nslcmop
["nsInstanceId"],
4563 vca_id
= self
.get_vca_id({}, db_nsr
)
4564 task_delete_ee
= asyncio
.ensure_future(
4566 self
._delete
_all
_N
2VC
(db_nsr
=db_nsr
, vca_id
=vca_id
),
4567 timeout
=self
.timeout
.charm_delete
,
4570 tasks_dict_info
[task_delete_ee
] = "Terminating all VCA"
4572 # Delete from k8scluster
4573 stage
[1] = "Deleting KDUs."
4574 self
.logger
.debug(logging_text
+ stage
[1])
4575 # print(nsr_deployed)
4576 for kdu
in get_iterable(nsr_deployed
, "K8s"):
4577 if not kdu
or not kdu
.get("kdu-instance"):
4579 kdu_instance
= kdu
.get("kdu-instance")
4580 if kdu
.get("k8scluster-type") in self
.k8scluster_map
:
4581 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4582 vca_id
= self
.get_vca_id({}, db_nsr
)
4583 task_delete_kdu_instance
= asyncio
.ensure_future(
4584 self
.k8scluster_map
[kdu
["k8scluster-type"]].uninstall(
4585 cluster_uuid
=kdu
.get("k8scluster-uuid"),
4586 kdu_instance
=kdu_instance
,
4588 namespace
=kdu
.get("namespace"),
4594 + "Unknown k8s deployment type {}".format(
4595 kdu
.get("k8scluster-type")
4600 task_delete_kdu_instance
4601 ] = "Terminating KDU '{}'".format(kdu
.get("kdu-name"))
4604 stage
[1] = "Deleting ns from VIM."
4605 if self
.ro_config
.ng
:
4606 task_delete_ro
= asyncio
.ensure_future(
4607 self
._terminate
_ng
_ro
(
4608 logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
4611 tasks_dict_info
[task_delete_ro
] = "Removing deployment from VIM"
4613 # rest of staff will be done at finally
4616 ROclient
.ROClientException
,
4621 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
4623 except asyncio
.CancelledError
:
4625 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
4627 exc
= "Operation was cancelled"
4628 except Exception as e
:
4629 exc
= traceback
.format_exc()
4630 self
.logger
.critical(
4631 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
4636 error_list
.append(str(exc
))
4638 # wait for pending tasks
4640 stage
[1] = "Waiting for terminate pending tasks."
4641 self
.logger
.debug(logging_text
+ stage
[1])
4642 error_list
+= await self
._wait
_for
_tasks
(
4645 timeout_ns_terminate
,
4649 stage
[1] = stage
[2] = ""
4650 except asyncio
.CancelledError
:
4651 error_list
.append("Cancelled")
4652 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
4653 await self
._wait
_for
_tasks
(
4656 timeout_ns_terminate
,
4660 except Exception as exc
:
4661 error_list
.append(str(exc
))
4662 # update status at database
4664 error_detail
= "; ".join(error_list
)
4665 # self.logger.error(logging_text + error_detail)
4666 error_description_nslcmop
= "{} Detail: {}".format(
4667 stage
[0], error_detail
4669 error_description_nsr
= "Operation: TERMINATING.{}, {}.".format(
4670 nslcmop_id
, stage
[0]
4673 db_nsr_update
["operational-status"] = "failed"
4674 db_nsr_update
["detailed-status"] = (
4675 error_description_nsr
+ " Detail: " + error_detail
4677 db_nslcmop_update
["detailed-status"] = error_detail
4678 nslcmop_operation_state
= "FAILED"
4682 error_description_nsr
= error_description_nslcmop
= None
4683 ns_state
= "NOT_INSTANTIATED"
4684 db_nsr_update
["operational-status"] = "terminated"
4685 db_nsr_update
["detailed-status"] = "Done"
4686 db_nsr_update
["_admin.nsState"] = "NOT_INSTANTIATED"
4687 db_nslcmop_update
["detailed-status"] = "Done"
4688 nslcmop_operation_state
= "COMPLETED"
4691 self
._write
_ns
_status
(
4694 current_operation
="IDLE",
4695 current_operation_id
=None,
4696 error_description
=error_description_nsr
,
4697 error_detail
=error_detail
,
4698 other_update
=db_nsr_update
,
4700 self
._write
_op
_status
(
4703 error_message
=error_description_nslcmop
,
4704 operation_state
=nslcmop_operation_state
,
4705 other_update
=db_nslcmop_update
,
4707 if ns_state
== "NOT_INSTANTIATED":
4711 {"nsr-id-ref": nsr_id
},
4712 {"_admin.nsState": "NOT_INSTANTIATED"},
4714 except DbException
as e
:
4717 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4721 if operation_params
:
4722 autoremove
= operation_params
.get("autoremove", False)
4723 if nslcmop_operation_state
:
4725 await self
.msg
.aiowrite(
4730 "nslcmop_id": nslcmop_id
,
4731 "operationState": nslcmop_operation_state
,
4732 "autoremove": autoremove
,
4735 except Exception as e
:
4737 logging_text
+ "kafka_write notification Exception {}".format(e
)
4739 self
.logger
.debug(f
"Deleting alerts: ns_id={nsr_id}")
4740 self
.db
.del_list("alerts", {"tags.ns_id": nsr_id
})
4742 self
.logger
.debug(logging_text
+ "Exit")
4743 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_terminate")
4745 async def _wait_for_tasks(
4746 self
, logging_text
, created_tasks_info
, timeout
, stage
, nslcmop_id
, nsr_id
=None
4749 error_detail_list
= []
4751 pending_tasks
= list(created_tasks_info
.keys())
4752 num_tasks
= len(pending_tasks
)
4754 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4755 self
._write
_op
_status
(nslcmop_id
, stage
)
4756 while pending_tasks
:
4758 _timeout
= timeout
+ time_start
- time()
4759 done
, pending_tasks
= await asyncio
.wait(
4760 pending_tasks
, timeout
=_timeout
, return_when
=asyncio
.FIRST_COMPLETED
4762 num_done
+= len(done
)
4763 if not done
: # Timeout
4764 for task
in pending_tasks
:
4765 new_error
= created_tasks_info
[task
] + ": Timeout"
4766 error_detail_list
.append(new_error
)
4767 error_list
.append(new_error
)
4770 if task
.cancelled():
4773 exc
= task
.exception()
4775 if isinstance(exc
, asyncio
.TimeoutError
):
4777 new_error
= created_tasks_info
[task
] + ": {}".format(exc
)
4778 error_list
.append(created_tasks_info
[task
])
4779 error_detail_list
.append(new_error
)
4786 ROclient
.ROClientException
,
4792 self
.logger
.error(logging_text
+ new_error
)
4794 exc_traceback
= "".join(
4795 traceback
.format_exception(None, exc
, exc
.__traceback
__)
4799 + created_tasks_info
[task
]
4805 logging_text
+ created_tasks_info
[task
] + ": Done"
4807 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4809 stage
[1] += " Errors: " + ". ".join(error_detail_list
) + "."
4810 if nsr_id
: # update also nsr
4815 "errorDescription": "Error at: " + ", ".join(error_list
),
4816 "errorDetail": ". ".join(error_detail_list
),
4819 self
._write
_op
_status
(nslcmop_id
, stage
)
4820 return error_detail_list
4822 async def _cancel_pending_tasks(self
, logging_text
, created_tasks_info
):
4823 for task
, name
in created_tasks_info
.items():
4824 self
.logger
.debug(logging_text
+ "Cancelling task: " + name
)
4828 def _map_primitive_params(primitive_desc
, params
, instantiation_params
):
4830 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4831 The default-value is used. If it is between < > it look for a value at instantiation_params
4832 :param primitive_desc: portion of VNFD/NSD that describes primitive
4833 :param params: Params provided by user
4834 :param instantiation_params: Instantiation params provided by user
4835 :return: a dictionary with the calculated params
4837 calculated_params
= {}
4838 for parameter
in primitive_desc
.get("parameter", ()):
4839 param_name
= parameter
["name"]
4840 if param_name
in params
:
4841 calculated_params
[param_name
] = params
[param_name
]
4842 elif "default-value" in parameter
or "value" in parameter
:
4843 if "value" in parameter
:
4844 calculated_params
[param_name
] = parameter
["value"]
4846 calculated_params
[param_name
] = parameter
["default-value"]
4848 isinstance(calculated_params
[param_name
], str)
4849 and calculated_params
[param_name
].startswith("<")
4850 and calculated_params
[param_name
].endswith(">")
4852 if calculated_params
[param_name
][1:-1] in instantiation_params
:
4853 calculated_params
[param_name
] = instantiation_params
[
4854 calculated_params
[param_name
][1:-1]
4858 "Parameter {} needed to execute primitive {} not provided".format(
4859 calculated_params
[param_name
], primitive_desc
["name"]
4864 "Parameter {} needed to execute primitive {} not provided".format(
4865 param_name
, primitive_desc
["name"]
4869 if isinstance(calculated_params
[param_name
], (dict, list, tuple)):
4870 calculated_params
[param_name
] = yaml
.safe_dump(
4871 calculated_params
[param_name
], default_flow_style
=True, width
=256
4873 elif isinstance(calculated_params
[param_name
], str) and calculated_params
[
4875 ].startswith("!!yaml "):
4876 calculated_params
[param_name
] = calculated_params
[param_name
][7:]
4877 if parameter
.get("data-type") == "INTEGER":
4879 calculated_params
[param_name
] = int(calculated_params
[param_name
])
4880 except ValueError: # error converting string to int
4882 "Parameter {} of primitive {} must be integer".format(
4883 param_name
, primitive_desc
["name"]
4886 elif parameter
.get("data-type") == "BOOLEAN":
4887 calculated_params
[param_name
] = not (
4888 (str(calculated_params
[param_name
])).lower() == "false"
4891 # add always ns_config_info if primitive name is config
4892 if primitive_desc
["name"] == "config":
4893 if "ns_config_info" in instantiation_params
:
4894 calculated_params
["ns_config_info"] = instantiation_params
[
4897 return calculated_params
4899 def _look_for_deployed_vca(
4906 ee_descriptor_id
=None,
4908 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4909 for vca
in deployed_vca
:
4912 if member_vnf_index
!= vca
["member-vnf-index"] or vdu_id
!= vca
["vdu_id"]:
4915 vdu_count_index
is not None
4916 and vdu_count_index
!= vca
["vdu_count_index"]
4919 if kdu_name
and kdu_name
!= vca
["kdu_name"]:
4921 if ee_descriptor_id
and ee_descriptor_id
!= vca
["ee_descriptor_id"]:
4925 # vca_deployed not found
4927 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4928 " is not deployed".format(
4937 ee_id
= vca
.get("ee_id")
4939 "type", "lxc_proxy_charm"
4940 ) # default value for backward compatibility - proxy charm
4943 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4944 "execution environment".format(
4945 member_vnf_index
, vdu_id
, kdu_name
, vdu_count_index
4948 return ee_id
, vca_type
4950 async def _ns_execute_primitive(
4956 retries_interval
=30,
4963 if primitive
== "config":
4964 primitive_params
= {"params": primitive_params
}
4966 vca_type
= vca_type
or "lxc_proxy_charm"
4970 output
= await asyncio
.wait_for(
4971 self
.vca_map
[vca_type
].exec_primitive(
4973 primitive_name
=primitive
,
4974 params_dict
=primitive_params
,
4975 progress_timeout
=self
.timeout
.progress_primitive
,
4976 total_timeout
=self
.timeout
.primitive
,
4981 timeout
=timeout
or self
.timeout
.primitive
,
4985 except asyncio
.CancelledError
:
4987 except Exception as e
:
4991 "Error executing action {} on {} -> {}".format(
4996 await asyncio
.sleep(retries_interval
)
4998 if isinstance(e
, asyncio
.TimeoutError
):
5000 message
="Timed out waiting for action to complete"
5002 return "FAILED", getattr(e
, "message", repr(e
))
5004 return "COMPLETED", output
5006 except (LcmException
, asyncio
.CancelledError
):
5008 except Exception as e
:
5009 return "FAIL", "Error executing action {}: {}".format(primitive
, e
)
5011 async def vca_status_refresh(self
, nsr_id
, nslcmop_id
):
5013 Updating the vca_status with latest juju information in nsrs record
5014 :param: nsr_id: Id of the nsr
5015 :param: nslcmop_id: Id of the nslcmop
5019 self
.logger
.debug("Task ns={} action={} Enter".format(nsr_id
, nslcmop_id
))
5020 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5021 vca_id
= self
.get_vca_id({}, db_nsr
)
5022 if db_nsr
["_admin"]["deployed"]["K8s"]:
5023 for _
, k8s
in enumerate(db_nsr
["_admin"]["deployed"]["K8s"]):
5024 cluster_uuid
, kdu_instance
, cluster_type
= (
5025 k8s
["k8scluster-uuid"],
5026 k8s
["kdu-instance"],
5027 k8s
["k8scluster-type"],
5029 await self
._on
_update
_k
8s
_db
(
5030 cluster_uuid
=cluster_uuid
,
5031 kdu_instance
=kdu_instance
,
5032 filter={"_id": nsr_id
},
5034 cluster_type
=cluster_type
,
5036 if db_nsr
["_admin"]["deployed"]["VCA"]:
5037 for vca_index
, _
in enumerate(db_nsr
["_admin"]["deployed"]["VCA"]):
5038 table
, filter = "nsrs", {"_id": nsr_id
}
5039 path
= "_admin.deployed.VCA.{}.".format(vca_index
)
5040 await self
._on
_update
_n
2vc
_db
(table
, filter, path
, {})
5042 self
.logger
.debug("Task ns={} action={} Exit".format(nsr_id
, nslcmop_id
))
5043 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_vca_status_refresh")
5045 async def action(self
, nsr_id
, nslcmop_id
):
5046 # Try to lock HA task here
5047 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5048 if not task_is_locked_by_me
:
5051 logging_text
= "Task ns={} action={} ".format(nsr_id
, nslcmop_id
)
5052 self
.logger
.debug(logging_text
+ "Enter")
5053 # get all needed from database
5057 db_nslcmop_update
= {}
5058 nslcmop_operation_state
= None
5059 error_description_nslcmop
= None
5063 # wait for any previous tasks in process
5064 step
= "Waiting for previous operations to terminate"
5065 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5067 self
._write
_ns
_status
(
5070 current_operation
="RUNNING ACTION",
5071 current_operation_id
=nslcmop_id
,
5074 step
= "Getting information from database"
5075 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5076 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5077 if db_nslcmop
["operationParams"].get("primitive_params"):
5078 db_nslcmop
["operationParams"]["primitive_params"] = json
.loads(
5079 db_nslcmop
["operationParams"]["primitive_params"]
5082 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5083 vnf_index
= db_nslcmop
["operationParams"].get("member_vnf_index")
5084 vdu_id
= db_nslcmop
["operationParams"].get("vdu_id")
5085 kdu_name
= db_nslcmop
["operationParams"].get("kdu_name")
5086 vdu_count_index
= db_nslcmop
["operationParams"].get("vdu_count_index")
5087 primitive
= db_nslcmop
["operationParams"]["primitive"]
5088 primitive_params
= db_nslcmop
["operationParams"]["primitive_params"]
5089 timeout_ns_action
= db_nslcmop
["operationParams"].get(
5090 "timeout_ns_action", self
.timeout
.primitive
5094 step
= "Getting vnfr from database"
5095 db_vnfr
= self
.db
.get_one(
5096 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
5098 if db_vnfr
.get("kdur"):
5100 for kdur
in db_vnfr
["kdur"]:
5101 if kdur
.get("additionalParams"):
5102 kdur
["additionalParams"] = json
.loads(
5103 kdur
["additionalParams"]
5105 kdur_list
.append(kdur
)
5106 db_vnfr
["kdur"] = kdur_list
5107 step
= "Getting vnfd from database"
5108 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
5110 # Sync filesystem before running a primitive
5111 self
.fs
.sync(db_vnfr
["vnfd-id"])
5113 step
= "Getting nsd from database"
5114 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
5116 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5117 # for backward compatibility
5118 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
5119 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
5120 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
5121 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5123 # look for primitive
5124 config_primitive_desc
= descriptor_configuration
= None
5126 descriptor_configuration
= get_configuration(db_vnfd
, vdu_id
)
5128 descriptor_configuration
= get_configuration(db_vnfd
, kdu_name
)
5130 descriptor_configuration
= get_configuration(db_vnfd
, db_vnfd
["id"])
5132 descriptor_configuration
= db_nsd
.get("ns-configuration")
5134 if descriptor_configuration
and descriptor_configuration
.get(
5137 for config_primitive
in descriptor_configuration
["config-primitive"]:
5138 if config_primitive
["name"] == primitive
:
5139 config_primitive_desc
= config_primitive
5142 if not config_primitive_desc
:
5143 if not (kdu_name
and primitive
in ("upgrade", "rollback", "status")):
5145 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5149 primitive_name
= primitive
5150 ee_descriptor_id
= None
5152 primitive_name
= config_primitive_desc
.get(
5153 "execution-environment-primitive", primitive
5155 ee_descriptor_id
= config_primitive_desc
.get(
5156 "execution-environment-ref"
5162 (x
for x
in db_vnfr
["vdur"] if x
["vdu-id-ref"] == vdu_id
), None
5164 desc_params
= parse_yaml_strings(vdur
.get("additionalParams"))
5167 (x
for x
in db_vnfr
["kdur"] if x
["kdu-name"] == kdu_name
), None
5169 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
5171 desc_params
= parse_yaml_strings(
5172 db_vnfr
.get("additionalParamsForVnf")
5175 desc_params
= parse_yaml_strings(db_nsr
.get("additionalParamsForNs"))
5176 if kdu_name
and get_configuration(db_vnfd
, kdu_name
):
5177 kdu_configuration
= get_configuration(db_vnfd
, kdu_name
)
5179 for primitive
in kdu_configuration
.get("initial-config-primitive", []):
5180 actions
.add(primitive
["name"])
5181 for primitive
in kdu_configuration
.get("config-primitive", []):
5182 actions
.add(primitive
["name"])
5184 nsr_deployed
["K8s"],
5185 lambda kdu
: kdu_name
== kdu
["kdu-name"]
5186 and kdu
["member-vnf-index"] == vnf_index
,
5190 if primitive_name
in actions
5191 and kdu
["k8scluster-type"] != "helm-chart-v3"
5195 # TODO check if ns is in a proper status
5197 primitive_name
in ("upgrade", "rollback", "status") or kdu_action
5199 # kdur and desc_params already set from before
5200 if primitive_params
:
5201 desc_params
.update(primitive_params
)
5202 # TODO Check if we will need something at vnf level
5203 for index
, kdu
in enumerate(get_iterable(nsr_deployed
, "K8s")):
5205 kdu_name
== kdu
["kdu-name"]
5206 and kdu
["member-vnf-index"] == vnf_index
5211 "KDU '{}' for vnf '{}' not deployed".format(kdu_name
, vnf_index
)
5214 if kdu
.get("k8scluster-type") not in self
.k8scluster_map
:
5215 msg
= "unknown k8scluster-type '{}'".format(
5216 kdu
.get("k8scluster-type")
5218 raise LcmException(msg
)
5221 "collection": "nsrs",
5222 "filter": {"_id": nsr_id
},
5223 "path": "_admin.deployed.K8s.{}".format(index
),
5227 + "Exec k8s {} on {}.{}".format(primitive_name
, vnf_index
, kdu_name
)
5229 step
= "Executing kdu {}".format(primitive_name
)
5230 if primitive_name
== "upgrade":
5231 if desc_params
.get("kdu_model"):
5232 kdu_model
= desc_params
.get("kdu_model")
5233 del desc_params
["kdu_model"]
5235 kdu_model
= kdu
.get("kdu-model")
5236 if kdu_model
.count("/") < 2: # helm chart is not embedded
5237 parts
= kdu_model
.split(sep
=":")
5239 kdu_model
= parts
[0]
5240 if desc_params
.get("kdu_atomic_upgrade"):
5241 atomic_upgrade
= desc_params
.get(
5242 "kdu_atomic_upgrade"
5243 ).lower() in ("yes", "true", "1")
5244 del desc_params
["kdu_atomic_upgrade"]
5246 atomic_upgrade
= True
5248 detailed_status
= await asyncio
.wait_for(
5249 self
.k8scluster_map
[kdu
["k8scluster-type"]].upgrade(
5250 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5251 kdu_instance
=kdu
.get("kdu-instance"),
5252 atomic
=atomic_upgrade
,
5253 kdu_model
=kdu_model
,
5256 timeout
=timeout_ns_action
,
5258 timeout
=timeout_ns_action
+ 10,
5261 logging_text
+ " Upgrade of kdu {} done".format(detailed_status
)
5263 elif primitive_name
== "rollback":
5264 detailed_status
= await asyncio
.wait_for(
5265 self
.k8scluster_map
[kdu
["k8scluster-type"]].rollback(
5266 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5267 kdu_instance
=kdu
.get("kdu-instance"),
5270 timeout
=timeout_ns_action
,
5272 elif primitive_name
== "status":
5273 detailed_status
= await asyncio
.wait_for(
5274 self
.k8scluster_map
[kdu
["k8scluster-type"]].status_kdu(
5275 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5276 kdu_instance
=kdu
.get("kdu-instance"),
5279 timeout
=timeout_ns_action
,
5282 kdu_instance
= kdu
.get("kdu-instance") or "{}-{}".format(
5283 kdu
["kdu-name"], nsr_id
5285 params
= self
._map
_primitive
_params
(
5286 config_primitive_desc
, primitive_params
, desc_params
5289 detailed_status
= await asyncio
.wait_for(
5290 self
.k8scluster_map
[kdu
["k8scluster-type"]].exec_primitive(
5291 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5292 kdu_instance
=kdu_instance
,
5293 primitive_name
=primitive_name
,
5296 timeout
=timeout_ns_action
,
5299 timeout
=timeout_ns_action
,
5303 nslcmop_operation_state
= "COMPLETED"
5305 detailed_status
= ""
5306 nslcmop_operation_state
= "FAILED"
5308 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
5309 nsr_deployed
["VCA"],
5310 member_vnf_index
=vnf_index
,
5312 vdu_count_index
=vdu_count_index
,
5313 ee_descriptor_id
=ee_descriptor_id
,
5315 for vca_index
, vca_deployed
in enumerate(
5316 db_nsr
["_admin"]["deployed"]["VCA"]
5318 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5320 "collection": "nsrs",
5321 "filter": {"_id": nsr_id
},
5322 "path": "_admin.deployed.VCA.{}.".format(vca_index
),
5326 nslcmop_operation_state
,
5328 ) = await self
._ns
_execute
_primitive
(
5330 primitive
=primitive_name
,
5331 primitive_params
=self
._map
_primitive
_params
(
5332 config_primitive_desc
, primitive_params
, desc_params
5334 timeout
=timeout_ns_action
,
5340 db_nslcmop_update
["detailed-status"] = detailed_status
5341 error_description_nslcmop
= (
5342 detailed_status
if nslcmop_operation_state
== "FAILED" else ""
5346 + "Done with result {} {}".format(
5347 nslcmop_operation_state
, detailed_status
5350 return # database update is called inside finally
5352 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
5353 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
5355 except asyncio
.CancelledError
:
5357 logging_text
+ "Cancelled Exception while '{}'".format(step
)
5359 exc
= "Operation was cancelled"
5360 except asyncio
.TimeoutError
:
5361 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
5363 except Exception as e
:
5364 exc
= traceback
.format_exc()
5365 self
.logger
.critical(
5366 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
5375 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
5376 nslcmop_operation_state
= "FAILED"
5378 self
._write
_ns
_status
(
5382 ], # TODO check if degraded. For the moment use previous status
5383 current_operation
="IDLE",
5384 current_operation_id
=None,
5385 # error_description=error_description_nsr,
5386 # error_detail=error_detail,
5387 other_update
=db_nsr_update
,
5390 self
._write
_op
_status
(
5393 error_message
=error_description_nslcmop
,
5394 operation_state
=nslcmop_operation_state
,
5395 other_update
=db_nslcmop_update
,
5398 if nslcmop_operation_state
:
5400 await self
.msg
.aiowrite(
5405 "nslcmop_id": nslcmop_id
,
5406 "operationState": nslcmop_operation_state
,
5409 except Exception as e
:
5411 logging_text
+ "kafka_write notification Exception {}".format(e
)
5413 self
.logger
.debug(logging_text
+ "Exit")
5414 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_action")
5415 return nslcmop_operation_state
, detailed_status
5417 async def terminate_vdus(
5418 self
, db_vnfr
, member_vnf_index
, db_nsr
, update_db_nslcmops
, stage
, logging_text
5420 """This method terminates VDUs
5423 db_vnfr: VNF instance record
5424 member_vnf_index: VNF index to identify the VDUs to be removed
5425 db_nsr: NS instance record
5426 update_db_nslcmops: Nslcmop update record
5428 vca_scaling_info
= []
5429 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5430 scaling_info
["scaling_direction"] = "IN"
5431 scaling_info
["vdu-delete"] = {}
5432 scaling_info
["kdu-delete"] = {}
5433 db_vdur
= db_vnfr
.get("vdur")
5434 vdur_list
= copy(db_vdur
)
5436 for index
, vdu
in enumerate(vdur_list
):
5437 vca_scaling_info
.append(
5439 "osm_vdu_id": vdu
["vdu-id-ref"],
5440 "member-vnf-index": member_vnf_index
,
5442 "vdu_index": count_index
,
5445 scaling_info
["vdu-delete"][vdu
["vdu-id-ref"]] = count_index
5446 scaling_info
["vdu"].append(
5448 "name": vdu
.get("name") or vdu
.get("vdu-name"),
5449 "vdu_id": vdu
["vdu-id-ref"],
5453 for interface
in vdu
["interfaces"]:
5454 scaling_info
["vdu"][index
]["interface"].append(
5456 "name": interface
["name"],
5457 "ip_address": interface
["ip-address"],
5458 "mac_address": interface
.get("mac-address"),
5461 self
.logger
.info("NS update scaling info{}".format(scaling_info
))
5462 stage
[2] = "Terminating VDUs"
5463 if scaling_info
.get("vdu-delete"):
5464 # scale_process = "RO"
5465 if self
.ro_config
.ng
:
5466 await self
._scale
_ng
_ro
(
5475 async def remove_vnf(self
, nsr_id
, nslcmop_id
, vnf_instance_id
):
5476 """This method is to Remove VNF instances from NS.
5479 nsr_id: NS instance id
5480 nslcmop_id: nslcmop id of update
5481 vnf_instance_id: id of the VNF instance to be removed
5484 result: (str, str) COMPLETED/FAILED, details
5488 logging_text
= "Task ns={} update ".format(nsr_id
)
5489 check_vnfr_count
= len(self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}))
5490 self
.logger
.info("check_vnfr_count {}".format(check_vnfr_count
))
5491 if check_vnfr_count
> 1:
5492 stage
= ["", "", ""]
5493 step
= "Getting nslcmop from database"
5495 step
+ " after having waited for previous tasks to be completed"
5497 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5498 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5499 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
5500 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5501 """ db_vnfr = self.db.get_one(
5502 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5504 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5505 await self
.terminate_vdus(
5514 constituent_vnfr
= db_nsr
.get("constituent-vnfr-ref")
5515 constituent_vnfr
.remove(db_vnfr
.get("_id"))
5516 db_nsr_update
["constituent-vnfr-ref"] = db_nsr
.get(
5517 "constituent-vnfr-ref"
5519 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5520 self
.db
.del_one("vnfrs", {"_id": db_vnfr
.get("_id")})
5521 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5522 return "COMPLETED", "Done"
5524 step
= "Terminate VNF Failed with"
5526 "{} Cannot terminate the last VNF in this NS.".format(
5530 except (LcmException
, asyncio
.CancelledError
):
5532 except Exception as e
:
5533 self
.logger
.debug("Error removing VNF {}".format(e
))
5534 return "FAILED", "Error removing VNF {}".format(e
)
5536 async def _ns_redeploy_vnf(
5544 """This method updates and redeploys VNF instances
5547 nsr_id: NS instance id
5548 nslcmop_id: nslcmop id
5549 db_vnfd: VNF descriptor
5550 db_vnfr: VNF instance record
5551 db_nsr: NS instance record
5554 result: (str, str) COMPLETED/FAILED, details
5558 stage
= ["", "", ""]
5559 logging_text
= "Task ns={} update ".format(nsr_id
)
5560 latest_vnfd_revision
= db_vnfd
["_admin"].get("revision")
5561 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5563 # Terminate old VNF resources
5564 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5565 await self
.terminate_vdus(
5574 # old_vnfd_id = db_vnfr["vnfd-id"]
5575 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5576 new_db_vnfd
= db_vnfd
5577 # new_vnfd_ref = new_db_vnfd["id"]
5578 # new_vnfd_id = vnfd_id
5582 for cp
in new_db_vnfd
.get("ext-cpd", ()):
5584 "name": cp
.get("id"),
5585 "connection-point-id": cp
.get("int-cpd", {}).get("cpd"),
5586 "connection-point-vdu-id": cp
.get("int-cpd", {}).get("vdu-id"),
5589 new_vnfr_cp
.append(vnf_cp
)
5590 new_vdur
= update_db_nslcmops
["operationParams"]["newVdur"]
5591 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5592 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5594 "revision": latest_vnfd_revision
,
5595 "connection-point": new_vnfr_cp
,
5599 self
.update_db_2("vnfrs", db_vnfr
["_id"], new_vnfr_update
)
5600 updated_db_vnfr
= self
.db
.get_one(
5602 {"member-vnf-index-ref": member_vnf_index
, "nsr-id-ref": nsr_id
},
5605 # Instantiate new VNF resources
5606 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5607 vca_scaling_info
= []
5608 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5609 scaling_info
["scaling_direction"] = "OUT"
5610 scaling_info
["vdu-create"] = {}
5611 scaling_info
["kdu-create"] = {}
5612 vdud_instantiate_list
= db_vnfd
["vdu"]
5613 for index
, vdud
in enumerate(vdud_instantiate_list
):
5614 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(vdud
, db_vnfd
)
5616 additional_params
= (
5617 self
._get
_vdu
_additional
_params
(updated_db_vnfr
, vdud
["id"])
5620 cloud_init_list
= []
5622 # TODO Information of its own ip is not available because db_vnfr is not updated.
5623 additional_params
["OSM"] = get_osm_params(
5624 updated_db_vnfr
, vdud
["id"], 1
5626 cloud_init_list
.append(
5627 self
._parse
_cloud
_init
(
5634 vca_scaling_info
.append(
5636 "osm_vdu_id": vdud
["id"],
5637 "member-vnf-index": member_vnf_index
,
5639 "vdu_index": count_index
,
5642 scaling_info
["vdu-create"][vdud
["id"]] = count_index
5643 if self
.ro_config
.ng
:
5645 "New Resources to be deployed: {}".format(scaling_info
)
5647 await self
._scale
_ng
_ro
(
5655 return "COMPLETED", "Done"
5656 except (LcmException
, asyncio
.CancelledError
):
5658 except Exception as e
:
5659 self
.logger
.debug("Error updating VNF {}".format(e
))
5660 return "FAILED", "Error updating VNF {}".format(e
)
5662 async def _ns_charm_upgrade(
5668 timeout
: float = None,
5670 """This method upgrade charms in VNF instances
5673 ee_id: Execution environment id
5674 path: Local path to the charm
5676 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5677 timeout: (Float) Timeout for the ns update operation
5680 result: (str, str) COMPLETED/FAILED, details
5683 charm_type
= charm_type
or "lxc_proxy_charm"
5684 output
= await self
.vca_map
[charm_type
].upgrade_charm(
5688 charm_type
=charm_type
,
5689 timeout
=timeout
or self
.timeout
.ns_update
,
5693 return "COMPLETED", output
5695 except (LcmException
, asyncio
.CancelledError
):
5698 except Exception as e
:
5699 self
.logger
.debug("Error upgrading charm {}".format(path
))
5701 return "FAILED", "Error upgrading charm {}: {}".format(path
, e
)
5703 async def update(self
, nsr_id
, nslcmop_id
):
5704 """Update NS according to different update types
5706 This method performs upgrade of VNF instances then updates the revision
5707 number in VNF record
5710 nsr_id: Network service will be updated
5711 nslcmop_id: ns lcm operation id
5714 It may raise DbException, LcmException, N2VCException, K8sException
5717 # Try to lock HA task here
5718 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5719 if not task_is_locked_by_me
:
5722 logging_text
= "Task ns={} update={} ".format(nsr_id
, nslcmop_id
)
5723 self
.logger
.debug(logging_text
+ "Enter")
5725 # Set the required variables to be filled up later
5727 db_nslcmop_update
= {}
5729 nslcmop_operation_state
= None
5731 error_description_nslcmop
= ""
5733 change_type
= "updated"
5734 detailed_status
= ""
5735 member_vnf_index
= None
5738 # wait for any previous tasks in process
5739 step
= "Waiting for previous operations to terminate"
5740 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5741 self
._write
_ns
_status
(
5744 current_operation
="UPDATING",
5745 current_operation_id
=nslcmop_id
,
5748 step
= "Getting nslcmop from database"
5749 db_nslcmop
= self
.db
.get_one(
5750 "nslcmops", {"_id": nslcmop_id
}, fail_on_empty
=False
5752 update_type
= db_nslcmop
["operationParams"]["updateType"]
5754 step
= "Getting nsr from database"
5755 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5756 old_operational_status
= db_nsr
["operational-status"]
5757 db_nsr_update
["operational-status"] = "updating"
5758 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5759 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5761 if update_type
== "CHANGE_VNFPKG":
5762 # Get the input parameters given through update request
5763 vnf_instance_id
= db_nslcmop
["operationParams"][
5764 "changeVnfPackageData"
5765 ].get("vnfInstanceId")
5767 vnfd_id
= db_nslcmop
["operationParams"]["changeVnfPackageData"].get(
5770 timeout_seconds
= db_nslcmop
["operationParams"].get("timeout_ns_update")
5772 step
= "Getting vnfr from database"
5773 db_vnfr
= self
.db
.get_one(
5774 "vnfrs", {"_id": vnf_instance_id
}, fail_on_empty
=False
5777 step
= "Getting vnfds from database"
5779 latest_vnfd
= self
.db
.get_one(
5780 "vnfds", {"_id": vnfd_id
}, fail_on_empty
=False
5782 latest_vnfd_revision
= latest_vnfd
["_admin"].get("revision")
5785 current_vnf_revision
= db_vnfr
.get("revision", 1)
5786 current_vnfd
= self
.db
.get_one(
5788 {"_id": vnfd_id
+ ":" + str(current_vnf_revision
)},
5789 fail_on_empty
=False,
5791 # Charm artifact paths will be filled up later
5793 current_charm_artifact_path
,
5794 target_charm_artifact_path
,
5795 charm_artifact_paths
,
5797 ) = ([], [], [], [])
5799 step
= "Checking if revision has changed in VNFD"
5800 if current_vnf_revision
!= latest_vnfd_revision
:
5801 change_type
= "policy_updated"
5803 # There is new revision of VNFD, update operation is required
5804 current_vnfd_path
= vnfd_id
+ ":" + str(current_vnf_revision
)
5805 latest_vnfd_path
= vnfd_id
+ ":" + str(latest_vnfd_revision
)
5807 step
= "Removing the VNFD packages if they exist in the local path"
5808 shutil
.rmtree(self
.fs
.path
+ current_vnfd_path
, ignore_errors
=True)
5809 shutil
.rmtree(self
.fs
.path
+ latest_vnfd_path
, ignore_errors
=True)
5811 step
= "Get the VNFD packages from FSMongo"
5812 self
.fs
.sync(from_path
=latest_vnfd_path
)
5813 self
.fs
.sync(from_path
=current_vnfd_path
)
5816 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5818 current_base_folder
= current_vnfd
["_admin"]["storage"]
5819 latest_base_folder
= latest_vnfd
["_admin"]["storage"]
5821 for vca_index
, vca_deployed
in enumerate(
5822 get_iterable(nsr_deployed
, "VCA")
5824 vnf_index
= db_vnfr
.get("member-vnf-index-ref")
5826 # Getting charm-id and charm-type
5827 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5828 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5829 vca_type
= vca_deployed
.get("type")
5830 vdu_count_index
= vca_deployed
.get("vdu_count_index")
5833 ee_id
= vca_deployed
.get("ee_id")
5835 step
= "Getting descriptor config"
5836 if current_vnfd
.get("kdu"):
5837 search_key
= "kdu_name"
5839 search_key
= "vnfd_id"
5841 entity_id
= vca_deployed
.get(search_key
)
5843 descriptor_config
= get_configuration(
5844 current_vnfd
, entity_id
5847 if "execution-environment-list" in descriptor_config
:
5848 ee_list
= descriptor_config
.get(
5849 "execution-environment-list", []
5854 # There could be several charm used in the same VNF
5855 for ee_item
in ee_list
:
5856 if ee_item
.get("juju"):
5857 step
= "Getting charm name"
5858 charm_name
= ee_item
["juju"].get("charm")
5860 step
= "Setting Charm artifact paths"
5861 current_charm_artifact_path
.append(
5862 get_charm_artifact_path(
5863 current_base_folder
,
5866 current_vnf_revision
,
5869 target_charm_artifact_path
.append(
5870 get_charm_artifact_path(
5874 latest_vnfd_revision
,
5877 elif ee_item
.get("helm-chart"):
5878 # add chart to list and all parameters
5879 step
= "Getting helm chart name"
5880 chart_name
= ee_item
.get("helm-chart")
5881 vca_type
= "helm-v3"
5882 step
= "Setting Helm chart artifact paths"
5884 helm_artifacts
.append(
5886 "current_artifact_path": get_charm_artifact_path(
5887 current_base_folder
,
5890 current_vnf_revision
,
5892 "target_artifact_path": get_charm_artifact_path(
5896 latest_vnfd_revision
,
5899 "vca_index": vca_index
,
5900 "vdu_index": vdu_count_index
,
5904 charm_artifact_paths
= zip(
5905 current_charm_artifact_path
, target_charm_artifact_path
5908 step
= "Checking if software version has changed in VNFD"
5909 if find_software_version(current_vnfd
) != find_software_version(
5912 step
= "Checking if existing VNF has charm"
5913 for current_charm_path
, target_charm_path
in list(
5914 charm_artifact_paths
5916 if current_charm_path
:
5918 "Software version change is not supported as VNF instance {} has charm.".format(
5923 step
= "Checking whether the descriptor has SFC"
5924 if db_nsr
.get("nsd", {}).get("vnffgd"):
5926 "Ns update is not allowed for NS with SFC"
5929 # There is no change in the charm package, then redeploy the VNF
5930 # based on new descriptor
5931 step
= "Redeploying VNF"
5932 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5933 (result
, detailed_status
) = await self
._ns
_redeploy
_vnf
(
5934 nsr_id
, nslcmop_id
, latest_vnfd
, db_vnfr
, db_nsr
5936 if result
== "FAILED":
5937 nslcmop_operation_state
= result
5938 error_description_nslcmop
= detailed_status
5939 old_operational_status
= "failed"
5940 db_nslcmop_update
["detailed-status"] = detailed_status
5941 db_nsr_update
["detailed-status"] = detailed_status
5942 scaling_aspect
= get_scaling_aspect(latest_vnfd
)
5943 scaling_group_desc
= db_nsr
.get("_admin").get(
5944 "scaling-group", None
5946 if scaling_group_desc
:
5947 for aspect
in scaling_aspect
:
5948 scaling_group_id
= aspect
.get("id")
5949 for scale_index
, scaling_group
in enumerate(
5952 if scaling_group
.get("name") == scaling_group_id
:
5954 "_admin.scaling-group.{}.nb-scale-op".format(
5960 + " step {} Done with result {} {}".format(
5961 step
, nslcmop_operation_state
, detailed_status
5966 step
= "Checking if any charm package has changed or not"
5967 for current_charm_path
, target_charm_path
in list(
5968 charm_artifact_paths
5972 and target_charm_path
5973 and self
.check_charm_hash_changed(
5974 current_charm_path
, target_charm_path
5977 step
= "Checking whether VNF uses juju bundle"
5978 if check_juju_bundle_existence(current_vnfd
):
5980 "Charm upgrade is not supported for the instance which"
5981 " uses juju-bundle: {}".format(
5982 check_juju_bundle_existence(current_vnfd
)
5986 step
= "Upgrading Charm"
5990 ) = await self
._ns
_charm
_upgrade
(
5993 charm_type
=vca_type
,
5994 path
=self
.fs
.path
+ target_charm_path
,
5995 timeout
=timeout_seconds
,
5998 if result
== "FAILED":
5999 nslcmop_operation_state
= result
6000 error_description_nslcmop
= detailed_status
6002 db_nslcmop_update
["detailed-status"] = detailed_status
6005 + " step {} Done with result {} {}".format(
6006 step
, nslcmop_operation_state
, detailed_status
6010 step
= "Updating policies"
6011 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6012 result
= "COMPLETED"
6013 detailed_status
= "Done"
6014 db_nslcmop_update
["detailed-status"] = "Done"
6017 for item
in helm_artifacts
:
6019 item
["current_artifact_path"]
6020 and item
["target_artifact_path"]
6021 and self
.check_charm_hash_changed(
6022 item
["current_artifact_path"],
6023 item
["target_artifact_path"],
6027 db_update_entry
= "_admin.deployed.VCA.{}.".format(
6030 vnfr_id
= db_vnfr
["_id"]
6031 osm_config
= {"osm": {"ns_id": nsr_id
, "vnf_id": vnfr_id
}}
6033 "collection": "nsrs",
6034 "filter": {"_id": nsr_id
},
6035 "path": db_update_entry
,
6037 vca_type
, namespace
, helm_id
= get_ee_id_parts(item
["ee_id"])
6038 await self
.vca_map
[vca_type
].upgrade_execution_environment(
6039 namespace
=namespace
,
6043 artifact_path
=item
["target_artifact_path"],
6046 vnf_id
= db_vnfr
.get("vnfd-ref")
6047 config_descriptor
= get_configuration(latest_vnfd
, vnf_id
)
6048 self
.logger
.debug("get ssh key block")
6052 ("config-access", "ssh-access", "required"),
6054 # Needed to inject a ssh key
6057 ("config-access", "ssh-access", "default-user"),
6060 "Install configuration Software, getting public ssh key"
6062 pub_key
= await self
.vca_map
[
6064 ].get_ee_ssh_public__key(
6065 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
6069 "Insert public key into VM user={} ssh_key={}".format(
6073 self
.logger
.debug(logging_text
+ step
)
6075 # wait for RO (ip-address) Insert pub_key into VM
6076 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
6086 initial_config_primitive_list
= config_descriptor
.get(
6087 "initial-config-primitive"
6089 config_primitive
= next(
6092 for p
in initial_config_primitive_list
6093 if p
["name"] == "config"
6097 if not config_primitive
:
6100 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6102 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
6103 if db_vnfr
.get("additionalParamsForVnf"):
6104 deploy_params
.update(
6106 db_vnfr
["additionalParamsForVnf"].copy()
6109 primitive_params_
= self
._map
_primitive
_params
(
6110 config_primitive
, {}, deploy_params
6113 step
= "execute primitive '{}' params '{}'".format(
6114 config_primitive
["name"], primitive_params_
6116 self
.logger
.debug(logging_text
+ step
)
6117 await self
.vca_map
[vca_type
].exec_primitive(
6119 primitive_name
=config_primitive
["name"],
6120 params_dict
=primitive_params_
,
6126 step
= "Updating policies"
6127 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6128 detailed_status
= "Done"
6129 db_nslcmop_update
["detailed-status"] = "Done"
6131 # If nslcmop_operation_state is None, so any operation is not failed.
6132 if not nslcmop_operation_state
:
6133 nslcmop_operation_state
= "COMPLETED"
6135 # If update CHANGE_VNFPKG nslcmop_operation is successful
6136 # vnf revision need to be updated
6137 vnfr_update
["revision"] = latest_vnfd_revision
6138 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
6142 + " task Done with result {} {}".format(
6143 nslcmop_operation_state
, detailed_status
6146 elif update_type
== "REMOVE_VNF":
6147 # This part is included in https://osm.etsi.org/gerrit/11876
6148 vnf_instance_id
= db_nslcmop
["operationParams"]["removeVnfInstanceId"]
6149 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
6150 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6151 step
= "Removing VNF"
6152 (result
, detailed_status
) = await self
.remove_vnf(
6153 nsr_id
, nslcmop_id
, vnf_instance_id
6155 if result
== "FAILED":
6156 nslcmop_operation_state
= result
6157 error_description_nslcmop
= detailed_status
6158 db_nslcmop_update
["detailed-status"] = detailed_status
6159 change_type
= "vnf_terminated"
6160 if not nslcmop_operation_state
:
6161 nslcmop_operation_state
= "COMPLETED"
6164 + " task Done with result {} {}".format(
6165 nslcmop_operation_state
, detailed_status
6169 elif update_type
== "OPERATE_VNF":
6170 vnf_id
= db_nslcmop
["operationParams"]["operateVnfData"][
6173 operation_type
= db_nslcmop
["operationParams"]["operateVnfData"][
6176 additional_param
= db_nslcmop
["operationParams"]["operateVnfData"][
6179 (result
, detailed_status
) = await self
.rebuild_start_stop(
6180 nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
6182 if result
== "FAILED":
6183 nslcmop_operation_state
= result
6184 error_description_nslcmop
= detailed_status
6185 db_nslcmop_update
["detailed-status"] = detailed_status
6186 if not nslcmop_operation_state
:
6187 nslcmop_operation_state
= "COMPLETED"
6190 + " task Done with result {} {}".format(
6191 nslcmop_operation_state
, detailed_status
6195 # If nslcmop_operation_state is None, so any operation is not failed.
6196 # All operations are executed in overall.
6197 if not nslcmop_operation_state
:
6198 nslcmop_operation_state
= "COMPLETED"
6199 db_nsr_update
["operational-status"] = old_operational_status
6201 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
6202 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
6204 except asyncio
.CancelledError
:
6206 logging_text
+ "Cancelled Exception while '{}'".format(step
)
6208 exc
= "Operation was cancelled"
6209 except asyncio
.TimeoutError
:
6210 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
6212 except Exception as e
:
6213 exc
= traceback
.format_exc()
6214 self
.logger
.critical(
6215 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
6224 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
6225 nslcmop_operation_state
= "FAILED"
6226 db_nsr_update
["operational-status"] = old_operational_status
6228 self
._write
_ns
_status
(
6230 ns_state
=db_nsr
["nsState"],
6231 current_operation
="IDLE",
6232 current_operation_id
=None,
6233 other_update
=db_nsr_update
,
6236 self
._write
_op
_status
(
6239 error_message
=error_description_nslcmop
,
6240 operation_state
=nslcmop_operation_state
,
6241 other_update
=db_nslcmop_update
,
6244 if nslcmop_operation_state
:
6248 "nslcmop_id": nslcmop_id
,
6249 "operationState": nslcmop_operation_state
,
6252 change_type
in ("vnf_terminated", "policy_updated")
6253 and member_vnf_index
6255 msg
.update({"vnf_member_index": member_vnf_index
})
6256 await self
.msg
.aiowrite("ns", change_type
, msg
)
6257 except Exception as e
:
6259 logging_text
+ "kafka_write notification Exception {}".format(e
)
6261 self
.logger
.debug(logging_text
+ "Exit")
6262 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_update")
6263 return nslcmop_operation_state
, detailed_status
6265 async def scale(self
, nsr_id
, nslcmop_id
):
6266 # Try to lock HA task here
6267 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
6268 if not task_is_locked_by_me
:
6271 logging_text
= "Task ns={} scale={} ".format(nsr_id
, nslcmop_id
)
6272 stage
= ["", "", ""]
6273 tasks_dict_info
= {}
6274 # ^ stage, step, VIM progress
6275 self
.logger
.debug(logging_text
+ "Enter")
6276 # get all needed from database
6278 db_nslcmop_update
= {}
6281 # in case of error, indicates what part of scale was failed to put nsr at error status
6282 scale_process
= None
6283 old_operational_status
= ""
6284 old_config_status
= ""
6288 # wait for any previous tasks in process
6289 step
= "Waiting for previous operations to terminate"
6290 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
6291 self
._write
_ns
_status
(
6294 current_operation
="SCALING",
6295 current_operation_id
=nslcmop_id
,
6298 step
= "Getting nslcmop from database"
6300 step
+ " after having waited for previous tasks to be completed"
6302 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
6304 step
= "Getting nsr from database"
6305 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
6306 old_operational_status
= db_nsr
["operational-status"]
6307 old_config_status
= db_nsr
["config-status"]
6309 step
= "Checking whether the descriptor has SFC"
6310 if db_nsr
.get("nsd", {}).get("vnffgd"):
6311 raise LcmException("Scaling is not allowed for NS with SFC")
6313 step
= "Parsing scaling parameters"
6314 db_nsr_update
["operational-status"] = "scaling"
6315 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6316 nsr_deployed
= db_nsr
["_admin"].get("deployed")
6318 vnf_index
= db_nslcmop
["operationParams"]["scaleVnfData"][
6320 ]["member-vnf-index"]
6321 scaling_group
= db_nslcmop
["operationParams"]["scaleVnfData"][
6323 ]["scaling-group-descriptor"]
6324 scaling_type
= db_nslcmop
["operationParams"]["scaleVnfData"]["scaleVnfType"]
6325 # for backward compatibility
6326 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
6327 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
6328 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
6329 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6331 step
= "Getting vnfr from database"
6332 db_vnfr
= self
.db
.get_one(
6333 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
6336 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
6338 step
= "Getting vnfd from database"
6339 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
6341 base_folder
= db_vnfd
["_admin"]["storage"]
6343 step
= "Getting scaling-group-descriptor"
6344 scaling_descriptor
= find_in_list(
6345 get_scaling_aspect(db_vnfd
),
6346 lambda scale_desc
: scale_desc
["name"] == scaling_group
,
6348 if not scaling_descriptor
:
6350 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6351 "at vnfd:scaling-group-descriptor".format(scaling_group
)
6354 step
= "Sending scale order to VIM"
6355 # TODO check if ns is in a proper status
6357 if not db_nsr
["_admin"].get("scaling-group"):
6362 "_admin.scaling-group": [
6364 "name": scaling_group
,
6365 "vnf_index": vnf_index
,
6371 admin_scale_index
= 0
6373 for admin_scale_index
, admin_scale_info
in enumerate(
6374 db_nsr
["_admin"]["scaling-group"]
6377 admin_scale_info
["name"] == scaling_group
6378 and admin_scale_info
["vnf_index"] == vnf_index
6380 nb_scale_op
= admin_scale_info
.get("nb-scale-op", 0)
6382 else: # not found, set index one plus last element and add new entry with the name
6383 admin_scale_index
+= 1
6385 "_admin.scaling-group.{}.name".format(admin_scale_index
)
6388 "_admin.scaling-group.{}.vnf_index".format(admin_scale_index
)
6391 vca_scaling_info
= []
6392 scaling_info
= {"scaling_group_name": scaling_group
, "vdu": [], "kdu": []}
6393 if scaling_type
== "SCALE_OUT":
6394 if "aspect-delta-details" not in scaling_descriptor
:
6396 "Aspect delta details not fount in scaling descriptor {}".format(
6397 scaling_descriptor
["name"]
6400 # count if max-instance-count is reached
6401 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6403 scaling_info
["scaling_direction"] = "OUT"
6404 scaling_info
["vdu-create"] = {}
6405 scaling_info
["kdu-create"] = {}
6406 for delta
in deltas
:
6407 for vdu_delta
in delta
.get("vdu-delta", {}):
6408 vdud
= get_vdu(db_vnfd
, vdu_delta
["id"])
6409 # vdu_index also provides the number of instance of the targeted vdu
6410 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6411 if vdu_index
<= len(db_vnfr
["vdur"]):
6412 vdu_name_id
= db_vnfr
["vdur"][vdu_index
- 1]["vdu-name"]
6414 db_vnfr
["_id"] + vdu_name_id
+ str(vdu_index
- 1)
6416 prom_job_name
= prom_job_name
.replace("_", "")
6417 prom_job_name
= prom_job_name
.replace("-", "")
6419 prom_job_name
= None
6420 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(
6424 additional_params
= (
6425 self
._get
_vdu
_additional
_params
(db_vnfr
, vdud
["id"])
6428 cloud_init_list
= []
6430 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6431 max_instance_count
= 10
6432 if vdu_profile
and "max-number-of-instances" in vdu_profile
:
6433 max_instance_count
= vdu_profile
.get(
6434 "max-number-of-instances", 10
6437 default_instance_num
= get_number_of_instances(
6440 instances_number
= vdu_delta
.get("number-of-instances", 1)
6441 nb_scale_op
+= instances_number
6443 new_instance_count
= nb_scale_op
+ default_instance_num
6444 # Control if new count is over max and vdu count is less than max.
6445 # Then assign new instance count
6446 if new_instance_count
> max_instance_count
> vdu_count
:
6447 instances_number
= new_instance_count
- max_instance_count
6449 instances_number
= instances_number
6451 if new_instance_count
> max_instance_count
:
6453 "reached the limit of {} (max-instance-count) "
6454 "scaling-out operations for the "
6455 "scaling-group-descriptor '{}'".format(
6456 nb_scale_op
, scaling_group
6459 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6461 # TODO Information of its own ip is not available because db_vnfr is not updated.
6462 additional_params
["OSM"] = get_osm_params(
6463 db_vnfr
, vdu_delta
["id"], vdu_index
+ x
6465 cloud_init_list
.append(
6466 self
._parse
_cloud
_init
(
6473 vca_scaling_info
.append(
6475 "osm_vdu_id": vdu_delta
["id"],
6476 "member-vnf-index": vnf_index
,
6478 "vdu_index": vdu_index
+ x
,
6481 scaling_info
["vdu-create"][vdu_delta
["id"]] = instances_number
6482 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6483 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6484 kdu_name
= kdu_profile
["kdu-name"]
6485 resource_name
= kdu_profile
.get("resource-name", "")
6487 # Might have different kdus in the same delta
6488 # Should have list for each kdu
6489 if not scaling_info
["kdu-create"].get(kdu_name
, None):
6490 scaling_info
["kdu-create"][kdu_name
] = []
6492 kdur
= get_kdur(db_vnfr
, kdu_name
)
6493 if kdur
.get("helm-chart"):
6494 k8s_cluster_type
= "helm-chart-v3"
6495 self
.logger
.debug("kdur: {}".format(kdur
))
6496 elif kdur
.get("juju-bundle"):
6497 k8s_cluster_type
= "juju-bundle"
6500 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6501 "juju-bundle. Maybe an old NBI version is running".format(
6502 db_vnfr
["member-vnf-index-ref"], kdu_name
6506 max_instance_count
= 10
6507 if kdu_profile
and "max-number-of-instances" in kdu_profile
:
6508 max_instance_count
= kdu_profile
.get(
6509 "max-number-of-instances", 10
6512 nb_scale_op
+= kdu_delta
.get("number-of-instances", 1)
6513 deployed_kdu
, _
= get_deployed_kdu(
6514 nsr_deployed
, kdu_name
, vnf_index
6516 if deployed_kdu
is None:
6518 "KDU '{}' for vnf '{}' not deployed".format(
6522 kdu_instance
= deployed_kdu
.get("kdu-instance")
6523 instance_num
= await self
.k8scluster_map
[
6529 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6530 kdu_model
=deployed_kdu
.get("kdu-model"),
6532 kdu_replica_count
= instance_num
+ kdu_delta
.get(
6533 "number-of-instances", 1
6536 # Control if new count is over max and instance_num is less than max.
6537 # Then assign max instance number to kdu replica count
6538 if kdu_replica_count
> max_instance_count
> instance_num
:
6539 kdu_replica_count
= max_instance_count
6540 if kdu_replica_count
> max_instance_count
:
6542 "reached the limit of {} (max-instance-count) "
6543 "scaling-out operations for the "
6544 "scaling-group-descriptor '{}'".format(
6545 instance_num
, scaling_group
6549 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6550 vca_scaling_info
.append(
6552 "osm_kdu_id": kdu_name
,
6553 "member-vnf-index": vnf_index
,
6555 "kdu_index": instance_num
+ x
- 1,
6558 scaling_info
["kdu-create"][kdu_name
].append(
6560 "member-vnf-index": vnf_index
,
6562 "k8s-cluster-type": k8s_cluster_type
,
6563 "resource-name": resource_name
,
6564 "scale": kdu_replica_count
,
6567 elif scaling_type
== "SCALE_IN":
6568 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6570 scaling_info
["scaling_direction"] = "IN"
6571 scaling_info
["vdu-delete"] = {}
6572 scaling_info
["kdu-delete"] = {}
6574 for delta
in deltas
:
6575 for vdu_delta
in delta
.get("vdu-delta", {}):
6576 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6577 min_instance_count
= 0
6578 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6579 if vdu_profile
and "min-number-of-instances" in vdu_profile
:
6580 min_instance_count
= vdu_profile
["min-number-of-instances"]
6582 default_instance_num
= get_number_of_instances(
6583 db_vnfd
, vdu_delta
["id"]
6585 instance_num
= vdu_delta
.get("number-of-instances", 1)
6586 nb_scale_op
-= instance_num
6588 new_instance_count
= nb_scale_op
+ default_instance_num
6590 if new_instance_count
< min_instance_count
< vdu_count
:
6591 instances_number
= min_instance_count
- new_instance_count
6593 instances_number
= instance_num
6595 if new_instance_count
< min_instance_count
:
6597 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6598 "scaling-group-descriptor '{}'".format(
6599 nb_scale_op
, scaling_group
6602 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6603 vca_scaling_info
.append(
6605 "osm_vdu_id": vdu_delta
["id"],
6606 "member-vnf-index": vnf_index
,
6608 "vdu_index": vdu_index
- 1 - x
,
6611 scaling_info
["vdu-delete"][vdu_delta
["id"]] = instances_number
6612 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6613 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6614 kdu_name
= kdu_profile
["kdu-name"]
6615 resource_name
= kdu_profile
.get("resource-name", "")
6617 if not scaling_info
["kdu-delete"].get(kdu_name
, None):
6618 scaling_info
["kdu-delete"][kdu_name
] = []
6620 kdur
= get_kdur(db_vnfr
, kdu_name
)
6621 if kdur
.get("helm-chart"):
6622 k8s_cluster_type
= "helm-chart-v3"
6623 self
.logger
.debug("kdur: {}".format(kdur
))
6624 elif kdur
.get("juju-bundle"):
6625 k8s_cluster_type
= "juju-bundle"
6628 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6629 "juju-bundle. Maybe an old NBI version is running".format(
6630 db_vnfr
["member-vnf-index-ref"], kdur
["kdu-name"]
6634 min_instance_count
= 0
6635 if kdu_profile
and "min-number-of-instances" in kdu_profile
:
6636 min_instance_count
= kdu_profile
["min-number-of-instances"]
6638 nb_scale_op
-= kdu_delta
.get("number-of-instances", 1)
6639 deployed_kdu
, _
= get_deployed_kdu(
6640 nsr_deployed
, kdu_name
, vnf_index
6642 if deployed_kdu
is None:
6644 "KDU '{}' for vnf '{}' not deployed".format(
6648 kdu_instance
= deployed_kdu
.get("kdu-instance")
6649 instance_num
= await self
.k8scluster_map
[
6655 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6656 kdu_model
=deployed_kdu
.get("kdu-model"),
6658 kdu_replica_count
= instance_num
- kdu_delta
.get(
6659 "number-of-instances", 1
6662 if kdu_replica_count
< min_instance_count
< instance_num
:
6663 kdu_replica_count
= min_instance_count
6664 if kdu_replica_count
< min_instance_count
:
6666 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6667 "scaling-group-descriptor '{}'".format(
6668 instance_num
, scaling_group
6672 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6673 vca_scaling_info
.append(
6675 "osm_kdu_id": kdu_name
,
6676 "member-vnf-index": vnf_index
,
6678 "kdu_index": instance_num
- x
- 1,
6681 scaling_info
["kdu-delete"][kdu_name
].append(
6683 "member-vnf-index": vnf_index
,
6685 "k8s-cluster-type": k8s_cluster_type
,
6686 "resource-name": resource_name
,
6687 "scale": kdu_replica_count
,
6691 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6692 vdu_delete
= copy(scaling_info
.get("vdu-delete"))
6693 if scaling_info
["scaling_direction"] == "IN":
6694 for vdur
in reversed(db_vnfr
["vdur"]):
6695 if vdu_delete
.get(vdur
["vdu-id-ref"]):
6696 vdu_delete
[vdur
["vdu-id-ref"]] -= 1
6697 scaling_info
["vdu"].append(
6699 "name": vdur
.get("name") or vdur
.get("vdu-name"),
6700 "vdu_id": vdur
["vdu-id-ref"],
6704 for interface
in vdur
["interfaces"]:
6705 scaling_info
["vdu"][-1]["interface"].append(
6707 "name": interface
["name"],
6708 "ip_address": interface
["ip-address"],
6709 "mac_address": interface
.get("mac-address"),
6712 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6715 step
= "Executing pre-scale vnf-config-primitive"
6716 if scaling_descriptor
.get("scaling-config-action"):
6717 for scaling_config_action
in scaling_descriptor
[
6718 "scaling-config-action"
6721 scaling_config_action
.get("trigger") == "pre-scale-in"
6722 and scaling_type
== "SCALE_IN"
6724 scaling_config_action
.get("trigger") == "pre-scale-out"
6725 and scaling_type
== "SCALE_OUT"
6727 vnf_config_primitive
= scaling_config_action
[
6728 "vnf-config-primitive-name-ref"
6730 step
= db_nslcmop_update
[
6732 ] = "executing pre-scale scaling-config-action '{}'".format(
6733 vnf_config_primitive
6736 # look for primitive
6737 for config_primitive
in (
6738 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
6739 ).get("config-primitive", ()):
6740 if config_primitive
["name"] == vnf_config_primitive
:
6744 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6745 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6746 "primitive".format(scaling_group
, vnf_config_primitive
)
6749 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
6750 if db_vnfr
.get("additionalParamsForVnf"):
6751 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
6753 scale_process
= "VCA"
6754 db_nsr_update
["config-status"] = "configuring pre-scaling"
6755 primitive_params
= self
._map
_primitive
_params
(
6756 config_primitive
, {}, vnfr_params
6759 # Pre-scale retry check: Check if this sub-operation has been executed before
6760 op_index
= self
._check
_or
_add
_scale
_suboperation
(
6763 vnf_config_primitive
,
6767 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
6768 # Skip sub-operation
6769 result
= "COMPLETED"
6770 result_detail
= "Done"
6773 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6774 vnf_config_primitive
, result
, result_detail
6778 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
6779 # New sub-operation: Get index of this sub-operation
6781 len(db_nslcmop
.get("_admin", {}).get("operations"))
6786 + "vnf_config_primitive={} New sub-operation".format(
6787 vnf_config_primitive
6791 # retry: Get registered params for this existing sub-operation
6792 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
6795 vnf_index
= op
.get("member_vnf_index")
6796 vnf_config_primitive
= op
.get("primitive")
6797 primitive_params
= op
.get("primitive_params")
6800 + "vnf_config_primitive={} Sub-operation retry".format(
6801 vnf_config_primitive
6804 # Execute the primitive, either with new (first-time) or registered (reintent) args
6805 ee_descriptor_id
= config_primitive
.get(
6806 "execution-environment-ref"
6808 primitive_name
= config_primitive
.get(
6809 "execution-environment-primitive", vnf_config_primitive
6811 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
6812 nsr_deployed
["VCA"],
6813 member_vnf_index
=vnf_index
,
6815 vdu_count_index
=None,
6816 ee_descriptor_id
=ee_descriptor_id
,
6818 result
, result_detail
= await self
._ns
_execute
_primitive
(
6827 + "vnf_config_primitive={} Done with result {} {}".format(
6828 vnf_config_primitive
, result
, result_detail
6831 # Update operationState = COMPLETED | FAILED
6832 self
._update
_suboperation
_status
(
6833 db_nslcmop
, op_index
, result
, result_detail
6836 if result
== "FAILED":
6837 raise LcmException(result_detail
)
6838 db_nsr_update
["config-status"] = old_config_status
6839 scale_process
= None
6843 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index
)
6846 "_admin.scaling-group.{}.time".format(admin_scale_index
)
6849 # SCALE-IN VCA - BEGIN
6850 if vca_scaling_info
:
6851 step
= db_nslcmop_update
[
6853 ] = "Deleting the execution environments"
6854 scale_process
= "VCA"
6855 for vca_info
in vca_scaling_info
:
6856 if vca_info
["type"] == "delete" and not vca_info
.get("osm_kdu_id"):
6857 member_vnf_index
= str(vca_info
["member-vnf-index"])
6859 logging_text
+ "vdu info: {}".format(vca_info
)
6861 if vca_info
.get("osm_vdu_id"):
6862 vdu_id
= vca_info
["osm_vdu_id"]
6863 vdu_index
= int(vca_info
["vdu_index"])
6866 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6867 member_vnf_index
, vdu_id
, vdu_index
6869 stage
[2] = step
= "Scaling in VCA"
6870 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
6871 vca_update
= db_nsr
["_admin"]["deployed"]["VCA"]
6872 config_update
= db_nsr
["configurationStatus"]
6873 for vca_index
, vca
in enumerate(vca_update
):
6875 (vca
or vca
.get("ee_id"))
6876 and vca
["member-vnf-index"] == member_vnf_index
6877 and vca
["vdu_count_index"] == vdu_index
6879 if vca
.get("vdu_id"):
6880 config_descriptor
= get_configuration(
6881 db_vnfd
, vca
.get("vdu_id")
6883 elif vca
.get("kdu_name"):
6884 config_descriptor
= get_configuration(
6885 db_vnfd
, vca
.get("kdu_name")
6888 config_descriptor
= get_configuration(
6889 db_vnfd
, db_vnfd
["id"]
6891 operation_params
= (
6892 db_nslcmop
.get("operationParams") or {}
6894 exec_terminate_primitives
= not operation_params
.get(
6895 "skip_terminate_primitives"
6896 ) and vca
.get("needed_terminate")
6897 task
= asyncio
.ensure_future(
6906 exec_primitives
=exec_terminate_primitives
,
6910 timeout
=self
.timeout
.charm_delete
,
6913 tasks_dict_info
[task
] = "Terminating VCA {}".format(
6916 del vca_update
[vca_index
]
6917 del config_update
[vca_index
]
6918 # wait for pending tasks of terminate primitives
6922 + "Waiting for tasks {}".format(
6923 list(tasks_dict_info
.keys())
6926 error_list
= await self
._wait
_for
_tasks
(
6930 self
.timeout
.charm_delete
, self
.timeout
.ns_terminate
6935 tasks_dict_info
.clear()
6937 raise LcmException("; ".join(error_list
))
6939 db_vca_and_config_update
= {
6940 "_admin.deployed.VCA": vca_update
,
6941 "configurationStatus": config_update
,
6944 "nsrs", db_nsr
["_id"], db_vca_and_config_update
6946 scale_process
= None
6947 # SCALE-IN VCA - END
6950 if scaling_info
.get("vdu-create") or scaling_info
.get("vdu-delete"):
6951 scale_process
= "RO"
6952 if self
.ro_config
.ng
:
6953 await self
._scale
_ng
_ro
(
6954 logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, scaling_info
, stage
6956 scaling_info
.pop("vdu-create", None)
6957 scaling_info
.pop("vdu-delete", None)
6959 scale_process
= None
6963 if scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete"):
6964 scale_process
= "KDU"
6965 await self
._scale
_kdu
(
6966 logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
6968 scaling_info
.pop("kdu-create", None)
6969 scaling_info
.pop("kdu-delete", None)
6971 scale_process
= None
6975 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6977 # SCALE-UP VCA - BEGIN
6978 if vca_scaling_info
:
6979 step
= db_nslcmop_update
[
6981 ] = "Creating new execution environments"
6982 scale_process
= "VCA"
6983 for vca_info
in vca_scaling_info
:
6984 if vca_info
["type"] == "create" and not vca_info
.get("osm_kdu_id"):
6985 member_vnf_index
= str(vca_info
["member-vnf-index"])
6987 logging_text
+ "vdu info: {}".format(vca_info
)
6989 vnfd_id
= db_vnfr
["vnfd-ref"]
6990 if vca_info
.get("osm_vdu_id"):
6991 vdu_index
= int(vca_info
["vdu_index"])
6992 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6993 if db_vnfr
.get("additionalParamsForVnf"):
6994 deploy_params
.update(
6996 db_vnfr
["additionalParamsForVnf"].copy()
6999 descriptor_config
= get_configuration(
7000 db_vnfd
, db_vnfd
["id"]
7002 if descriptor_config
:
7008 logging_text
=logging_text
7009 + "member_vnf_index={} ".format(member_vnf_index
),
7012 nslcmop_id
=nslcmop_id
,
7018 kdu_index
=kdu_index
,
7019 member_vnf_index
=member_vnf_index
,
7020 vdu_index
=vdu_index
,
7022 deploy_params
=deploy_params
,
7023 descriptor_config
=descriptor_config
,
7024 base_folder
=base_folder
,
7025 task_instantiation_info
=tasks_dict_info
,
7028 vdu_id
= vca_info
["osm_vdu_id"]
7029 vdur
= find_in_list(
7030 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
7032 descriptor_config
= get_configuration(db_vnfd
, vdu_id
)
7033 if vdur
.get("additionalParams"):
7034 deploy_params_vdu
= parse_yaml_strings(
7035 vdur
["additionalParams"]
7038 deploy_params_vdu
= deploy_params
7039 deploy_params_vdu
["OSM"] = get_osm_params(
7040 db_vnfr
, vdu_id
, vdu_count_index
=vdu_index
7042 if descriptor_config
:
7048 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7049 member_vnf_index
, vdu_id
, vdu_index
7051 stage
[2] = step
= "Scaling out VCA"
7052 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7054 logging_text
=logging_text
7055 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7056 member_vnf_index
, vdu_id
, vdu_index
7060 nslcmop_id
=nslcmop_id
,
7066 member_vnf_index
=member_vnf_index
,
7067 vdu_index
=vdu_index
,
7068 kdu_index
=kdu_index
,
7070 deploy_params
=deploy_params_vdu
,
7071 descriptor_config
=descriptor_config
,
7072 base_folder
=base_folder
,
7073 task_instantiation_info
=tasks_dict_info
,
7076 # SCALE-UP VCA - END
7077 scale_process
= None
7080 # execute primitive service POST-SCALING
7081 step
= "Executing post-scale vnf-config-primitive"
7082 if scaling_descriptor
.get("scaling-config-action"):
7083 for scaling_config_action
in scaling_descriptor
[
7084 "scaling-config-action"
7087 scaling_config_action
.get("trigger") == "post-scale-in"
7088 and scaling_type
== "SCALE_IN"
7090 scaling_config_action
.get("trigger") == "post-scale-out"
7091 and scaling_type
== "SCALE_OUT"
7093 vnf_config_primitive
= scaling_config_action
[
7094 "vnf-config-primitive-name-ref"
7096 step
= db_nslcmop_update
[
7098 ] = "executing post-scale scaling-config-action '{}'".format(
7099 vnf_config_primitive
7102 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
7103 if db_vnfr
.get("additionalParamsForVnf"):
7104 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
7106 # look for primitive
7107 for config_primitive
in (
7108 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
7109 ).get("config-primitive", ()):
7110 if config_primitive
["name"] == vnf_config_primitive
:
7114 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7115 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7116 "config-primitive".format(
7117 scaling_group
, vnf_config_primitive
7120 scale_process
= "VCA"
7121 db_nsr_update
["config-status"] = "configuring post-scaling"
7122 primitive_params
= self
._map
_primitive
_params
(
7123 config_primitive
, {}, vnfr_params
7126 # Post-scale retry check: Check if this sub-operation has been executed before
7127 op_index
= self
._check
_or
_add
_scale
_suboperation
(
7130 vnf_config_primitive
,
7134 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
7135 # Skip sub-operation
7136 result
= "COMPLETED"
7137 result_detail
= "Done"
7140 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7141 vnf_config_primitive
, result
, result_detail
7145 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
7146 # New sub-operation: Get index of this sub-operation
7148 len(db_nslcmop
.get("_admin", {}).get("operations"))
7153 + "vnf_config_primitive={} New sub-operation".format(
7154 vnf_config_primitive
7158 # retry: Get registered params for this existing sub-operation
7159 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
7162 vnf_index
= op
.get("member_vnf_index")
7163 vnf_config_primitive
= op
.get("primitive")
7164 primitive_params
= op
.get("primitive_params")
7167 + "vnf_config_primitive={} Sub-operation retry".format(
7168 vnf_config_primitive
7171 # Execute the primitive, either with new (first-time) or registered (reintent) args
7172 ee_descriptor_id
= config_primitive
.get(
7173 "execution-environment-ref"
7175 primitive_name
= config_primitive
.get(
7176 "execution-environment-primitive", vnf_config_primitive
7178 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
7179 nsr_deployed
["VCA"],
7180 member_vnf_index
=vnf_index
,
7182 vdu_count_index
=None,
7183 ee_descriptor_id
=ee_descriptor_id
,
7185 result
, result_detail
= await self
._ns
_execute
_primitive
(
7194 + "vnf_config_primitive={} Done with result {} {}".format(
7195 vnf_config_primitive
, result
, result_detail
7198 # Update operationState = COMPLETED | FAILED
7199 self
._update
_suboperation
_status
(
7200 db_nslcmop
, op_index
, result
, result_detail
7203 if result
== "FAILED":
7204 raise LcmException(result_detail
)
7205 db_nsr_update
["config-status"] = old_config_status
7206 scale_process
= None
7208 # Check if each vnf has exporter for metric collection if so update prometheus job records
7209 if scaling_type
== "SCALE_OUT":
7210 if "exporters-endpoints" in db_vnfd
.get("df")[0]:
7211 vnfr_id
= db_vnfr
["id"]
7212 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7213 exporter_config
= db_vnfd
.get("df")[0].get("exporters-endpoints")
7214 self
.logger
.debug("exporter config :{}".format(exporter_config
))
7215 artifact_path
= "{}/{}/{}".format(
7216 base_folder
["folder"],
7217 base_folder
["pkg-dir"],
7218 "exporter-endpoint",
7221 ee_config_descriptor
= exporter_config
7222 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
7226 vdu_id
=db_vnfr
["vdur"][-1]["vdu-id-ref"],
7227 vdu_index
=db_vnfr
["vdur"][-1]["count-index"],
7231 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
7232 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
7233 vdu_id_for_prom
= None
7234 vdu_index_for_prom
= None
7235 for x
in get_iterable(db_vnfr
, "vdur"):
7236 vdu_id_for_prom
= x
.get("vdu-id-ref")
7237 vdu_index_for_prom
= x
.get("count-index")
7238 vnfr_id
= vnfr_id
+ vdu_id
+ str(vdu_index
)
7239 vnfr_id
= vnfr_id
.replace("_", "")
7240 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
7242 artifact_path
=artifact_path
,
7243 ee_config_descriptor
=ee_config_descriptor
,
7246 target_ip
=rw_mgmt_ip
,
7248 vdu_id
=vdu_id_for_prom
,
7249 vdu_index
=vdu_index_for_prom
,
7252 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
7255 "_admin.deployed.prometheus_jobs"
7263 for job
in prometheus_jobs
:
7269 fail_on_empty
=False,
7273 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7274 db_nsr_update
["operational-status"] = (
7276 if old_operational_status
== "failed"
7277 else old_operational_status
7279 db_nsr_update
["config-status"] = old_config_status
7282 ROclient
.ROClientException
,
7287 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7289 except asyncio
.CancelledError
:
7291 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7293 exc
= "Operation was cancelled"
7294 except Exception as e
:
7295 exc
= traceback
.format_exc()
7296 self
.logger
.critical(
7297 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
7303 error_list
.append(str(exc
))
7304 self
._write
_ns
_status
(
7307 current_operation
="IDLE",
7308 current_operation_id
=None,
7312 stage
[1] = "Waiting for instantiate pending tasks."
7313 self
.logger
.debug(logging_text
+ stage
[1])
7314 exc
= await self
._wait
_for
_tasks
(
7317 self
.timeout
.ns_deploy
,
7322 except asyncio
.CancelledError
:
7323 error_list
.append("Cancelled")
7324 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
7325 await self
._wait
_for
_tasks
(
7328 self
.timeout
.ns_deploy
,
7334 error_detail
= "; ".join(error_list
)
7337 ] = error_description_nslcmop
= "FAILED {}: {}".format(
7340 nslcmop_operation_state
= "FAILED"
7342 db_nsr_update
["operational-status"] = old_operational_status
7343 db_nsr_update
["config-status"] = old_config_status
7344 db_nsr_update
["detailed-status"] = ""
7346 if "VCA" in scale_process
:
7347 db_nsr_update
["config-status"] = "failed"
7348 if "RO" in scale_process
:
7349 db_nsr_update
["operational-status"] = "failed"
7352 ] = "FAILED scaling nslcmop={} {}: {}".format(
7353 nslcmop_id
, step
, error_detail
7356 error_description_nslcmop
= None
7357 nslcmop_operation_state
= "COMPLETED"
7358 db_nslcmop_update
["detailed-status"] = "Done"
7359 if scaling_type
== "SCALE_IN" and prom_job_name
is not None:
7362 {"job_name": prom_job_name
},
7363 fail_on_empty
=False,
7366 self
._write
_op
_status
(
7369 error_message
=error_description_nslcmop
,
7370 operation_state
=nslcmop_operation_state
,
7371 other_update
=db_nslcmop_update
,
7374 self
._write
_ns
_status
(
7377 current_operation
="IDLE",
7378 current_operation_id
=None,
7379 other_update
=db_nsr_update
,
7382 if nslcmop_operation_state
:
7386 "nslcmop_id": nslcmop_id
,
7387 "operationState": nslcmop_operation_state
,
7389 await self
.msg
.aiowrite("ns", "scaled", msg
)
7390 except Exception as e
:
7392 logging_text
+ "kafka_write notification Exception {}".format(e
)
7394 self
.logger
.debug(logging_text
+ "Exit")
7395 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_scale")
7397 async def _scale_kdu(
7398 self
, logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7400 _scaling_info
= scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete")
7401 for kdu_name
in _scaling_info
:
7402 for kdu_scaling_info
in _scaling_info
[kdu_name
]:
7403 deployed_kdu
, index
= get_deployed_kdu(
7404 nsr_deployed
, kdu_name
, kdu_scaling_info
["member-vnf-index"]
7406 cluster_uuid
= deployed_kdu
["k8scluster-uuid"]
7407 kdu_instance
= deployed_kdu
["kdu-instance"]
7408 kdu_model
= deployed_kdu
.get("kdu-model")
7409 scale
= int(kdu_scaling_info
["scale"])
7410 k8s_cluster_type
= kdu_scaling_info
["k8s-cluster-type"]
7413 "collection": "nsrs",
7414 "filter": {"_id": nsr_id
},
7415 "path": "_admin.deployed.K8s.{}".format(index
),
7418 step
= "scaling application {}".format(
7419 kdu_scaling_info
["resource-name"]
7421 self
.logger
.debug(logging_text
+ step
)
7423 if kdu_scaling_info
["type"] == "delete":
7424 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7427 and kdu_config
.get("terminate-config-primitive")
7428 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7430 terminate_config_primitive_list
= kdu_config
.get(
7431 "terminate-config-primitive"
7433 terminate_config_primitive_list
.sort(
7434 key
=lambda val
: int(val
["seq"])
7438 terminate_config_primitive
7439 ) in terminate_config_primitive_list
:
7440 primitive_params_
= self
._map
_primitive
_params
(
7441 terminate_config_primitive
, {}, {}
7443 step
= "execute terminate config primitive"
7444 self
.logger
.debug(logging_text
+ step
)
7445 await asyncio
.wait_for(
7446 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7447 cluster_uuid
=cluster_uuid
,
7448 kdu_instance
=kdu_instance
,
7449 primitive_name
=terminate_config_primitive
["name"],
7450 params
=primitive_params_
,
7452 total_timeout
=self
.timeout
.primitive
,
7455 timeout
=self
.timeout
.primitive
7456 * self
.timeout
.primitive_outer_factor
,
7459 await asyncio
.wait_for(
7460 self
.k8scluster_map
[k8s_cluster_type
].scale(
7461 kdu_instance
=kdu_instance
,
7463 resource_name
=kdu_scaling_info
["resource-name"],
7464 total_timeout
=self
.timeout
.scale_on_error
,
7466 cluster_uuid
=cluster_uuid
,
7467 kdu_model
=kdu_model
,
7471 timeout
=self
.timeout
.scale_on_error
7472 * self
.timeout
.scale_on_error_outer_factor
,
7475 if kdu_scaling_info
["type"] == "create":
7476 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7479 and kdu_config
.get("initial-config-primitive")
7480 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7482 initial_config_primitive_list
= kdu_config
.get(
7483 "initial-config-primitive"
7485 initial_config_primitive_list
.sort(
7486 key
=lambda val
: int(val
["seq"])
7489 for initial_config_primitive
in initial_config_primitive_list
:
7490 primitive_params_
= self
._map
_primitive
_params
(
7491 initial_config_primitive
, {}, {}
7493 step
= "execute initial config primitive"
7494 self
.logger
.debug(logging_text
+ step
)
7495 await asyncio
.wait_for(
7496 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7497 cluster_uuid
=cluster_uuid
,
7498 kdu_instance
=kdu_instance
,
7499 primitive_name
=initial_config_primitive
["name"],
7500 params
=primitive_params_
,
7507 async def _scale_ng_ro(
7508 self
, logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, vdu_scaling_info
, stage
7510 nsr_id
= db_nslcmop
["nsInstanceId"]
7511 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7514 # read from db: vnfd's for every vnf
7517 # for each vnf in ns, read vnfd
7518 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
7519 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
7520 vnfd_id
= vnfr
["vnfd-id"] # vnfd uuid for this vnf
7521 # if we haven't this vnfd, read it from db
7522 if not find_in_list(db_vnfds
, lambda a_vnfd
: a_vnfd
["id"] == vnfd_id
):
7524 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7525 db_vnfds
.append(vnfd
)
7526 n2vc_key
= self
.n2vc
.get_public_key()
7527 n2vc_key_list
= [n2vc_key
]
7530 vdu_scaling_info
.get("vdu-create"),
7531 vdu_scaling_info
.get("vdu-delete"),
7534 # db_vnfr has been updated, update db_vnfrs to use it
7535 db_vnfrs
[db_vnfr
["member-vnf-index-ref"]] = db_vnfr
7536 await self
._instantiate
_ng
_ro
(
7546 start_deploy
=time(),
7547 timeout_ns_deploy
=self
.timeout
.ns_deploy
,
7549 if vdu_scaling_info
.get("vdu-delete"):
7551 db_vnfr
, None, vdu_scaling_info
["vdu-delete"], mark_delete
=False
7554 async def extract_prometheus_scrape_jobs(
7558 ee_config_descriptor
: dict,
7563 vnf_member_index
: str = "",
7565 vdu_index
: int = None,
7567 kdu_index
: int = None,
7569 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7570 This method will wait until the corresponding VDU or KDU is fully instantiated
7573 ee_id (str): Execution Environment ID
7574 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7575 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7576 vnfr_id (str): VNFR ID where this EE applies
7577 nsr_id (str): NSR ID where this EE applies
7578 target_ip (str): VDU/KDU instance IP address
7579 element_type (str): NS or VNF or VDU or KDU
7580 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7581 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7582 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7583 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7584 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7587 LcmException: When the VDU or KDU instance was not found in an hour
7590 _type_: Prometheus jobs
7592 # default the vdur and kdur names to an empty string, to avoid any later
7593 # problem with Prometheus when the element type is not VDU or KDU
7597 # look if exist a file called 'prometheus*.j2' and
7598 artifact_content
= self
.fs
.dir_ls(artifact_path
)
7602 for f
in artifact_content
7603 if f
.startswith("prometheus") and f
.endswith(".j2")
7609 self
.logger
.debug("Artifact path{}".format(artifact_path
))
7610 self
.logger
.debug("job file{}".format(job_file
))
7611 with self
.fs
.file_open((artifact_path
, job_file
), "r") as f
:
7614 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7615 if element_type
in ("VDU", "KDU"):
7616 for _
in range(360):
7617 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7618 if vdu_id
and vdu_index
is not None:
7622 for x
in get_iterable(db_vnfr
, "vdur")
7624 x
.get("vdu-id-ref") == vdu_id
7625 and x
.get("count-index") == vdu_index
7630 if vdur
.get("name"):
7631 vdur_name
= vdur
.get("name")
7633 if kdu_name
and kdu_index
is not None:
7637 for x
in get_iterable(db_vnfr
, "kdur")
7639 x
.get("kdu-name") == kdu_name
7640 and x
.get("count-index") == kdu_index
7645 if kdur
.get("name"):
7646 kdur_name
= kdur
.get("name")
7649 await asyncio
.sleep(10)
7651 if vdu_id
and vdu_index
is not None:
7653 f
"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7655 if kdu_name
and kdu_index
is not None:
7657 f
"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7660 if ee_id
is not None:
7661 _
, namespace
, helm_id
= get_ee_id_parts(
7663 ) # get namespace and EE gRPC service name
7664 host_name
= f
'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7666 vnfr_id
= vnfr_id
.replace("-", "")
7668 "JOB_NAME": vnfr_id
,
7669 "TARGET_IP": target_ip
,
7670 "EXPORTER_POD_IP": host_name
,
7671 "EXPORTER_POD_PORT": host_port
,
7673 "VNF_MEMBER_INDEX": vnf_member_index
,
7674 "VDUR_NAME": vdur_name
,
7675 "KDUR_NAME": kdur_name
,
7676 "ELEMENT_TYPE": element_type
,
7679 metric_path
= ee_config_descriptor
["metric-path"]
7680 target_port
= ee_config_descriptor
["metric-port"]
7681 vnfr_id
= vnfr_id
.replace("-", "")
7683 "JOB_NAME": vnfr_id
,
7684 "TARGET_IP": target_ip
,
7685 "TARGET_PORT": target_port
,
7686 "METRIC_PATH": metric_path
,
7689 job_list
= parse_job(job_data
, variables
)
7690 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7691 for job
in job_list
:
7693 not isinstance(job
.get("job_name"), str)
7694 or vnfr_id
not in job
["job_name"]
7696 job
["job_name"] = vnfr_id
+ "_" + str(SystemRandom().randint(1, 10000))
7697 job
["nsr_id"] = nsr_id
7698 job
["vnfr_id"] = vnfr_id
7701 async def rebuild_start_stop(
7702 self
, nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
7704 logging_text
= "Task ns={} {}={} ".format(nsr_id
, operation_type
, nslcmop_id
)
7705 self
.logger
.info(logging_text
+ "Enter")
7706 stage
= ["Preparing the environment", ""]
7707 # database nsrs record
7711 # in case of error, indicates what part of scale was failed to put nsr at error status
7712 start_deploy
= time()
7714 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_id
})
7715 vim_account_id
= db_vnfr
.get("vim-account-id")
7716 vim_info_key
= "vim:" + vim_account_id
7717 vdu_id
= additional_param
["vdu_id"]
7718 vdurs
= [item
for item
in db_vnfr
["vdur"] if item
["vdu-id-ref"] == vdu_id
]
7719 vdur
= find_in_list(
7720 vdurs
, lambda vdu
: vdu
["count-index"] == additional_param
["count-index"]
7723 vdu_vim_name
= vdur
["name"]
7724 vim_vm_id
= vdur
["vim_info"][vim_info_key
]["vim_id"]
7725 target_vim
, _
= next(k_v
for k_v
in vdur
["vim_info"].items())
7727 raise LcmException("Target vdu is not found")
7728 self
.logger
.info("vdu_vim_name >> {} ".format(vdu_vim_name
))
7729 # wait for any previous tasks in process
7730 stage
[1] = "Waiting for previous operations to terminate"
7731 self
.logger
.info(stage
[1])
7732 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7734 stage
[1] = "Reading from database."
7735 self
.logger
.info(stage
[1])
7736 self
._write
_ns
_status
(
7739 current_operation
=operation_type
.upper(),
7740 current_operation_id
=nslcmop_id
,
7742 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7745 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
7746 db_nsr_update
["operational-status"] = operation_type
7747 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7751 "vim_vm_id": vim_vm_id
,
7753 "vdu_index": additional_param
["count-index"],
7754 "vdu_id": vdur
["id"],
7755 "target_vim": target_vim
,
7756 "vim_account_id": vim_account_id
,
7759 stage
[1] = "Sending rebuild request to RO... {}".format(desc
)
7760 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7761 self
.logger
.info("ro nsr id: {}".format(nsr_id
))
7762 result_dict
= await self
.RO
.operate(nsr_id
, desc
, operation_type
)
7763 self
.logger
.info("response from RO: {}".format(result_dict
))
7764 action_id
= result_dict
["action_id"]
7765 await self
._wait
_ng
_ro
(
7770 self
.timeout
.operate
,
7772 "start_stop_rebuild",
7774 return "COMPLETED", "Done"
7775 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7776 self
.logger
.error("Exit Exception {}".format(e
))
7778 except asyncio
.CancelledError
:
7779 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
7780 exc
= "Operation was cancelled"
7781 except Exception as e
:
7782 exc
= traceback
.format_exc()
7783 self
.logger
.critical(
7784 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7786 return "FAILED", "Error in operate VNF {}".format(exc
)
7788 async def migrate(self
, nsr_id
, nslcmop_id
):
7790 Migrate VNFs and VDUs instances in a NS
7792 :param: nsr_id: NS Instance ID
7793 :param: nslcmop_id: nslcmop ID of migrate
7796 # Try to lock HA task here
7797 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7798 if not task_is_locked_by_me
:
7800 logging_text
= "Task ns={} migrate ".format(nsr_id
)
7801 self
.logger
.debug(logging_text
+ "Enter")
7802 # get all needed from database
7804 db_nslcmop_update
= {}
7805 nslcmop_operation_state
= None
7809 # in case of error, indicates what part of scale was failed to put nsr at error status
7810 start_deploy
= time()
7813 # wait for any previous tasks in process
7814 step
= "Waiting for previous operations to terminate"
7815 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7817 self
._write
_ns
_status
(
7820 current_operation
="MIGRATING",
7821 current_operation_id
=nslcmop_id
,
7823 step
= "Getting nslcmop from database"
7825 step
+ " after having waited for previous tasks to be completed"
7827 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7828 migrate_params
= db_nslcmop
.get("operationParams")
7831 target
.update(migrate_params
)
7832 desc
= await self
.RO
.migrate(nsr_id
, target
)
7833 self
.logger
.debug("RO return > {}".format(desc
))
7834 action_id
= desc
["action_id"]
7835 await self
._wait
_ng
_ro
(
7840 self
.timeout
.migrate
,
7841 operation
="migrate",
7843 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7844 self
.logger
.error("Exit Exception {}".format(e
))
7846 except asyncio
.CancelledError
:
7847 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
7848 exc
= "Operation was cancelled"
7849 except Exception as e
:
7850 exc
= traceback
.format_exc()
7851 self
.logger
.critical(
7852 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7855 self
._write
_ns
_status
(
7858 current_operation
="IDLE",
7859 current_operation_id
=None,
7862 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
7863 nslcmop_operation_state
= "FAILED"
7865 nslcmop_operation_state
= "COMPLETED"
7866 db_nslcmop_update
["detailed-status"] = "Done"
7867 db_nsr_update
["detailed-status"] = "Done"
7869 self
._write
_op
_status
(
7873 operation_state
=nslcmop_operation_state
,
7874 other_update
=db_nslcmop_update
,
7876 if nslcmop_operation_state
:
7880 "nslcmop_id": nslcmop_id
,
7881 "operationState": nslcmop_operation_state
,
7883 await self
.msg
.aiowrite("ns", "migrated", msg
)
7884 except Exception as e
:
7886 logging_text
+ "kafka_write notification Exception {}".format(e
)
7888 self
.logger
.debug(logging_text
+ "Exit")
7889 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_migrate")
7891 async def heal(self
, nsr_id
, nslcmop_id
):
7895 :param nsr_id: ns instance to heal
7896 :param nslcmop_id: operation to run
7900 # Try to lock HA task here
7901 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7902 if not task_is_locked_by_me
:
7905 logging_text
= "Task ns={} heal={} ".format(nsr_id
, nslcmop_id
)
7906 stage
= ["", "", ""]
7907 tasks_dict_info
= {}
7908 # ^ stage, step, VIM progress
7909 self
.logger
.debug(logging_text
+ "Enter")
7910 # get all needed from database
7912 db_nslcmop_update
= {}
7914 db_vnfrs
= {} # vnf's info indexed by _id
7916 old_operational_status
= ""
7917 old_config_status
= ""
7920 # wait for any previous tasks in process
7921 step
= "Waiting for previous operations to terminate"
7922 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7923 self
._write
_ns
_status
(
7926 current_operation
="HEALING",
7927 current_operation_id
=nslcmop_id
,
7930 step
= "Getting nslcmop from database"
7932 step
+ " after having waited for previous tasks to be completed"
7934 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7936 step
= "Getting nsr from database"
7937 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
7938 old_operational_status
= db_nsr
["operational-status"]
7939 old_config_status
= db_nsr
["config-status"]
7942 "operational-status": "healing",
7943 "_admin.deployed.RO.operational-status": "healing",
7945 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7947 step
= "Sending heal order to VIM"
7949 logging_text
=logging_text
,
7951 db_nslcmop
=db_nslcmop
,
7956 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
7957 self
.logger
.debug(logging_text
+ stage
[1])
7958 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7959 self
.fs
.sync(db_nsr
["nsd-id"])
7961 # read from db: vnfr's of this ns
7962 step
= "Getting vnfrs from db"
7963 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
7964 for vnfr
in db_vnfrs_list
:
7965 db_vnfrs
[vnfr
["_id"]] = vnfr
7966 self
.logger
.debug("ns.heal db_vnfrs={}".format(db_vnfrs
))
7968 # Check for each target VNF
7969 target_list
= db_nslcmop
.get("operationParams", {}).get("healVnfData", {})
7970 for target_vnf
in target_list
:
7971 # Find this VNF in the list from DB
7972 vnfr_id
= target_vnf
.get("vnfInstanceId", None)
7974 db_vnfr
= db_vnfrs
[vnfr_id
]
7975 vnfd_id
= db_vnfr
.get("vnfd-id")
7976 vnfd_ref
= db_vnfr
.get("vnfd-ref")
7977 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7978 base_folder
= vnfd
["_admin"]["storage"]
7983 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
7984 member_vnf_index
= db_vnfr
.get("member-vnf-index-ref")
7986 # Check each target VDU and deploy N2VC
7987 target_vdu_list
= target_vnf
.get("additionalParams", {}).get(
7990 if not target_vdu_list
:
7991 # Codigo nuevo para crear diccionario
7992 target_vdu_list
= []
7993 for existing_vdu
in db_vnfr
.get("vdur"):
7994 vdu_name
= existing_vdu
.get("vdu-name", None)
7995 vdu_index
= existing_vdu
.get("count-index", 0)
7996 vdu_run_day1
= target_vnf
.get("additionalParams", {}).get(
7999 vdu_to_be_healed
= {
8001 "count-index": vdu_index
,
8002 "run-day1": vdu_run_day1
,
8004 target_vdu_list
.append(vdu_to_be_healed
)
8005 for target_vdu
in target_vdu_list
:
8006 deploy_params_vdu
= target_vdu
8007 # Set run-day1 vnf level value if not vdu level value exists
8008 if not deploy_params_vdu
.get("run-day1") and target_vnf
.get(
8009 "additionalParams", {}
8011 deploy_params_vdu
["run-day1"] = target_vnf
[
8014 vdu_name
= target_vdu
.get("vdu-id", None)
8015 # TODO: Get vdu_id from vdud.
8017 # For multi instance VDU count-index is mandatory
8018 # For single session VDU count-indes is 0
8019 vdu_index
= target_vdu
.get("count-index", 0)
8021 # n2vc_redesign STEP 3 to 6 Deploy N2VC
8022 stage
[1] = "Deploying Execution Environments."
8023 self
.logger
.debug(logging_text
+ stage
[1])
8025 # VNF Level charm. Normal case when proxy charms.
8026 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
8027 descriptor_config
= get_configuration(vnfd
, vnfd_ref
)
8028 if descriptor_config
:
8029 # Continue if healed machine is management machine
8030 vnf_ip_address
= db_vnfr
.get("ip-address")
8031 target_instance
= None
8032 for instance
in db_vnfr
.get("vdur", None):
8034 instance
["vdu-name"] == vdu_name
8035 and instance
["count-index"] == vdu_index
8037 target_instance
= instance
8039 if vnf_ip_address
== target_instance
.get("ip-address"):
8041 logging_text
=logging_text
8042 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8043 member_vnf_index
, vdu_name
, vdu_index
8047 nslcmop_id
=nslcmop_id
,
8053 member_vnf_index
=member_vnf_index
,
8056 deploy_params
=deploy_params_vdu
,
8057 descriptor_config
=descriptor_config
,
8058 base_folder
=base_folder
,
8059 task_instantiation_info
=tasks_dict_info
,
8063 # VDU Level charm. Normal case with native charms.
8064 descriptor_config
= get_configuration(vnfd
, vdu_name
)
8065 if descriptor_config
:
8067 logging_text
=logging_text
8068 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8069 member_vnf_index
, vdu_name
, vdu_index
8073 nslcmop_id
=nslcmop_id
,
8079 member_vnf_index
=member_vnf_index
,
8080 vdu_index
=vdu_index
,
8082 deploy_params
=deploy_params_vdu
,
8083 descriptor_config
=descriptor_config
,
8084 base_folder
=base_folder
,
8085 task_instantiation_info
=tasks_dict_info
,
8089 ROclient
.ROClientException
,
8094 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
8096 except asyncio
.CancelledError
:
8098 logging_text
+ "Cancelled Exception while '{}'".format(step
)
8100 exc
= "Operation was cancelled"
8101 except Exception as e
:
8102 exc
= traceback
.format_exc()
8103 self
.logger
.critical(
8104 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
8109 if db_vnfrs_list
and target_list
:
8110 for vnfrs
in db_vnfrs_list
:
8111 for vnf_instance
in target_list
:
8112 if vnfrs
["_id"] == vnf_instance
.get("vnfInstanceId"):
8115 {"_id": vnfrs
["_id"]},
8116 {"_admin.modified": time()},
8119 error_list
.append(str(exc
))
8122 stage
[1] = "Waiting for healing pending tasks."
8123 self
.logger
.debug(logging_text
+ stage
[1])
8124 exc
= await self
._wait
_for
_tasks
(
8127 self
.timeout
.ns_deploy
,
8132 except asyncio
.CancelledError
:
8133 error_list
.append("Cancelled")
8134 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
8135 await self
._wait
_for
_tasks
(
8138 self
.timeout
.ns_deploy
,
8144 error_detail
= "; ".join(error_list
)
8147 ] = error_description_nslcmop
= "FAILED {}: {}".format(
8150 nslcmop_operation_state
= "FAILED"
8152 db_nsr_update
["operational-status"] = old_operational_status
8153 db_nsr_update
["config-status"] = old_config_status
8156 ] = "FAILED healing nslcmop={} {}: {}".format(
8157 nslcmop_id
, step
, error_detail
8159 for task
, task_name
in tasks_dict_info
.items():
8160 if not task
.done() or task
.cancelled() or task
.exception():
8161 if task_name
.startswith(self
.task_name_deploy_vca
):
8162 # A N2VC task is pending
8163 db_nsr_update
["config-status"] = "failed"
8165 # RO task is pending
8166 db_nsr_update
["operational-status"] = "failed"
8168 error_description_nslcmop
= None
8169 nslcmop_operation_state
= "COMPLETED"
8170 db_nslcmop_update
["detailed-status"] = "Done"
8171 db_nsr_update
["detailed-status"] = "Done"
8172 db_nsr_update
["operational-status"] = "running"
8173 db_nsr_update
["config-status"] = "configured"
8175 self
._write
_op
_status
(
8178 error_message
=error_description_nslcmop
,
8179 operation_state
=nslcmop_operation_state
,
8180 other_update
=db_nslcmop_update
,
8183 self
._write
_ns
_status
(
8186 current_operation
="IDLE",
8187 current_operation_id
=None,
8188 other_update
=db_nsr_update
,
8191 if nslcmop_operation_state
:
8195 "nslcmop_id": nslcmop_id
,
8196 "operationState": nslcmop_operation_state
,
8198 await self
.msg
.aiowrite("ns", "healed", msg
)
8199 except Exception as e
:
8201 logging_text
+ "kafka_write notification Exception {}".format(e
)
8203 self
.logger
.debug(logging_text
+ "Exit")
8204 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_heal")
8215 :param logging_text: preffix text to use at logging
8216 :param nsr_id: nsr identity
8217 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8218 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8219 :return: None or exception
8222 def get_vim_account(vim_account_id
):
8224 if vim_account_id
in db_vims
:
8225 return db_vims
[vim_account_id
]
8226 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
8227 db_vims
[vim_account_id
] = db_vim
8232 ns_params
= db_nslcmop
.get("operationParams")
8233 if ns_params
and ns_params
.get("timeout_ns_heal"):
8234 timeout_ns_heal
= ns_params
["timeout_ns_heal"]
8236 timeout_ns_heal
= self
.timeout
.ns_heal
8240 nslcmop_id
= db_nslcmop
["_id"]
8242 "action_id": nslcmop_id
,
8244 self
.logger
.warning(
8245 "db_nslcmop={} and timeout_ns_heal={}".format(
8246 db_nslcmop
, timeout_ns_heal
8249 target
.update(db_nslcmop
.get("operationParams", {}))
8251 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
8252 desc
= await self
.RO
.recreate(nsr_id
, target
)
8253 self
.logger
.debug("RO return > {}".format(desc
))
8254 action_id
= desc
["action_id"]
8255 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8256 await self
._wait
_ng
_ro
(
8263 operation
="healing",
8268 "_admin.deployed.RO.operational-status": "running",
8269 "detailed-status": " ".join(stage
),
8271 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8272 self
._write
_op
_status
(nslcmop_id
, stage
)
8274 logging_text
+ "ns healed at RO. RO_id={}".format(action_id
)
8277 except Exception as e
:
8278 stage
[2] = "ERROR healing at VIM"
8279 # self.set_vnfr_at_error(db_vnfrs, str(e))
8281 "Error healing at VIM {}".format(e
),
8282 exc_info
=not isinstance(
8285 ROclient
.ROClientException
,
8311 task_instantiation_info
,
8314 # launch instantiate_N2VC in a asyncio task and register task object
8315 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8316 # if not found, create one entry and update database
8317 # fill db_nsr._admin.deployed.VCA.<index>
8320 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
8324 get_charm_name
= False
8325 if "execution-environment-list" in descriptor_config
:
8326 ee_list
= descriptor_config
.get("execution-environment-list", [])
8327 elif "juju" in descriptor_config
:
8328 ee_list
= [descriptor_config
] # ns charms
8329 if "execution-environment-list" not in descriptor_config
:
8330 # charm name is only required for ns charms
8331 get_charm_name
= True
8332 else: # other types as script are not supported
8335 for ee_item
in ee_list
:
8338 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8339 ee_item
.get("juju"), ee_item
.get("helm-chart")
8342 ee_descriptor_id
= ee_item
.get("id")
8343 vca_name
, charm_name
, vca_type
= self
.get_vca_info(
8344 ee_item
, db_nsr
, get_charm_name
8348 logging_text
+ "skipping, non juju/charm/helm configuration"
8353 for vca_index
, vca_deployed
in enumerate(
8354 db_nsr
["_admin"]["deployed"]["VCA"]
8356 if not vca_deployed
:
8359 vca_deployed
.get("member-vnf-index") == member_vnf_index
8360 and vca_deployed
.get("vdu_id") == vdu_id
8361 and vca_deployed
.get("kdu_name") == kdu_name
8362 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
8363 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
8367 # not found, create one.
8369 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
8372 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
8374 target
+= "/kdu/{}".format(kdu_name
)
8376 "target_element": target
,
8377 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8378 "member-vnf-index": member_vnf_index
,
8380 "kdu_name": kdu_name
,
8381 "vdu_count_index": vdu_index
,
8382 "operational-status": "init", # TODO revise
8383 "detailed-status": "", # TODO revise
8384 "step": "initial-deploy", # TODO revise
8386 "vdu_name": vdu_name
,
8388 "ee_descriptor_id": ee_descriptor_id
,
8389 "charm_name": charm_name
,
8393 # create VCA and configurationStatus in db
8395 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
8396 "configurationStatus.{}".format(vca_index
): dict(),
8398 self
.update_db_2("nsrs", nsr_id
, db_dict
)
8400 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
8402 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
8403 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
8404 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
8407 task_n2vc
= asyncio
.ensure_future(
8409 logging_text
=logging_text
,
8410 vca_index
=vca_index
,
8416 vdu_index
=vdu_index
,
8417 deploy_params
=deploy_params
,
8418 config_descriptor
=descriptor_config
,
8419 base_folder
=base_folder
,
8420 nslcmop_id
=nslcmop_id
,
8424 ee_config_descriptor
=ee_item
,
8427 self
.lcm_tasks
.register(
8431 "instantiate_N2VC-{}".format(vca_index
),
8434 task_instantiation_info
[
8436 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
8437 member_vnf_index
or "", vdu_id
or ""
8440 async def heal_N2VC(
8457 ee_config_descriptor
,
8459 nsr_id
= db_nsr
["_id"]
8460 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
8461 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
8462 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
8463 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
8465 "collection": "nsrs",
8466 "filter": {"_id": nsr_id
},
8467 "path": db_update_entry
,
8472 element_under_configuration
= nsr_id
8476 vnfr_id
= db_vnfr
["_id"]
8477 osm_config
["osm"]["vnf_id"] = vnfr_id
8479 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
8481 if vca_type
== "native_charm":
8484 index_number
= vdu_index
or 0
8487 element_type
= "VNF"
8488 element_under_configuration
= vnfr_id
8489 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
8491 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
8492 element_type
= "VDU"
8493 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
8494 osm_config
["osm"]["vdu_id"] = vdu_id
8496 namespace
+= ".{}".format(kdu_name
)
8497 element_type
= "KDU"
8498 element_under_configuration
= kdu_name
8499 osm_config
["osm"]["kdu_name"] = kdu_name
8502 if base_folder
["pkg-dir"]:
8503 artifact_path
= "{}/{}/{}/{}".format(
8504 base_folder
["folder"],
8505 base_folder
["pkg-dir"],
8508 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8513 artifact_path
= "{}/Scripts/{}/{}/".format(
8514 base_folder
["folder"],
8517 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8522 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
8524 # get initial_config_primitive_list that applies to this element
8525 initial_config_primitive_list
= config_descriptor
.get(
8526 "initial-config-primitive"
8530 "Initial config primitive list > {}".format(
8531 initial_config_primitive_list
8535 # add config if not present for NS charm
8536 ee_descriptor_id
= ee_config_descriptor
.get("id")
8537 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
8538 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
8539 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
8543 "Initial config primitive list #2 > {}".format(
8544 initial_config_primitive_list
8547 # n2vc_redesign STEP 3.1
8548 # find old ee_id if exists
8549 ee_id
= vca_deployed
.get("ee_id")
8551 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
8552 # create or register execution environment in VCA. Only for native charms when healing
8553 if vca_type
== "native_charm":
8554 step
= "Waiting to VM being up and getting IP address"
8555 self
.logger
.debug(logging_text
+ step
)
8556 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8565 credentials
= {"hostname": rw_mgmt_ip
}
8567 username
= deep_get(
8568 config_descriptor
, ("config-access", "ssh-access", "default-user")
8570 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
8571 # merged. Meanwhile let's get username from initial-config-primitive
8572 if not username
and initial_config_primitive_list
:
8573 for config_primitive
in initial_config_primitive_list
:
8574 for param
in config_primitive
.get("parameter", ()):
8575 if param
["name"] == "ssh-username":
8576 username
= param
["value"]
8580 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8581 "'config-access.ssh-access.default-user'"
8583 credentials
["username"] = username
8585 # n2vc_redesign STEP 3.2
8586 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8587 self
._write
_configuration
_status
(
8589 vca_index
=vca_index
,
8590 status
="REGISTERING",
8591 element_under_configuration
=element_under_configuration
,
8592 element_type
=element_type
,
8595 step
= "register execution environment {}".format(credentials
)
8596 self
.logger
.debug(logging_text
+ step
)
8597 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
8598 credentials
=credentials
,
8599 namespace
=namespace
,
8604 # update ee_id en db
8606 "_admin.deployed.VCA.{}.ee_id".format(vca_index
): ee_id
,
8608 self
.update_db_2("nsrs", nsr_id
, db_dict_ee_id
)
8610 # for compatibility with MON/POL modules, the need model and application name at database
8611 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
8612 # Not sure if this need to be done when healing
8614 ee_id_parts = ee_id.split(".")
8615 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8616 if len(ee_id_parts) >= 2:
8617 model_name = ee_id_parts[0]
8618 application_name = ee_id_parts[1]
8619 db_nsr_update[db_update_entry + "model"] = model_name
8620 db_nsr_update[db_update_entry + "application"] = application_name
8623 # n2vc_redesign STEP 3.3
8624 # Install configuration software. Only for native charms.
8625 step
= "Install configuration Software"
8627 self
._write
_configuration
_status
(
8629 vca_index
=vca_index
,
8630 status
="INSTALLING SW",
8631 element_under_configuration
=element_under_configuration
,
8632 element_type
=element_type
,
8633 # other_update=db_nsr_update,
8637 # TODO check if already done
8638 self
.logger
.debug(logging_text
+ step
)
8640 if vca_type
== "native_charm":
8641 config_primitive
= next(
8642 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
8645 if config_primitive
:
8646 config
= self
._map
_primitive
_params
(
8647 config_primitive
, {}, deploy_params
8649 await self
.vca_map
[vca_type
].install_configuration_sw(
8651 artifact_path
=artifact_path
,
8659 # write in db flag of configuration_sw already installed
8661 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
8664 # Not sure if this need to be done when healing
8666 # add relations for this VCA (wait for other peers related with this VCA)
8667 await self._add_vca_relations(
8668 logging_text=logging_text,
8671 vca_index=vca_index,
8675 # if SSH access is required, then get execution environment SSH public
8676 # if native charm we have waited already to VM be UP
8677 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
8680 # self.logger.debug("get ssh key block")
8682 config_descriptor
, ("config-access", "ssh-access", "required")
8684 # self.logger.debug("ssh key needed")
8685 # Needed to inject a ssh key
8688 ("config-access", "ssh-access", "default-user"),
8690 step
= "Install configuration Software, getting public ssh key"
8691 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
8692 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
8695 step
= "Insert public key into VM user={} ssh_key={}".format(
8699 # self.logger.debug("no need to get ssh key")
8700 step
= "Waiting to VM being up and getting IP address"
8701 self
.logger
.debug(logging_text
+ step
)
8703 # n2vc_redesign STEP 5.1
8704 # wait for RO (ip-address) Insert pub_key into VM
8705 # IMPORTANT: We need do wait for RO to complete healing operation.
8706 await self
._wait
_heal
_ro
(nsr_id
, self
.timeout
.ns_heal
)
8709 rw_mgmt_ip
= await self
.wait_kdu_up(
8710 logging_text
, nsr_id
, vnfr_id
, kdu_name
8713 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8723 rw_mgmt_ip
= None # This is for a NS configuration
8725 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
8727 # store rw_mgmt_ip in deploy params for later replacement
8728 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
8731 # get run-day1 operation parameter
8732 runDay1
= deploy_params
.get("run-day1", False)
8734 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id
, vdu_id
, runDay1
)
8737 # n2vc_redesign STEP 6 Execute initial config primitive
8738 step
= "execute initial config primitive"
8740 # wait for dependent primitives execution (NS -> VNF -> VDU)
8741 if initial_config_primitive_list
:
8742 await self
._wait
_dependent
_n
2vc
(
8743 nsr_id
, vca_deployed_list
, vca_index
8746 # stage, in function of element type: vdu, kdu, vnf or ns
8747 my_vca
= vca_deployed_list
[vca_index
]
8748 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
8750 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
8751 elif my_vca
.get("member-vnf-index"):
8753 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
8756 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
8758 self
._write
_configuration
_status
(
8759 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
8762 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
8764 check_if_terminated_needed
= True
8765 for initial_config_primitive
in initial_config_primitive_list
:
8766 # adding information on the vca_deployed if it is a NS execution environment
8767 if not vca_deployed
["member-vnf-index"]:
8768 deploy_params
["ns_config_info"] = json
.dumps(
8769 self
._get
_ns
_config
_info
(nsr_id
)
8771 # TODO check if already done
8772 primitive_params_
= self
._map
_primitive
_params
(
8773 initial_config_primitive
, {}, deploy_params
8776 step
= "execute primitive '{}' params '{}'".format(
8777 initial_config_primitive
["name"], primitive_params_
8779 self
.logger
.debug(logging_text
+ step
)
8780 await self
.vca_map
[vca_type
].exec_primitive(
8782 primitive_name
=initial_config_primitive
["name"],
8783 params_dict
=primitive_params_
,
8788 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
8789 if check_if_terminated_needed
:
8790 if config_descriptor
.get("terminate-config-primitive"):
8794 {db_update_entry
+ "needed_terminate": True},
8796 check_if_terminated_needed
= False
8798 # TODO register in database that primitive is done
8800 # STEP 7 Configure metrics
8801 # Not sure if this need to be done when healing
8803 if vca_type == "helm" or vca_type == "helm-v3":
8804 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8806 artifact_path=artifact_path,
8807 ee_config_descriptor=ee_config_descriptor,
8810 target_ip=rw_mgmt_ip,
8816 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8819 for job in prometheus_jobs:
8822 {"job_name": job["job_name"]},
8825 fail_on_empty=False,
8829 step
= "instantiated at VCA"
8830 self
.logger
.debug(logging_text
+ step
)
8832 self
._write
_configuration
_status
(
8833 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
8836 except Exception as e
: # TODO not use Exception but N2VC exception
8837 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8839 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
8842 "Exception while {} : {}".format(step
, e
), exc_info
=True
8844 self
._write
_configuration
_status
(
8845 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
8847 raise LcmException("{} {}".format(step
, e
)) from e
8849 async def _wait_heal_ro(
8855 while time() <= start_time
+ timeout
:
8856 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8857 operational_status_ro
= db_nsr
["_admin"]["deployed"]["RO"][
8858 "operational-status"
8860 self
.logger
.debug("Wait Heal RO > {}".format(operational_status_ro
))
8861 if operational_status_ro
!= "healing":
8863 await asyncio
.sleep(15)
8864 else: # timeout_ns_deploy
8865 raise NgRoException("Timeout waiting ns to deploy")
8867 async def vertical_scale(self
, nsr_id
, nslcmop_id
):
8869 Vertical Scale the VDUs in a NS
8871 :param: nsr_id: NS Instance ID
8872 :param: nslcmop_id: nslcmop ID of migrate
8875 # Try to lock HA task here
8876 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
8877 if not task_is_locked_by_me
:
8879 logging_text
= "Task ns={} vertical scale ".format(nsr_id
)
8880 self
.logger
.debug(logging_text
+ "Enter")
8881 # get all needed from database
8883 db_nslcmop_update
= {}
8884 nslcmop_operation_state
= None
8887 old_vdu_index
= None
8888 old_flavor_id
= None
8892 # in case of error, indicates what part of scale was failed to put nsr at error status
8893 start_deploy
= time()
8896 # wait for any previous tasks in process
8897 step
= "Waiting for previous operations to terminate"
8898 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
8900 self
._write
_ns
_status
(
8903 current_operation
="VerticalScale",
8904 current_operation_id
=nslcmop_id
,
8906 step
= "Getting nslcmop from database"
8908 step
+ " after having waited for previous tasks to be completed"
8910 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8911 operationParams
= db_nslcmop
.get("operationParams")
8912 # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
8913 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8914 db_flavor
= db_nsr
.get("flavor")
8915 db_flavor_index
= str(len(db_flavor
))
8916 change_vnf_flavor_data
= operationParams
["changeVnfFlavorData"]
8917 flavor_dict
= change_vnf_flavor_data
["additionalParams"]
8918 count_index
= flavor_dict
["vduCountIndex"]
8919 vdu_id_ref
= flavor_dict
["vduid"]
8920 flavor_dict_update
= {
8921 "id": db_flavor_index
,
8922 "memory-mb": flavor_dict
["virtualMemory"],
8923 "name": f
"{vdu_id_ref}-{count_index}-flv",
8924 "storage-gb": flavor_dict
["sizeOfStorage"],
8925 "vcpu-count": flavor_dict
["numVirtualCpu"],
8927 db_flavor
.append(flavor_dict_update
)
8929 db_update
["flavor"] = db_flavor
8935 q_filter
=ns_q_filter
,
8936 update_dict
=db_update
,
8939 db_vnfr
= self
.db
.get_one(
8940 "vnfrs", {"_id": change_vnf_flavor_data
["vnfInstanceId"]}
8942 for vdu_index
, vdur
in enumerate(db_vnfr
.get("vdur", ())):
8944 vdur
.get("count-index") == count_index
8945 and vdur
.get("vdu-id-ref") == vdu_id_ref
8947 old_flavor_id
= vdur
.get("ns-flavor-id", 0)
8948 old_vdu_index
= vdu_index
8950 "_id": change_vnf_flavor_data
["vnfInstanceId"],
8951 "vdur.count-index": count_index
,
8952 "vdur.vdu-id-ref": vdu_id_ref
,
8954 q_filter
.update(filter_text
)
8957 "vdur.{}.ns-flavor-id".format(vdu_index
)
8962 update_dict
=db_update
,
8966 target
.update(operationParams
)
8967 desc
= await self
.RO
.vertical_scale(nsr_id
, target
)
8968 self
.logger
.debug("RO return > {}".format(desc
))
8969 action_id
= desc
["action_id"]
8970 await self
._wait
_ng
_ro
(
8975 self
.timeout
.verticalscale
,
8976 operation
="verticalscale",
8980 ROclient
.ROClientException
,
8984 self
.logger
.error("Exit Exception {}".format(e
))
8986 except asyncio
.CancelledError
:
8987 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
8988 exc
= "Operation was cancelled"
8989 except Exception as e
:
8990 exc
= traceback
.format_exc()
8991 self
.logger
.critical(
8992 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
8995 self
._write
_ns
_status
(
8998 current_operation
="IDLE",
8999 current_operation_id
=None,
9002 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
9003 nslcmop_operation_state
= "FAILED"
9005 "vdur.{}.ns-flavor-id".format(old_vdu_index
)
9008 nslcmop_operation_state
= "COMPLETED"
9009 db_nslcmop_update
["detailed-status"] = "Done"
9010 db_nsr_update
["detailed-status"] = "Done"
9012 self
._write
_op
_status
(
9016 operation_state
=nslcmop_operation_state
,
9017 other_update
=db_nslcmop_update
,
9019 if old_vdu_index
and old_db_update
!= {}:
9020 self
.logger
.critical(
9021 "Reverting Old Flavor -- : {}".format(old_db_update
)
9026 update_dict
=old_db_update
,
9029 if nslcmop_operation_state
:
9033 "nslcmop_id": nslcmop_id
,
9034 "operationState": nslcmop_operation_state
,
9036 await self
.msg
.aiowrite("ns", "verticalscaled", msg
)
9037 except Exception as e
:
9039 logging_text
+ "kafka_write notification Exception {}".format(e
)
9041 self
.logger
.debug(logging_text
+ "Exit")
9042 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_verticalscale")