1 # -*- coding: utf-8 -*-
4 # Copyright 2018 Telefonica S.A.
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
21 from typing
import Any
, Dict
, List
24 import logging
.handlers
37 from osm_lcm
import ROclient
38 from osm_lcm
.data_utils
.lcm_config
import LcmCfg
39 from osm_lcm
.data_utils
.nsr
import (
42 get_deployed_vca_list
,
45 from osm_lcm
.data_utils
.vca
import (
54 from osm_lcm
.ng_ro
import NgRoClient
, NgRoException
55 from osm_lcm
.lcm_utils
import (
61 check_juju_bundle_existence
,
62 get_charm_artifact_path
,
66 from osm_lcm
.data_utils
.nsd
import (
67 get_ns_configuration_relation_list
,
71 from osm_lcm
.data_utils
.vnfd
import (
77 get_ee_sorted_initial_config_primitive_list
,
78 get_ee_sorted_terminate_config_primitive_list
,
80 get_virtual_link_profiles
,
85 get_number_of_instances
,
87 get_kdu_resource_profile
,
88 find_software_version
,
91 from osm_lcm
.data_utils
.list_utils
import find_in_list
92 from osm_lcm
.data_utils
.vnfr
import (
96 get_volumes_from_instantiation_params
,
98 from osm_lcm
.data_utils
.dict_utils
import parse_yaml_strings
99 from osm_lcm
.data_utils
.database
.vim_account
import VimAccountDB
100 from n2vc
.definitions
import RelationEndpoint
101 from n2vc
.k8s_helm3_conn
import K8sHelm3Connector
102 from n2vc
.k8s_juju_conn
import K8sJujuConnector
104 from osm_common
.dbbase
import DbException
105 from osm_common
.fsbase
import FsException
107 from osm_lcm
.data_utils
.database
.database
import Database
108 from osm_lcm
.data_utils
.filesystem
.filesystem
import Filesystem
109 from osm_lcm
.data_utils
.wim
import (
111 get_target_wim_attrs
,
112 select_feasible_wim_account
,
115 from n2vc
.n2vc_juju_conn
import N2VCJujuConnector
116 from n2vc
.exceptions
import N2VCException
, N2VCNotFound
, K8sException
118 from osm_lcm
.lcm_helm_conn
import LCMHelmConn
119 from osm_lcm
.osm_config
import OsmConfigBuilder
120 from osm_lcm
.prometheus
import parse_job
122 from copy
import copy
, deepcopy
123 from time
import time
124 from uuid
import uuid4
126 from random
import SystemRandom
128 __author__
= "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
131 class NsLcm(LcmBase
):
132 SUBOPERATION_STATUS_NOT_FOUND
= -1
133 SUBOPERATION_STATUS_NEW
= -2
134 SUBOPERATION_STATUS_SKIP
= -3
135 EE_TLS_NAME
= "ee-tls"
136 task_name_deploy_vca
= "Deploying VCA"
137 rel_operation_types
= {
146 def __init__(self
, msg
, lcm_tasks
, config
: LcmCfg
):
148 Init, Connect to database, filesystem storage, and messaging
149 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
152 super().__init
__(msg
=msg
, logger
=logging
.getLogger("lcm.ns"))
154 self
.db
= Database().instance
.db
155 self
.fs
= Filesystem().instance
.fs
156 self
.lcm_tasks
= lcm_tasks
157 self
.timeout
= config
.timeout
158 self
.ro_config
= config
.RO
159 self
.vca_config
= config
.VCA
161 # create N2VC connector
162 self
.n2vc
= N2VCJujuConnector(
164 on_update_db
=self
._on
_update
_n
2vc
_db
,
169 self
.conn_helm_ee
= LCMHelmConn(
171 vca_config
=self
.vca_config
,
172 on_update_db
=self
._on
_update
_n
2vc
_db
,
175 self
.k8sclusterhelm3
= K8sHelm3Connector(
176 kubectl_command
=self
.vca_config
.kubectlpath
,
177 helm_command
=self
.vca_config
.helm3path
,
184 self
.k8sclusterjuju
= K8sJujuConnector(
185 kubectl_command
=self
.vca_config
.kubectlpath
,
186 juju_command
=self
.vca_config
.jujupath
,
188 on_update_db
=self
._on
_update
_k
8s
_db
,
193 self
.k8scluster_map
= {
194 "helm-chart-v3": self
.k8sclusterhelm3
,
195 "chart": self
.k8sclusterhelm3
,
196 "juju-bundle": self
.k8sclusterjuju
,
197 "juju": self
.k8sclusterjuju
,
201 "lxc_proxy_charm": self
.n2vc
,
202 "native_charm": self
.n2vc
,
203 "k8s_proxy_charm": self
.n2vc
,
204 "helm": self
.conn_helm_ee
,
205 "helm-v3": self
.conn_helm_ee
,
209 self
.RO
= NgRoClient(**self
.ro_config
.to_dict())
211 self
.op_status_map
= {
212 "instantiation": self
.RO
.status
,
213 "termination": self
.RO
.status
,
214 "migrate": self
.RO
.status
,
215 "healing": self
.RO
.recreate_status
,
216 "verticalscale": self
.RO
.status
,
217 "start_stop_rebuild": self
.RO
.status
,
221 def increment_ip_mac(ip_mac
, vm_index
=1):
222 if not isinstance(ip_mac
, str):
227 dual_ip
= ip_mac
.split(";")
228 if len(dual_ip
) == 2:
230 if ipaddress
.ip_address(ip
).version
== 6:
231 ipv6
= ipaddress
.IPv6Address(ip
)
232 next_ipv6
= str(ipaddress
.IPv6Address(int(ipv6
) + 1))
233 elif ipaddress
.ip_address(ip
).version
== 4:
234 ipv4
= ipaddress
.IPv4Address(ip
)
235 next_ipv4
= str(ipaddress
.IPv4Address(int(ipv4
) + 1))
236 return [next_ipv4
, next_ipv6
]
237 # try with ipv4 look for last dot
238 i
= ip_mac
.rfind(".")
241 return "{}{}".format(ip_mac
[:i
], int(ip_mac
[i
:]) + vm_index
)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i
= ip_mac
.rfind(":")
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac
) - i
) + "x}").format(
248 ip_mac
[:i
], int(ip_mac
[i
:], 16) + vm_index
254 async def _on_update_n2vc_db(self
, table
, filter, path
, updated_data
, vca_id
=None):
255 # remove last dot from path (if exists)
256 if path
.endswith("."):
259 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
260 # .format(table, filter, path, updated_data))
262 nsr_id
= filter.get("_id")
264 # read ns record from database
265 nsr
= self
.db
.get_one(table
="nsrs", q_filter
=filter)
266 current_ns_status
= nsr
.get("nsState")
268 # get vca status for NS
269 status_dict
= await self
.n2vc
.get_status(
270 namespace
="." + nsr_id
, yaml_format
=False, vca_id
=vca_id
275 db_dict
["vcaStatus"] = status_dict
277 # update configurationStatus for this VCA
279 vca_index
= int(path
[path
.rfind(".") + 1 :])
282 target_dict
=nsr
, key_list
=("_admin", "deployed", "VCA")
284 vca_status
= vca_list
[vca_index
].get("status")
286 configuration_status_list
= nsr
.get("configurationStatus")
287 config_status
= configuration_status_list
[vca_index
].get("status")
289 if config_status
== "BROKEN" and vca_status
!= "failed":
290 db_dict
["configurationStatus"][vca_index
] = "READY"
291 elif config_status
!= "BROKEN" and vca_status
== "failed":
292 db_dict
["configurationStatus"][vca_index
] = "BROKEN"
293 except Exception as e
:
294 # not update configurationStatus
295 self
.logger
.debug("Error updating vca_index (ignore): {}".format(e
))
297 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
298 # if nsState = 'DEGRADED' check if all is OK
300 if current_ns_status
in ("READY", "DEGRADED"):
301 error_description
= ""
303 if status_dict
.get("machines"):
304 for machine_id
in status_dict
.get("machines"):
305 machine
= status_dict
.get("machines").get(machine_id
)
306 # check machine agent-status
307 if machine
.get("agent-status"):
308 s
= machine
.get("agent-status").get("status")
311 error_description
+= (
312 "machine {} agent-status={} ; ".format(
316 # check machine instance status
317 if machine
.get("instance-status"):
318 s
= machine
.get("instance-status").get("status")
321 error_description
+= (
322 "machine {} instance-status={} ; ".format(
327 if status_dict
.get("applications"):
328 for app_id
in status_dict
.get("applications"):
329 app
= status_dict
.get("applications").get(app_id
)
330 # check application status
331 if app
.get("status"):
332 s
= app
.get("status").get("status")
335 error_description
+= (
336 "application {} status={} ; ".format(app_id
, s
)
339 if error_description
:
340 db_dict
["errorDescription"] = error_description
341 if current_ns_status
== "READY" and is_degraded
:
342 db_dict
["nsState"] = "DEGRADED"
343 if current_ns_status
== "DEGRADED" and not is_degraded
:
344 db_dict
["nsState"] = "READY"
347 self
.update_db_2("nsrs", nsr_id
, db_dict
)
349 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
351 except Exception as e
:
352 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
354 async def _on_update_k8s_db(
355 self
, cluster_uuid
, kdu_instance
, filter=None, vca_id
=None, cluster_type
="juju"
358 Updating vca status in NSR record
359 :param cluster_uuid: UUID of a k8s cluster
360 :param kdu_instance: The unique name of the KDU instance
361 :param filter: To get nsr_id
362 :cluster_type: The cluster type (juju, k8s)
366 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
367 # .format(cluster_uuid, kdu_instance, filter))
369 nsr_id
= filter.get("_id")
371 vca_status
= await self
.k8scluster_map
[cluster_type
].status_kdu(
372 cluster_uuid
=cluster_uuid
,
373 kdu_instance
=kdu_instance
,
375 complete_status
=True,
381 db_dict
["vcaStatus"] = {nsr_id
: vca_status
}
384 f
"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
388 self
.update_db_2("nsrs", nsr_id
, db_dict
)
389 except (asyncio
.CancelledError
, asyncio
.TimeoutError
):
391 except Exception as e
:
392 self
.logger
.warn("Error updating NS state for ns={}: {}".format(nsr_id
, e
))
395 def _parse_cloud_init(cloud_init_text
, additional_params
, vnfd_id
, vdu_id
):
398 undefined
=StrictUndefined
,
399 autoescape
=select_autoescape(default_for_string
=True, default
=True),
401 template
= env
.from_string(cloud_init_text
)
402 return template
.render(additional_params
or {})
403 except UndefinedError
as e
:
405 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
406 "file, must be provided in the instantiation parameters inside the "
407 "'additionalParamsForVnf/Vdu' block".format(e
, vnfd_id
, vdu_id
)
409 except (TemplateError
, TemplateNotFound
) as e
:
411 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
416 def _get_vdu_cloud_init_content(self
, vdu
, vnfd
):
417 cloud_init_content
= cloud_init_file
= None
419 if vdu
.get("cloud-init-file"):
420 base_folder
= vnfd
["_admin"]["storage"]
421 if base_folder
["pkg-dir"]:
422 cloud_init_file
= "{}/{}/cloud_init/{}".format(
423 base_folder
["folder"],
424 base_folder
["pkg-dir"],
425 vdu
["cloud-init-file"],
428 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
429 base_folder
["folder"],
430 vdu
["cloud-init-file"],
432 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
433 cloud_init_content
= ci_file
.read()
434 elif vdu
.get("cloud-init"):
435 cloud_init_content
= vdu
["cloud-init"]
437 return cloud_init_content
438 except FsException
as e
:
440 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
441 vnfd
["id"], vdu
["id"], cloud_init_file
, e
445 def _get_vdu_additional_params(self
, db_vnfr
, vdu_id
):
447 (vdur
for vdur
in db_vnfr
.get("vdur") if vdu_id
== vdur
["vdu-id-ref"]), {}
449 additional_params
= vdur
.get("additionalParams")
450 return parse_yaml_strings(additional_params
)
453 def ip_profile_2_RO(ip_profile
):
454 RO_ip_profile
= deepcopy(ip_profile
)
455 if "dns-server" in RO_ip_profile
:
456 if isinstance(RO_ip_profile
["dns-server"], list):
457 RO_ip_profile
["dns-address"] = []
458 for ds
in RO_ip_profile
.pop("dns-server"):
459 RO_ip_profile
["dns-address"].append(ds
["address"])
461 RO_ip_profile
["dns-address"] = RO_ip_profile
.pop("dns-server")
462 if RO_ip_profile
.get("ip-version") == "ipv4":
463 RO_ip_profile
["ip-version"] = "IPv4"
464 if RO_ip_profile
.get("ip-version") == "ipv6":
465 RO_ip_profile
["ip-version"] = "IPv6"
466 if "dhcp-params" in RO_ip_profile
:
467 RO_ip_profile
["dhcp"] = RO_ip_profile
.pop("dhcp-params")
470 def scale_vnfr(self
, db_vnfr
, vdu_create
=None, vdu_delete
=None, mark_delete
=False):
471 db_vdu_push_list
= []
473 db_update
= {"_admin.modified": time()}
475 for vdu_id
, vdu_count
in vdu_create
.items():
479 for vdur
in reversed(db_vnfr
["vdur"])
480 if vdur
["vdu-id-ref"] == vdu_id
485 # Read the template saved in the db:
487 "No vdur in the database. Using the vdur-template to scale"
489 vdur_template
= db_vnfr
.get("vdur-template")
490 if not vdur_template
:
492 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
496 vdur
= vdur_template
[0]
497 # Delete a template from the database after using it
500 {"_id": db_vnfr
["_id"]},
502 pull
={"vdur-template": {"_id": vdur
["_id"]}},
504 for count
in range(vdu_count
):
505 vdur_copy
= deepcopy(vdur
)
506 vdur_copy
["status"] = "BUILD"
507 vdur_copy
["status-detailed"] = None
508 vdur_copy
["ip-address"] = None
509 vdur_copy
["_id"] = str(uuid4())
510 vdur_copy
["count-index"] += count
+ 1
511 vdur_copy
["id"] = "{}-{}".format(
512 vdur_copy
["vdu-id-ref"], vdur_copy
["count-index"]
514 vdur_copy
.pop("vim_info", None)
515 for iface
in vdur_copy
["interfaces"]:
516 if iface
.get("fixed-ip"):
517 iface
["ip-address"] = self
.increment_ip_mac(
518 iface
["ip-address"], count
+ 1
521 iface
.pop("ip-address", None)
522 if iface
.get("fixed-mac"):
523 iface
["mac-address"] = self
.increment_ip_mac(
524 iface
["mac-address"], count
+ 1
527 iface
.pop("mac-address", None)
531 ) # only first vdu can be managment of vnf
532 db_vdu_push_list
.append(vdur_copy
)
533 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
535 if len(db_vnfr
["vdur"]) == 1:
536 # The scale will move to 0 instances
538 "Scaling to 0 !, creating the template with the last vdur"
540 template_vdur
= [db_vnfr
["vdur"][0]]
541 for vdu_id
, vdu_count
in vdu_delete
.items():
543 indexes_to_delete
= [
545 for iv
in enumerate(db_vnfr
["vdur"])
546 if iv
[1]["vdu-id-ref"] == vdu_id
550 "vdur.{}.status".format(i
): "DELETING"
551 for i
in indexes_to_delete
[-vdu_count
:]
555 # it must be deleted one by one because common.db does not allow otherwise
558 for v
in reversed(db_vnfr
["vdur"])
559 if v
["vdu-id-ref"] == vdu_id
561 for vdu
in vdus_to_delete
[:vdu_count
]:
564 {"_id": db_vnfr
["_id"]},
566 pull
={"vdur": {"_id": vdu
["_id"]}},
570 db_push
["vdur"] = db_vdu_push_list
572 db_push
["vdur-template"] = template_vdur
575 db_vnfr
["vdur-template"] = template_vdur
576 self
.db
.set_one("vnfrs", {"_id": db_vnfr
["_id"]}, db_update
, push_list
=db_push
)
577 # modify passed dictionary db_vnfr
578 db_vnfr_
= self
.db
.get_one("vnfrs", {"_id": db_vnfr
["_id"]})
579 db_vnfr
["vdur"] = db_vnfr_
["vdur"]
581 def ns_update_nsr(self
, ns_update_nsr
, db_nsr
, nsr_desc_RO
):
583 Updates database nsr with the RO info for the created vld
584 :param ns_update_nsr: dictionary to be filled with the updated info
585 :param db_nsr: content of db_nsr. This is also modified
586 :param nsr_desc_RO: nsr descriptor from RO
587 :return: Nothing, LcmException is raised on errors
590 for vld_index
, vld
in enumerate(get_iterable(db_nsr
, "vld")):
591 for net_RO
in get_iterable(nsr_desc_RO
, "nets"):
592 if vld
["id"] != net_RO
.get("ns_net_osm_id"):
594 vld
["vim-id"] = net_RO
.get("vim_net_id")
595 vld
["name"] = net_RO
.get("vim_name")
596 vld
["status"] = net_RO
.get("status")
597 vld
["status-detailed"] = net_RO
.get("error_msg")
598 ns_update_nsr
["vld.{}".format(vld_index
)] = vld
602 "ns_update_nsr: Not found vld={} at RO info".format(vld
["id"])
605 def set_vnfr_at_error(self
, db_vnfrs
, error_text
):
607 for db_vnfr
in db_vnfrs
.values():
608 vnfr_update
= {"status": "ERROR"}
609 for vdu_index
, vdur
in enumerate(get_iterable(db_vnfr
, "vdur")):
610 if "status" not in vdur
:
611 vdur
["status"] = "ERROR"
612 vnfr_update
["vdur.{}.status".format(vdu_index
)] = "ERROR"
614 vdur
["status-detailed"] = str(error_text
)
616 "vdur.{}.status-detailed".format(vdu_index
)
618 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
619 except DbException
as e
:
620 self
.logger
.error("Cannot update vnf. {}".format(e
))
622 def _get_ns_config_info(self
, nsr_id
):
624 Generates a mapping between vnf,vdu elements and the N2VC id
625 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
626 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
627 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
628 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
630 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
631 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
633 ns_config_info
= {"osm-config-mapping": mapping
}
634 for vca
in vca_deployed_list
:
635 if not vca
["member-vnf-index"]:
637 if not vca
["vdu_id"]:
638 mapping
[vca
["member-vnf-index"]] = vca
["application"]
642 vca
["member-vnf-index"], vca
["vdu_id"], vca
["vdu_count_index"]
644 ] = vca
["application"]
645 return ns_config_info
647 async def _instantiate_ng_ro(
663 def get_vim_account(vim_account_id
):
665 if vim_account_id
in db_vims
:
666 return db_vims
[vim_account_id
]
667 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
668 db_vims
[vim_account_id
] = db_vim
671 # modify target_vld info with instantiation parameters
672 def parse_vld_instantiation_params(
673 target_vim
, target_vld
, vld_params
, target_sdn
675 if vld_params
.get("ip-profile"):
676 target_vld
["vim_info"][target_vim
]["ip_profile"] = vld_to_ro_ip_profile(
677 vld_params
["ip-profile"]
679 if vld_params
.get("provider-network"):
680 target_vld
["vim_info"][target_vim
]["provider_network"] = vld_params
[
683 if "sdn-ports" in vld_params
["provider-network"] and target_sdn
:
684 target_vld
["vim_info"][target_sdn
]["sdn-ports"] = vld_params
[
688 # check if WIM is needed; if needed, choose a feasible WIM able to connect VIMs
689 # if wim_account_id is specified in vld_params, validate if it is feasible.
690 wim_account_id
, db_wim
= select_feasible_wim_account(
691 db_nsr
, db_vnfrs
, target_vld
, vld_params
, self
.logger
695 # WIM is needed and a feasible one was found, populate WIM target and SDN ports
696 self
.logger
.info("WIM selected: {:s}".format(str(wim_account_id
)))
697 # update vld_params with correct WIM account Id
698 vld_params
["wimAccountId"] = wim_account_id
700 target_wim
= "wim:{}".format(wim_account_id
)
701 target_wim_attrs
= get_target_wim_attrs(nsr_id
, target_vld
, vld_params
)
702 sdn_ports
= get_sdn_ports(vld_params
, db_wim
)
703 if len(sdn_ports
) > 0:
704 target_vld
["vim_info"][target_wim
] = target_wim_attrs
705 target_vld
["vim_info"][target_wim
]["sdn-ports"] = sdn_ports
708 "Target VLD with WIM data: {:s}".format(str(target_vld
))
711 for param
in ("vim-network-name", "vim-network-id"):
712 if vld_params
.get(param
):
713 if isinstance(vld_params
[param
], dict):
714 for vim
, vim_net
in vld_params
[param
].items():
715 other_target_vim
= "vim:" + vim
717 target_vld
["vim_info"],
718 (other_target_vim
, param
.replace("-", "_")),
721 else: # isinstance str
722 target_vld
["vim_info"][target_vim
][
723 param
.replace("-", "_")
724 ] = vld_params
[param
]
725 if vld_params
.get("common_id"):
726 target_vld
["common_id"] = vld_params
.get("common_id")
728 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
729 def update_ns_vld_target(target
, ns_params
):
730 for vnf_params
in ns_params
.get("vnf", ()):
731 if vnf_params
.get("vimAccountId"):
735 for vnfr
in db_vnfrs
.values()
736 if vnf_params
["member-vnf-index"]
737 == vnfr
["member-vnf-index-ref"]
741 vdur
= next((vdur
for vdur
in target_vnf
.get("vdur", ())), None)
744 for a_index
, a_vld
in enumerate(target
["ns"]["vld"]):
745 target_vld
= find_in_list(
746 get_iterable(vdur
, "interfaces"),
747 lambda iface
: iface
.get("ns-vld-id") == a_vld
["name"],
750 vld_params
= find_in_list(
751 get_iterable(ns_params
, "vld"),
752 lambda v_vld
: v_vld
["name"] in (a_vld
["name"], a_vld
["id"]),
755 if vnf_params
.get("vimAccountId") not in a_vld
.get(
758 target_vim_network_list
= [
759 v
for _
, v
in a_vld
.get("vim_info").items()
761 target_vim_network_name
= next(
763 item
.get("vim_network_name", "")
764 for item
in target_vim_network_list
769 target
["ns"]["vld"][a_index
].get("vim_info").update(
771 "vim:{}".format(vnf_params
["vimAccountId"]): {
772 "vim_network_name": target_vim_network_name
,
778 for param
in ("vim-network-name", "vim-network-id"):
779 if vld_params
.get(param
) and isinstance(
780 vld_params
[param
], dict
782 for vim
, vim_net
in vld_params
[
785 other_target_vim
= "vim:" + vim
787 target
["ns"]["vld"][a_index
].get(
792 param
.replace("-", "_"),
797 nslcmop_id
= db_nslcmop
["_id"]
799 "name": db_nsr
["name"],
802 "image": deepcopy(db_nsr
["image"]),
803 "flavor": deepcopy(db_nsr
["flavor"]),
804 "action_id": nslcmop_id
,
805 "cloud_init_content": {},
807 for image
in target
["image"]:
808 image
["vim_info"] = {}
809 for flavor
in target
["flavor"]:
810 flavor
["vim_info"] = {}
811 if db_nsr
.get("shared-volumes"):
812 target
["shared-volumes"] = deepcopy(db_nsr
["shared-volumes"])
813 for shared_volumes
in target
["shared-volumes"]:
814 shared_volumes
["vim_info"] = {}
815 if db_nsr
.get("affinity-or-anti-affinity-group"):
816 target
["affinity-or-anti-affinity-group"] = deepcopy(
817 db_nsr
["affinity-or-anti-affinity-group"]
819 for affinity_or_anti_affinity_group
in target
[
820 "affinity-or-anti-affinity-group"
822 affinity_or_anti_affinity_group
["vim_info"] = {}
824 if db_nslcmop
.get("lcmOperationType") != "instantiate":
825 # get parameters of instantiation:
826 db_nslcmop_instantiate
= self
.db
.get_list(
829 "nsInstanceId": db_nslcmop
["nsInstanceId"],
830 "lcmOperationType": "instantiate",
833 ns_params
= db_nslcmop_instantiate
.get("operationParams")
835 ns_params
= db_nslcmop
.get("operationParams")
836 ssh_keys_instantiation
= ns_params
.get("ssh_keys") or []
837 ssh_keys_all
= ssh_keys_instantiation
+ (n2vc_key_list
or [])
840 for vld_index
, vld
in enumerate(db_nsr
.get("vld")):
841 target_vim
= "vim:{}".format(ns_params
["vimAccountId"])
845 "mgmt-network": vld
.get("mgmt-network", False),
846 "type": vld
.get("type"),
849 "vim_network_name": vld
.get("vim-network-name"),
850 "vim_account_id": ns_params
["vimAccountId"],
854 # check if this network needs SDN assist
855 if vld
.get("pci-interfaces"):
856 db_vim
= get_vim_account(ns_params
["vimAccountId"])
857 if vim_config
:= db_vim
.get("config"):
858 if sdnc_id
:= vim_config
.get("sdn-controller"):
859 sdn_vld
= "nsrs:{}:vld.{}".format(nsr_id
, vld
["id"])
860 target_sdn
= "sdn:{}".format(sdnc_id
)
861 target_vld
["vim_info"][target_sdn
] = {
863 "target_vim": target_vim
,
865 "type": vld
.get("type"),
868 nsd_vnf_profiles
= get_vnf_profiles(nsd
)
869 for nsd_vnf_profile
in nsd_vnf_profiles
:
870 for cp
in nsd_vnf_profile
["virtual-link-connectivity"]:
871 if cp
["virtual-link-profile-id"] == vld
["id"]:
873 "member_vnf:{}.{}".format(
874 cp
["constituent-cpd-id"][0][
875 "constituent-base-element-id"
877 cp
["constituent-cpd-id"][0]["constituent-cpd-id"],
879 ] = "nsrs:{}:vld.{}".format(nsr_id
, vld_index
)
881 # check at nsd descriptor, if there is an ip-profile
883 nsd_vlp
= find_in_list(
884 get_virtual_link_profiles(nsd
),
885 lambda a_link_profile
: a_link_profile
["virtual-link-desc-id"]
890 and nsd_vlp
.get("virtual-link-protocol-data")
891 and nsd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
893 vld_params
["ip-profile"] = nsd_vlp
["virtual-link-protocol-data"][
897 # update vld_params with instantiation params
898 vld_instantiation_params
= find_in_list(
899 get_iterable(ns_params
, "vld"),
900 lambda a_vld
: a_vld
["name"] in (vld
["name"], vld
["id"]),
902 if vld_instantiation_params
:
903 vld_params
.update(vld_instantiation_params
)
904 parse_vld_instantiation_params(target_vim
, target_vld
, vld_params
, None)
905 target
["ns"]["vld"].append(target_vld
)
906 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
907 update_ns_vld_target(target
, ns_params
)
909 for vnfr
in db_vnfrs
.values():
911 db_vnfds
, lambda db_vnf
: db_vnf
["id"] == vnfr
["vnfd-ref"]
913 vnf_params
= find_in_list(
914 get_iterable(ns_params
, "vnf"),
915 lambda a_vnf
: a_vnf
["member-vnf-index"] == vnfr
["member-vnf-index-ref"],
917 target_vnf
= deepcopy(vnfr
)
918 target_vim
= "vim:{}".format(vnfr
["vim-account-id"])
919 for vld
in target_vnf
.get("vld", ()):
920 # check if connected to a ns.vld, to fill target'
921 vnf_cp
= find_in_list(
922 vnfd
.get("int-virtual-link-desc", ()),
923 lambda cpd
: cpd
.get("id") == vld
["id"],
926 ns_cp
= "member_vnf:{}.{}".format(
927 vnfr
["member-vnf-index-ref"], vnf_cp
["id"]
929 if cp2target
.get(ns_cp
):
930 vld
["target"] = cp2target
[ns_cp
]
933 target_vim
: {"vim_network_name": vld
.get("vim-network-name")}
935 # check if this network needs SDN assist
937 if vld
.get("pci-interfaces"):
938 db_vim
= get_vim_account(vnfr
["vim-account-id"])
939 sdnc_id
= db_vim
["config"].get("sdn-controller")
941 sdn_vld
= "vnfrs:{}:vld.{}".format(target_vnf
["_id"], vld
["id"])
942 target_sdn
= "sdn:{}".format(sdnc_id
)
943 vld
["vim_info"][target_sdn
] = {
945 "target_vim": target_vim
,
947 "type": vld
.get("type"),
950 # check at vnfd descriptor, if there is an ip-profile
952 vnfd_vlp
= find_in_list(
953 get_virtual_link_profiles(vnfd
),
954 lambda a_link_profile
: a_link_profile
["id"] == vld
["id"],
958 and vnfd_vlp
.get("virtual-link-protocol-data")
959 and vnfd_vlp
["virtual-link-protocol-data"].get("l3-protocol-data")
961 vld_params
["ip-profile"] = vnfd_vlp
["virtual-link-protocol-data"][
964 # update vld_params with instantiation params
966 vld_instantiation_params
= find_in_list(
967 get_iterable(vnf_params
, "internal-vld"),
968 lambda i_vld
: i_vld
["name"] == vld
["id"],
970 if vld_instantiation_params
:
971 vld_params
.update(vld_instantiation_params
)
972 parse_vld_instantiation_params(target_vim
, vld
, vld_params
, target_sdn
)
975 for vdur
in target_vnf
.get("vdur", ()):
976 if vdur
.get("status") == "DELETING" or vdur
.get("pdu-type"):
977 continue # This vdu must not be created
978 vdur
["vim_info"] = {"vim_account_id": vnfr
["vim-account-id"]}
980 self
.logger
.debug("NS > ssh_keys > {}".format(ssh_keys_all
))
983 vdu_configuration
= get_configuration(vnfd
, vdur
["vdu-id-ref"])
984 vnf_configuration
= get_configuration(vnfd
, vnfd
["id"])
987 and vdu_configuration
.get("config-access")
988 and vdu_configuration
.get("config-access").get("ssh-access")
990 vdur
["ssh-keys"] = ssh_keys_all
991 vdur
["ssh-access-required"] = vdu_configuration
[
993 ]["ssh-access"]["required"]
996 and vnf_configuration
.get("config-access")
997 and vnf_configuration
.get("config-access").get("ssh-access")
998 and any(iface
.get("mgmt-vnf") for iface
in vdur
["interfaces"])
1000 vdur
["ssh-keys"] = ssh_keys_all
1001 vdur
["ssh-access-required"] = vnf_configuration
[
1003 ]["ssh-access"]["required"]
1004 elif ssh_keys_instantiation
and find_in_list(
1005 vdur
["interfaces"], lambda iface
: iface
.get("mgmt-vnf")
1007 vdur
["ssh-keys"] = ssh_keys_instantiation
1009 self
.logger
.debug("NS > vdur > {}".format(vdur
))
1011 vdud
= get_vdu(vnfd
, vdur
["vdu-id-ref"])
1013 if vdud
.get("cloud-init-file"):
1014 vdur
["cloud-init"] = "{}:file:{}".format(
1015 vnfd
["_id"], vdud
.get("cloud-init-file")
1017 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1018 if vdur
["cloud-init"] not in target
["cloud_init_content"]:
1019 base_folder
= vnfd
["_admin"]["storage"]
1020 if base_folder
["pkg-dir"]:
1021 cloud_init_file
= "{}/{}/cloud_init/{}".format(
1022 base_folder
["folder"],
1023 base_folder
["pkg-dir"],
1024 vdud
.get("cloud-init-file"),
1027 cloud_init_file
= "{}/Scripts/cloud_init/{}".format(
1028 base_folder
["folder"],
1029 vdud
.get("cloud-init-file"),
1031 with self
.fs
.file_open(cloud_init_file
, "r") as ci_file
:
1032 target
["cloud_init_content"][
1035 elif vdud
.get("cloud-init"):
1036 vdur
["cloud-init"] = "{}:vdu:{}".format(
1037 vnfd
["_id"], get_vdu_index(vnfd
, vdur
["vdu-id-ref"])
1039 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1040 target
["cloud_init_content"][vdur
["cloud-init"]] = vdud
[
1043 vdur
["additionalParams"] = vdur
.get("additionalParams") or {}
1044 deploy_params_vdu
= self
._format
_additional
_params
(
1045 vdur
.get("additionalParams") or {}
1047 deploy_params_vdu
["OSM"] = get_osm_params(
1048 vnfr
, vdur
["vdu-id-ref"], vdur
["count-index"]
1050 vdur
["additionalParams"] = deploy_params_vdu
1053 ns_flavor
= target
["flavor"][int(vdur
["ns-flavor-id"])]
1054 if target_vim
not in ns_flavor
["vim_info"]:
1055 ns_flavor
["vim_info"][target_vim
] = {}
1058 # in case alternative images are provided we must check if they should be applied
1059 # for the vim_type, modify the vim_type taking into account
1060 ns_image_id
= int(vdur
["ns-image-id"])
1061 if vdur
.get("alt-image-ids"):
1062 db_vim
= get_vim_account(vnfr
["vim-account-id"])
1063 vim_type
= db_vim
["vim_type"]
1064 for alt_image_id
in vdur
.get("alt-image-ids"):
1065 ns_alt_image
= target
["image"][int(alt_image_id
)]
1066 if vim_type
== ns_alt_image
.get("vim-type"):
1067 # must use alternative image
1069 "use alternative image id: {}".format(alt_image_id
)
1071 ns_image_id
= alt_image_id
1072 vdur
["ns-image-id"] = ns_image_id
1074 ns_image
= target
["image"][int(ns_image_id
)]
1075 if target_vim
not in ns_image
["vim_info"]:
1076 ns_image
["vim_info"][target_vim
] = {}
1079 if vdur
.get("affinity-or-anti-affinity-group-id"):
1080 for ags_id
in vdur
["affinity-or-anti-affinity-group-id"]:
1081 ns_ags
= target
["affinity-or-anti-affinity-group"][int(ags_id
)]
1082 if target_vim
not in ns_ags
["vim_info"]:
1083 ns_ags
["vim_info"][target_vim
] = {}
1086 if vdur
.get("shared-volumes-id"):
1087 for sv_id
in vdur
["shared-volumes-id"]:
1088 ns_sv
= find_in_list(
1089 target
["shared-volumes"], lambda sv
: sv_id
in sv
["id"]
1092 ns_sv
["vim_info"][target_vim
] = {}
1094 vdur
["vim_info"] = {target_vim
: {}}
1095 # instantiation parameters
1097 vdu_instantiation_params
= find_in_list(
1098 get_iterable(vnf_params
, "vdu"),
1099 lambda i_vdu
: i_vdu
["id"] == vdud
["id"],
1101 if vdu_instantiation_params
:
1102 # Parse the vdu_volumes from the instantiation params
1103 vdu_volumes
= get_volumes_from_instantiation_params(
1104 vdu_instantiation_params
, vdud
1106 vdur
["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1107 vdur
["additionalParams"]["OSM"][
1109 ] = vdu_instantiation_params
.get("vim-flavor-id")
1110 vdur_list
.append(vdur
)
1111 target_vnf
["vdur"] = vdur_list
1112 target
["vnf"].append(target_vnf
)
1114 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
1115 desc
= await self
.RO
.deploy(nsr_id
, target
)
1116 self
.logger
.debug("RO return > {}".format(desc
))
1117 action_id
= desc
["action_id"]
1118 await self
._wait
_ng
_ro
(
1125 operation
="instantiation",
1130 "_admin.deployed.RO.operational-status": "running",
1131 "detailed-status": " ".join(stage
),
1133 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1134 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1135 self
._write
_op
_status
(nslcmop_id
, stage
)
1137 logging_text
+ "ns deployed at RO. RO_id={}".format(action_id
)
1141 async def _wait_ng_ro(
1151 detailed_status_old
= None
1153 start_time
= start_time
or time()
1154 while time() <= start_time
+ timeout
:
1155 desc_status
= await self
.op_status_map
[operation
](nsr_id
, action_id
)
1156 self
.logger
.debug("Wait NG RO > {}".format(desc_status
))
1157 if desc_status
["status"] == "FAILED":
1158 raise NgRoException(desc_status
["details"])
1159 elif desc_status
["status"] == "BUILD":
1161 stage
[2] = "VIM: ({})".format(desc_status
["details"])
1162 elif desc_status
["status"] == "DONE":
1164 stage
[2] = "Deployed at VIM"
1167 assert False, "ROclient.check_ns_status returns unknown {}".format(
1168 desc_status
["status"]
1170 if stage
and nslcmop_id
and stage
[2] != detailed_status_old
:
1171 detailed_status_old
= stage
[2]
1172 db_nsr_update
["detailed-status"] = " ".join(stage
)
1173 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1174 self
._write
_op
_status
(nslcmop_id
, stage
)
1175 await asyncio
.sleep(15)
1176 else: # timeout_ns_deploy
1177 raise NgRoException("Timeout waiting ns to deploy")
1179 async def _terminate_ng_ro(
1180 self
, logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
1185 start_deploy
= time()
1192 "action_id": nslcmop_id
,
1194 desc
= await self
.RO
.deploy(nsr_id
, target
)
1195 action_id
= desc
["action_id"]
1196 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETING"
1199 + "ns terminate action at RO. action_id={}".format(action_id
)
1203 delete_timeout
= 20 * 60 # 20 minutes
1204 await self
._wait
_ng
_ro
(
1211 operation
="termination",
1213 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1215 await self
.RO
.delete(nsr_id
)
1216 except NgRoException
as e
:
1217 if e
.http_code
== 404: # not found
1218 db_nsr_update
["_admin.deployed.RO.nsr_id"] = None
1219 db_nsr_update
["_admin.deployed.RO.nsr_status"] = "DELETED"
1221 logging_text
+ "RO_action_id={} already deleted".format(action_id
)
1223 elif e
.http_code
== 409: # conflict
1224 failed_detail
.append("delete conflict: {}".format(e
))
1227 + "RO_action_id={} delete conflict: {}".format(action_id
, e
)
1230 failed_detail
.append("delete error: {}".format(e
))
1233 + "RO_action_id={} delete error: {}".format(action_id
, e
)
1235 except Exception as e
:
1236 failed_detail
.append("delete error: {}".format(e
))
1238 logging_text
+ "RO_action_id={} delete error: {}".format(action_id
, e
)
1242 stage
[2] = "Error deleting from VIM"
1244 stage
[2] = "Deleted from VIM"
1245 db_nsr_update
["detailed-status"] = " ".join(stage
)
1246 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
1247 self
._write
_op
_status
(nslcmop_id
, stage
)
1250 raise LcmException("; ".join(failed_detail
))
1253 async def instantiate_RO(
1267 :param logging_text: preffix text to use at logging
1268 :param nsr_id: nsr identity
1269 :param nsd: database content of ns descriptor
1270 :param db_nsr: database content of ns record
1271 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1273 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1274 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1275 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1276 :return: None or exception
1279 start_deploy
= time()
1280 ns_params
= db_nslcmop
.get("operationParams")
1281 if ns_params
and ns_params
.get("timeout_ns_deploy"):
1282 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
1284 timeout_ns_deploy
= self
.timeout
.ns_deploy
1286 # Check for and optionally request placement optimization. Database will be updated if placement activated
1287 stage
[2] = "Waiting for Placement."
1288 if await self
._do
_placement
(logging_text
, db_nslcmop
, db_vnfrs
):
1289 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1290 for vnfr
in db_vnfrs
.values():
1291 if ns_params
["vimAccountId"] == vnfr
["vim-account-id"]:
1294 ns_params
["vimAccountId"] == vnfr
["vim-account-id"]
1296 return await self
._instantiate
_ng
_ro
(
1309 except Exception as e
:
1310 stage
[2] = "ERROR deploying at VIM"
1311 self
.set_vnfr_at_error(db_vnfrs
, str(e
))
1313 "Error deploying at VIM {}".format(e
),
1314 exc_info
=not isinstance(
1317 ROclient
.ROClientException
,
async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
    """
    Wait for kdu to be up, get ip address
    :param logging_text: prefix use for logging
    :param nsr_id: NS record id (context only)
    :param vnfr_id: VNF record id containing the KDU
    :param kdu_name: name of the KDU to wait for
    :return: IP address, K8s services
    :raises LcmException: if the KDU is not found, is in error state, or
        does not become READY/ENABLED within ~1 hour (360 tries x 10 s)
    """
    # self.logger.debug(logging_text + "Starting wait_kdu_up")
    nb_tries = 0
    while nb_tries < 360:
        # Re-read the vnfr on every iteration: status is updated externally.
        db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
        kdur = next(
            (
                x
                # "or ()" tolerates a missing/None "kdur" key
                # (same iteration semantics as lcm_utils.get_iterable)
                for x in db_vnfr.get("kdur") or ()
                if x.get("kdu-name") == kdu_name
            ),
            None,
        )
        if not kdur:
            raise LcmException(
                "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
            )
        if kdur.get("status"):
            if kdur["status"] in ("READY", "ENABLED"):
                return kdur.get("ip-address"), kdur.get("services")
            else:
                raise LcmException(
                    "target KDU={} is in error state".format(kdu_name)
                )
        await asyncio.sleep(10)
        nb_tries += 1
    raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1365 async def wait_vm_up_insert_key_ro(
1366 self
, logging_text
, nsr_id
, vnfr_id
, vdu_id
, vdu_index
, pub_key
=None, user
=None
1369 Wait for ip addres at RO, and optionally, insert public key in virtual machine
1370 :param logging_text: prefix use for logging
1375 :param pub_key: public ssh key to inject, None to skip
1376 :param user: user to apply the public ssh key
1380 self
.logger
.debug(logging_text
+ "Starting wait_vm_up_insert_key_ro")
1382 target_vdu_id
= None
1387 if ro_retries
>= 360: # 1 hour
1389 "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id
)
1392 await asyncio
.sleep(10)
1395 if not target_vdu_id
:
1396 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
1398 if not vdu_id
: # for the VNF case
1399 if db_vnfr
.get("status") == "ERROR":
1401 "Cannot inject ssh-key because target VNF is in error state"
1403 ip_address
= db_vnfr
.get("ip-address")
1409 for x
in get_iterable(db_vnfr
, "vdur")
1410 if x
.get("ip-address") == ip_address
1418 for x
in get_iterable(db_vnfr
, "vdur")
1419 if x
.get("vdu-id-ref") == vdu_id
1420 and x
.get("count-index") == vdu_index
1426 not vdur
and len(db_vnfr
.get("vdur", ())) == 1
1427 ): # If only one, this should be the target vdu
1428 vdur
= db_vnfr
["vdur"][0]
1431 "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
1432 vnfr_id
, vdu_id
, vdu_index
1435 # New generation RO stores information at "vim_info"
1438 if vdur
.get("vim_info"):
1440 t
for t
in vdur
["vim_info"]
1441 ) # there should be only one key
1442 ng_ro_status
= vdur
["vim_info"][target_vim
].get("vim_status")
1444 vdur
.get("pdu-type")
1445 or vdur
.get("status") == "ACTIVE"
1446 or ng_ro_status
== "ACTIVE"
1448 ip_address
= vdur
.get("ip-address")
1451 target_vdu_id
= vdur
["vdu-id-ref"]
1452 elif vdur
.get("status") == "ERROR" or ng_ro_status
== "ERROR":
1454 "Cannot inject ssh-key because target VM is in error state"
1457 if not target_vdu_id
:
1460 # inject public key into machine
1461 if pub_key
and user
:
1462 self
.logger
.debug(logging_text
+ "Inserting RO key")
1463 self
.logger
.debug("SSH > PubKey > {}".format(pub_key
))
1464 if vdur
.get("pdu-type"):
1465 self
.logger
.error(logging_text
+ "Cannot inject ssh-ky to a PDU")
1470 "action": "inject_ssh_key",
1474 "vnf": [{"_id": vnfr_id
, "vdur": [{"id": vdur
["id"]}]}],
1476 desc
= await self
.RO
.deploy(nsr_id
, target
)
1477 action_id
= desc
["action_id"]
1478 await self
._wait
_ng
_ro
(
1479 nsr_id
, action_id
, timeout
=600, operation
="instantiation"
1482 except NgRoException
as e
:
1484 "Reaching max tries injecting key. Error: {}".format(e
)
1491 async def _wait_dependent_n2vc(self
, nsr_id
, vca_deployed_list
, vca_index
):
1493 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1495 my_vca
= vca_deployed_list
[vca_index
]
1496 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1497 # vdu or kdu: no dependencies
1501 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
1502 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1503 configuration_status_list
= db_nsr
["configurationStatus"]
1504 for index
, vca_deployed
in enumerate(configuration_status_list
):
1505 if index
== vca_index
:
1508 if not my_vca
.get("member-vnf-index") or (
1509 vca_deployed
.get("member-vnf-index")
1510 == my_vca
.get("member-vnf-index")
1512 internal_status
= configuration_status_list
[index
].get("status")
1513 if internal_status
== "READY":
1515 elif internal_status
== "BROKEN":
1517 "Configuration aborted because dependent charm/s has failed"
1522 # no dependencies, return
1524 await asyncio
.sleep(10)
1527 raise LcmException("Configuration aborted because dependent charm/s timeout")
def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
    """
    Return the VCA id for a deployment; the VNF record takes priority over
    the NS instantiation parameters.

    :param db_vnfr: VNF record (may be None/empty)
    :param db_nsr: NS record (may be None/empty)
    :return: VCA id string, or None if neither source provides one
    """
    vca_id = None
    if db_vnfr:
        vca_id = deep_get(db_vnfr, ("vca-id",))
    if not vca_id and db_nsr:
        # Fall back to the VCA associated with the NS vim account.
        vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
        vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
    return vca_id
1538 async def instantiate_N2VC(
1556 ee_config_descriptor
,
1558 nsr_id
= db_nsr
["_id"]
1559 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
1560 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
1561 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
1562 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
1564 "collection": "nsrs",
1565 "filter": {"_id": nsr_id
},
1566 "path": db_update_entry
,
1571 element_under_configuration
= nsr_id
1575 vnfr_id
= db_vnfr
["_id"]
1576 osm_config
["osm"]["vnf_id"] = vnfr_id
1578 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
1580 if vca_type
== "native_charm":
1583 index_number
= vdu_index
or 0
1586 element_type
= "VNF"
1587 element_under_configuration
= vnfr_id
1588 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
1590 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
1591 element_type
= "VDU"
1592 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
1593 osm_config
["osm"]["vdu_id"] = vdu_id
1595 namespace
+= ".{}".format(kdu_name
)
1596 element_type
= "KDU"
1597 element_under_configuration
= kdu_name
1598 osm_config
["osm"]["kdu_name"] = kdu_name
1601 if base_folder
["pkg-dir"]:
1602 artifact_path
= "{}/{}/{}/{}".format(
1603 base_folder
["folder"],
1604 base_folder
["pkg-dir"],
1607 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1612 artifact_path
= "{}/Scripts/{}/{}/".format(
1613 base_folder
["folder"],
1616 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1621 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
1623 # get initial_config_primitive_list that applies to this element
1624 initial_config_primitive_list
= config_descriptor
.get(
1625 "initial-config-primitive"
1629 "Initial config primitive list > {}".format(
1630 initial_config_primitive_list
1634 # add config if not present for NS charm
1635 ee_descriptor_id
= ee_config_descriptor
.get("id")
1636 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
1637 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
1638 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
1642 "Initial config primitive list #2 > {}".format(
1643 initial_config_primitive_list
1646 # n2vc_redesign STEP 3.1
1647 # find old ee_id if exists
1648 ee_id
= vca_deployed
.get("ee_id")
1650 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
1651 # create or register execution environment in VCA
1652 if vca_type
in ("lxc_proxy_charm", "k8s_proxy_charm", "helm-v3"):
1653 self
._write
_configuration
_status
(
1655 vca_index
=vca_index
,
1657 element_under_configuration
=element_under_configuration
,
1658 element_type
=element_type
,
1661 step
= "create execution environment"
1662 self
.logger
.debug(logging_text
+ step
)
1666 if vca_type
== "k8s_proxy_charm":
1667 ee_id
= await self
.vca_map
[vca_type
].install_k8s_proxy_charm(
1668 charm_name
=artifact_path
[artifact_path
.rfind("/") + 1 :],
1669 namespace
=namespace
,
1670 artifact_path
=artifact_path
,
1674 elif vca_type
== "helm-v3":
1675 ee_id
, credentials
= await self
.vca_map
[
1677 ].create_execution_environment(
1682 artifact_path
=artifact_path
,
1683 chart_model
=vca_name
,
1687 ee_id
, credentials
= await self
.vca_map
[
1689 ].create_execution_environment(
1690 namespace
=namespace
,
1696 elif vca_type
== "native_charm":
1697 step
= "Waiting to VM being up and getting IP address"
1698 self
.logger
.debug(logging_text
+ step
)
1699 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1708 credentials
= {"hostname": rw_mgmt_ip
}
1710 username
= deep_get(
1711 config_descriptor
, ("config-access", "ssh-access", "default-user")
1713 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1714 # merged. Meanwhile let's get username from initial-config-primitive
1715 if not username
and initial_config_primitive_list
:
1716 for config_primitive
in initial_config_primitive_list
:
1717 for param
in config_primitive
.get("parameter", ()):
1718 if param
["name"] == "ssh-username":
1719 username
= param
["value"]
1723 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1724 "'config-access.ssh-access.default-user'"
1726 credentials
["username"] = username
1727 # n2vc_redesign STEP 3.2
1729 self
._write
_configuration
_status
(
1731 vca_index
=vca_index
,
1732 status
="REGISTERING",
1733 element_under_configuration
=element_under_configuration
,
1734 element_type
=element_type
,
1737 step
= "register execution environment {}".format(credentials
)
1738 self
.logger
.debug(logging_text
+ step
)
1739 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
1740 credentials
=credentials
,
1741 namespace
=namespace
,
1746 # for compatibility with MON/POL modules, the need model and application name at database
1747 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1748 ee_id_parts
= ee_id
.split(".")
1749 db_nsr_update
= {db_update_entry
+ "ee_id": ee_id
}
1750 if len(ee_id_parts
) >= 2:
1751 model_name
= ee_id_parts
[0]
1752 application_name
= ee_id_parts
[1]
1753 db_nsr_update
[db_update_entry
+ "model"] = model_name
1754 db_nsr_update
[db_update_entry
+ "application"] = application_name
1756 # n2vc_redesign STEP 3.3
1757 step
= "Install configuration Software"
1759 self
._write
_configuration
_status
(
1761 vca_index
=vca_index
,
1762 status
="INSTALLING SW",
1763 element_under_configuration
=element_under_configuration
,
1764 element_type
=element_type
,
1765 other_update
=db_nsr_update
,
1768 # TODO check if already done
1769 self
.logger
.debug(logging_text
+ step
)
1771 if vca_type
== "native_charm":
1772 config_primitive
= next(
1773 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
1776 if config_primitive
:
1777 config
= self
._map
_primitive
_params
(
1778 config_primitive
, {}, deploy_params
1781 if vca_type
== "lxc_proxy_charm":
1782 if element_type
== "NS":
1783 num_units
= db_nsr
.get("config-units") or 1
1784 elif element_type
== "VNF":
1785 num_units
= db_vnfr
.get("config-units") or 1
1786 elif element_type
== "VDU":
1787 for v
in db_vnfr
["vdur"]:
1788 if vdu_id
== v
["vdu-id-ref"]:
1789 num_units
= v
.get("config-units") or 1
1791 if vca_type
!= "k8s_proxy_charm":
1792 await self
.vca_map
[vca_type
].install_configuration_sw(
1794 artifact_path
=artifact_path
,
1797 num_units
=num_units
,
1802 # write in db flag of configuration_sw already installed
1804 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
1807 # add relations for this VCA (wait for other peers related with this VCA)
1808 is_relation_added
= await self
._add
_vca
_relations
(
1809 logging_text
=logging_text
,
1812 vca_index
=vca_index
,
1815 if not is_relation_added
:
1816 raise LcmException("Relations could not be added to VCA.")
1818 # if SSH access is required, then get execution environment SSH public
1819 # if native charm we have waited already to VM be UP
1820 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
1823 # self.logger.debug("get ssh key block")
1825 config_descriptor
, ("config-access", "ssh-access", "required")
1827 # self.logger.debug("ssh key needed")
1828 # Needed to inject a ssh key
1831 ("config-access", "ssh-access", "default-user"),
1833 step
= "Install configuration Software, getting public ssh key"
1834 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
1835 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
1838 step
= "Insert public key into VM user={} ssh_key={}".format(
1842 # self.logger.debug("no need to get ssh key")
1843 step
= "Waiting to VM being up and getting IP address"
1844 self
.logger
.debug(logging_text
+ step
)
1846 # default rw_mgmt_ip to None, avoiding the non definition of the variable
1849 # n2vc_redesign STEP 5.1
1850 # wait for RO (ip-address) Insert pub_key into VM
1853 rw_mgmt_ip
, services
= await self
.wait_kdu_up(
1854 logging_text
, nsr_id
, vnfr_id
, kdu_name
1856 vnfd
= self
.db
.get_one(
1858 {"_id": f
'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
1860 kdu
= get_kdu(vnfd
, kdu_name
)
1862 service
["name"] for service
in get_kdu_services(kdu
)
1864 exposed_services
= []
1865 for service
in services
:
1866 if any(s
in service
["name"] for s
in kdu_services
):
1867 exposed_services
.append(service
)
1868 await self
.vca_map
[vca_type
].exec_primitive(
1870 primitive_name
="config",
1872 "osm-config": json
.dumps(
1874 k8s
={"services": exposed_services
}
1881 # This verification is needed in order to avoid trying to add a public key
1882 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
1883 # for a KNF and not for its KDUs, the previous verification gives False, and the code
1884 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
1886 elif db_vnfr
.get("vdur"):
1887 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
1897 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
1899 # store rw_mgmt_ip in deploy params for later replacement
1900 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
1902 # n2vc_redesign STEP 6 Execute initial config primitive
1903 step
= "execute initial config primitive"
1905 # wait for dependent primitives execution (NS -> VNF -> VDU)
1906 if initial_config_primitive_list
:
1907 await self
._wait
_dependent
_n
2vc
(nsr_id
, vca_deployed_list
, vca_index
)
1909 # stage, in function of element type: vdu, kdu, vnf or ns
1910 my_vca
= vca_deployed_list
[vca_index
]
1911 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
1913 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
1914 elif my_vca
.get("member-vnf-index"):
1916 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
1919 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
1921 self
._write
_configuration
_status
(
1922 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
1925 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
1927 check_if_terminated_needed
= True
1928 for initial_config_primitive
in initial_config_primitive_list
:
1929 # adding information on the vca_deployed if it is a NS execution environment
1930 if not vca_deployed
["member-vnf-index"]:
1931 deploy_params
["ns_config_info"] = json
.dumps(
1932 self
._get
_ns
_config
_info
(nsr_id
)
1934 # TODO check if already done
1935 primitive_params_
= self
._map
_primitive
_params
(
1936 initial_config_primitive
, {}, deploy_params
1939 step
= "execute primitive '{}' params '{}'".format(
1940 initial_config_primitive
["name"], primitive_params_
1942 self
.logger
.debug(logging_text
+ step
)
1943 await self
.vca_map
[vca_type
].exec_primitive(
1945 primitive_name
=initial_config_primitive
["name"],
1946 params_dict
=primitive_params_
,
1951 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1952 if check_if_terminated_needed
:
1953 if config_descriptor
.get("terminate-config-primitive"):
1955 "nsrs", nsr_id
, {db_update_entry
+ "needed_terminate": True}
1957 check_if_terminated_needed
= False
1959 # TODO register in database that primitive is done
1961 # STEP 7 Configure metrics
1962 if vca_type
== "helm-v3":
1963 # TODO: review for those cases where the helm chart is a reference and
1964 # is not part of the NF package
1965 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
1967 artifact_path
=artifact_path
,
1968 ee_config_descriptor
=ee_config_descriptor
,
1971 target_ip
=rw_mgmt_ip
,
1972 element_type
=element_type
,
1973 vnf_member_index
=db_vnfr
.get("member-vnf-index-ref", ""),
1975 vdu_index
=vdu_index
,
1977 kdu_index
=kdu_index
,
1983 {db_update_entry
+ "prometheus_jobs": prometheus_jobs
},
1986 for job
in prometheus_jobs
:
1989 {"job_name": job
["job_name"]},
1992 fail_on_empty
=False,
1995 step
= "instantiated at VCA"
1996 self
.logger
.debug(logging_text
+ step
)
1998 self
._write
_configuration
_status
(
1999 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
2002 except Exception as e
: # TODO not use Exception but N2VC exception
2003 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2005 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
2008 "Exception while {} : {}".format(step
, e
), exc_info
=True
2010 self
._write
_configuration
_status
(
2011 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
2013 raise LcmException("{}. {}".format(step
, e
)) from e
2015 def _write_ns_status(
2019 current_operation
: str,
2020 current_operation_id
: str,
2021 error_description
: str = None,
2022 error_detail
: str = None,
2023 other_update
: dict = None,
2026 Update db_nsr fields.
2029 :param current_operation:
2030 :param current_operation_id:
2031 :param error_description:
2032 :param error_detail:
2033 :param other_update: Other required changes at database if provided, will be cleared
2037 db_dict
= other_update
or {}
2040 ] = current_operation_id
# for backward compatibility
2041 db_dict
["_admin.current-operation"] = current_operation_id
2042 db_dict
["_admin.operation-type"] = (
2043 current_operation
if current_operation
!= "IDLE" else None
2045 db_dict
["currentOperation"] = current_operation
2046 db_dict
["currentOperationID"] = current_operation_id
2047 db_dict
["errorDescription"] = error_description
2048 db_dict
["errorDetail"] = error_detail
2051 db_dict
["nsState"] = ns_state
2052 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2053 except DbException
as e
:
2054 self
.logger
.warn("Error writing NS status, ns={}: {}".format(nsr_id
, e
))
2056 def _write_op_status(
2060 error_message
: str = None,
2061 queuePosition
: int = 0,
2062 operation_state
: str = None,
2063 other_update
: dict = None,
2066 db_dict
= other_update
or {}
2067 db_dict
["queuePosition"] = queuePosition
2068 if isinstance(stage
, list):
2069 db_dict
["stage"] = stage
[0]
2070 db_dict
["detailed-status"] = " ".join(stage
)
2071 elif stage
is not None:
2072 db_dict
["stage"] = str(stage
)
2074 if error_message
is not None:
2075 db_dict
["errorMessage"] = error_message
2076 if operation_state
is not None:
2077 db_dict
["operationState"] = operation_state
2078 db_dict
["statusEnteredTime"] = time()
2079 self
.update_db_2("nslcmops", op_id
, db_dict
)
2080 except DbException
as e
:
2082 "Error writing OPERATION status for op_id: {} -> {}".format(op_id
, e
)
2085 def _write_all_config_status(self
, db_nsr
: dict, status
: str):
2087 nsr_id
= db_nsr
["_id"]
2088 # configurationStatus
2089 config_status
= db_nsr
.get("configurationStatus")
2092 "configurationStatus.{}.status".format(index
): status
2093 for index
, v
in enumerate(config_status
)
2097 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2099 except DbException
as e
:
2101 "Error writing all configuration status, ns={}: {}".format(nsr_id
, e
)
2104 def _write_configuration_status(
2109 element_under_configuration
: str = None,
2110 element_type
: str = None,
2111 other_update
: dict = None,
2113 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2114 # .format(vca_index, status))
2117 db_path
= "configurationStatus.{}.".format(vca_index
)
2118 db_dict
= other_update
or {}
2120 db_dict
[db_path
+ "status"] = status
2121 if element_under_configuration
:
2123 db_path
+ "elementUnderConfiguration"
2124 ] = element_under_configuration
2126 db_dict
[db_path
+ "elementType"] = element_type
2127 self
.update_db_2("nsrs", nsr_id
, db_dict
)
2128 except DbException
as e
:
2130 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2131 status
, nsr_id
, vca_index
, e
2135 async def _do_placement(self
, logging_text
, db_nslcmop
, db_vnfrs
):
2137 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2138 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2139 Database is used because the result can be obtained from a different LCM worker in case of HA.
2140 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2141 :param db_nslcmop: database content of nslcmop
2142 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2143 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2144 computed 'vim-account-id'
2147 nslcmop_id
= db_nslcmop
["_id"]
2148 placement_engine
= deep_get(db_nslcmop
, ("operationParams", "placement-engine"))
2149 if placement_engine
== "PLA":
2151 logging_text
+ "Invoke and wait for placement optimization"
2153 await self
.msg
.aiowrite("pla", "get_placement", {"nslcmopId": nslcmop_id
})
2154 db_poll_interval
= 5
2155 wait
= db_poll_interval
* 10
2157 while not pla_result
and wait
>= 0:
2158 await asyncio
.sleep(db_poll_interval
)
2159 wait
-= db_poll_interval
2160 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2161 pla_result
= deep_get(db_nslcmop
, ("_admin", "pla"))
2165 "Placement timeout for nslcmopId={}".format(nslcmop_id
)
2168 for pla_vnf
in pla_result
["vnf"]:
2169 vnfr
= db_vnfrs
.get(pla_vnf
["member-vnf-index"])
2170 if not pla_vnf
.get("vimAccountId") or not vnfr
:
2175 {"_id": vnfr
["_id"]},
2176 {"vim-account-id": pla_vnf
["vimAccountId"]},
2179 vnfr
["vim-account-id"] = pla_vnf
["vimAccountId"]
2182 def _gather_vnfr_healing_alerts(self
, vnfr
, vnfd
):
2184 nsr_id
= vnfr
["nsr-id-ref"]
2185 df
= vnfd
.get("df", [{}])[0]
2186 # Checking for auto-healing configuration
2187 if "healing-aspect" in df
:
2188 healing_aspects
= df
["healing-aspect"]
2189 for healing
in healing_aspects
:
2190 for healing_policy
in healing
.get("healing-policy", ()):
2191 vdu_id
= healing_policy
["vdu-id"]
2193 (vdur
for vdur
in vnfr
["vdur"] if vdu_id
== vdur
["vdu-id-ref"]),
2198 metric_name
= "vm_status"
2199 vdu_name
= vdur
.get("name")
2200 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2202 name
= f
"healing_{uuid}"
2203 action
= healing_policy
2204 # action_on_recovery = healing.get("action-on-recovery")
2205 # cooldown_time = healing.get("cooldown-time")
2206 # day1 = healing.get("day1")
2210 "metric": metric_name
,
2213 "vnf_member_index": vnf_member_index
,
2214 "vdu_name": vdu_name
,
2216 "alarm_status": "ok",
2217 "action_type": "healing",
2220 alerts
.append(alert
)
2223 def _gather_vnfr_scaling_alerts(self
, vnfr
, vnfd
):
2225 nsr_id
= vnfr
["nsr-id-ref"]
2226 df
= vnfd
.get("df", [{}])[0]
2227 # Checking for auto-scaling configuration
2228 if "scaling-aspect" in df
:
2229 scaling_aspects
= df
["scaling-aspect"]
2230 all_vnfd_monitoring_params
= {}
2231 for ivld
in vnfd
.get("int-virtual-link-desc", ()):
2232 for mp
in ivld
.get("monitoring-parameters", ()):
2233 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2234 for vdu
in vnfd
.get("vdu", ()):
2235 for mp
in vdu
.get("monitoring-parameter", ()):
2236 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2237 for df
in vnfd
.get("df", ()):
2238 for mp
in df
.get("monitoring-parameter", ()):
2239 all_vnfd_monitoring_params
[mp
.get("id")] = mp
2240 for scaling_aspect
in scaling_aspects
:
2241 scaling_group_name
= scaling_aspect
.get("name", "")
2242 # Get monitored VDUs
2243 all_monitored_vdus
= set()
2244 for delta
in scaling_aspect
.get("aspect-delta-details", {}).get(
2247 for vdu_delta
in delta
.get("vdu-delta", ()):
2248 all_monitored_vdus
.add(vdu_delta
.get("id"))
2249 monitored_vdurs
= list(
2251 lambda vdur
: vdur
["vdu-id-ref"] in all_monitored_vdus
,
2255 if not monitored_vdurs
:
2257 "Scaling criteria is referring to a vnf-monitoring-param that does not contain a reference to a vdu or vnf metric"
2260 for scaling_policy
in scaling_aspect
.get("scaling-policy", ()):
2261 if scaling_policy
["scaling-type"] != "automatic":
2263 threshold_time
= scaling_policy
.get("threshold-time", "1")
2264 cooldown_time
= scaling_policy
.get("cooldown-time", "0")
2265 for scaling_criteria
in scaling_policy
["scaling-criteria"]:
2266 monitoring_param_ref
= scaling_criteria
.get(
2267 "vnf-monitoring-param-ref"
2269 vnf_monitoring_param
= all_vnfd_monitoring_params
[
2270 monitoring_param_ref
2272 for vdur
in monitored_vdurs
:
2273 vdu_id
= vdur
["vdu-id-ref"]
2274 metric_name
= vnf_monitoring_param
.get("performance-metric")
2275 metric_name
= f
"osm_{metric_name}"
2276 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2277 scalein_threshold
= scaling_criteria
.get(
2278 "scale-in-threshold"
2280 scaleout_threshold
= scaling_criteria
.get(
2281 "scale-out-threshold"
2283 # Looking for min/max-number-of-instances
2284 instances_min_number
= 1
2285 instances_max_number
= 1
2286 vdu_profile
= df
["vdu-profile"]
2289 item
for item
in vdu_profile
if item
["id"] == vdu_id
2291 instances_min_number
= profile
.get(
2292 "min-number-of-instances", 1
2294 instances_max_number
= profile
.get(
2295 "max-number-of-instances", 1
2298 if scalein_threshold
:
2300 name
= f
"scalein_{uuid}"
2301 operation
= scaling_criteria
[
2302 "scale-in-relational-operation"
2304 rel_operator
= self
.rel_operation_types
.get(
2307 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2308 expression
= f
"(count ({metric_selector}) > {instances_min_number}) and (avg({metric_selector}) {rel_operator} {scalein_threshold})"
2311 "vnf_member_index": vnf_member_index
,
2317 "for": str(threshold_time
) + "m",
2320 action
= scaling_policy
2322 "scaling-group": scaling_group_name
,
2323 "cooldown-time": cooldown_time
,
2328 "metric": metric_name
,
2331 "vnf_member_index": vnf_member_index
,
2334 "alarm_status": "ok",
2335 "action_type": "scale_in",
2337 "prometheus_config": prom_cfg
,
2339 alerts
.append(alert
)
2341 if scaleout_threshold
:
2343 name
= f
"scaleout_{uuid}"
2344 operation
= scaling_criteria
[
2345 "scale-out-relational-operation"
2347 rel_operator
= self
.rel_operation_types
.get(
2350 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2351 expression
= f
"(count ({metric_selector}) < {instances_max_number}) and (avg({metric_selector}) {rel_operator} {scaleout_threshold})"
2354 "vnf_member_index": vnf_member_index
,
2360 "for": str(threshold_time
) + "m",
2363 action
= scaling_policy
2365 "scaling-group": scaling_group_name
,
2366 "cooldown-time": cooldown_time
,
2371 "metric": metric_name
,
2374 "vnf_member_index": vnf_member_index
,
2377 "alarm_status": "ok",
2378 "action_type": "scale_out",
2380 "prometheus_config": prom_cfg
,
2382 alerts
.append(alert
)
2385 def _gather_vnfr_alarm_alerts(self
, vnfr
, vnfd
):
2387 nsr_id
= vnfr
["nsr-id-ref"]
2388 vnf_member_index
= vnfr
["member-vnf-index-ref"]
2390 # Checking for VNF alarm configuration
2391 for vdur
in vnfr
["vdur"]:
2392 vdu_id
= vdur
["vdu-id-ref"]
2393 vdu
= next(filter(lambda vdu
: vdu
["id"] == vdu_id
, vnfd
["vdu"]))
2395 # Get VDU monitoring params, since alerts are based on them
2396 vdu_monitoring_params
= {}
2397 for mp
in vdu
.get("monitoring-parameter", []):
2398 vdu_monitoring_params
[mp
.get("id")] = mp
2399 if not vdu_monitoring_params
:
2401 "VDU alarm refers to a VDU monitoring param, but there are no VDU monitoring params in the VDU"
2404 # Get alarms in the VDU
2405 alarm_descriptors
= vdu
["alarm"]
2406 # Create VDU alarms for each alarm in the VDU
2407 for alarm_descriptor
in alarm_descriptors
:
2408 # Check that the VDU alarm refers to a proper monitoring param
2409 alarm_monitoring_param
= alarm_descriptor
.get(
2410 "vnf-monitoring-param-ref", ""
2412 vdu_specific_monitoring_param
= vdu_monitoring_params
.get(
2413 alarm_monitoring_param
, {}
2415 if not vdu_specific_monitoring_param
:
2417 "VDU alarm refers to a VDU monitoring param not present in the VDU"
2420 metric_name
= vdu_specific_monitoring_param
.get(
2421 "performance-metric"
2425 "VDU alarm refers to a VDU monitoring param that has no associated performance-metric"
2428 # Set params of the alarm to be created in Prometheus
2429 metric_name
= f
"osm_{metric_name}"
2430 metric_threshold
= alarm_descriptor
.get("value")
2432 alert_name
= f
"vdu_alarm_{uuid}"
2433 operation
= alarm_descriptor
["operation"]
2434 rel_operator
= self
.rel_operation_types
.get(operation
, "<=")
2435 metric_selector
= f
'{metric_name}{{ns_id="{nsr_id}", vnf_member_index="{vnf_member_index}", vdu_id="{vdu_id}"}}'
2436 expression
= f
"{metric_selector} {rel_operator} {metric_threshold}"
2439 "vnf_member_index": vnf_member_index
,
2441 "vdu_name": "{{ $labels.vdu_name }}",
2444 "alert": alert_name
,
2446 "for": "1m", # default value. Ideally, this should be related to an IM param, but there is not such param
2449 alarm_action
= dict()
2450 for action_type
in ["ok", "insufficient-data", "alarm"]:
2452 "actions" in alarm_descriptor
2453 and action_type
in alarm_descriptor
["actions"]
2455 alarm_action
[action_type
] = alarm_descriptor
["actions"][
2461 "metric": metric_name
,
2464 "vnf_member_index": vnf_member_index
,
2467 "alarm_status": "ok",
2468 "action_type": "vdu_alarm",
2469 "action": alarm_action
,
2470 "prometheus_config": prom_cfg
,
2472 alerts
.append(alert
)
def update_nsrs_with_pla_result(self, params):
    """Persist the PLA (placement) result into the triggering nslcmop record.

    :param params: message payload; "placement.nslcmopId" selects the
        operation and the whole "placement" dict is stored at "_admin.pla".
    Best-effort: any failure is logged and swallowed.
    """
    # Pre-bind so the except clause can always reference it
    nslcmop_id = None
    try:
        nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
        self.update_db_2(
            "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
        )
    except Exception as e:
        self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2484 async def instantiate(self
, nsr_id
, nslcmop_id
):
2487 :param nsr_id: ns instance to deploy
2488 :param nslcmop_id: operation to run
2492 # Try to lock HA task here
2493 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
2494 if not task_is_locked_by_me
:
2496 "instantiate() task is not locked by me, ns={}".format(nsr_id
)
2500 logging_text
= "Task ns={} instantiate={} ".format(nsr_id
, nslcmop_id
)
2501 self
.logger
.debug(logging_text
+ "Enter")
2503 # get all needed from database
2505 # database nsrs record
2508 # database nslcmops record
2511 # update operation on nsrs
2513 # update operation on nslcmops
2514 db_nslcmop_update
= {}
2516 timeout_ns_deploy
= self
.timeout
.ns_deploy
2518 nslcmop_operation_state
= None
2519 db_vnfrs
= {} # vnf's info indexed by member-index
2521 tasks_dict_info
= {} # from task to info text
2525 "Stage 1/5: preparation of the environment.",
2526 "Waiting for previous operations to terminate.",
2529 # ^ stage, step, VIM progress
2531 # wait for any previous tasks in process
2532 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
2534 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2535 stage
[1] = "Reading from database."
2536 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2537 db_nsr_update
["detailed-status"] = "creating"
2538 db_nsr_update
["operational-status"] = "init"
2539 self
._write
_ns
_status
(
2541 ns_state
="BUILDING",
2542 current_operation
="INSTANTIATING",
2543 current_operation_id
=nslcmop_id
,
2544 other_update
=db_nsr_update
,
2546 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
2548 # read from db: operation
2549 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
2550 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
2551 if db_nslcmop
["operationParams"].get("additionalParamsForVnf"):
2552 db_nslcmop
["operationParams"]["additionalParamsForVnf"] = json
.loads(
2553 db_nslcmop
["operationParams"]["additionalParamsForVnf"]
2555 ns_params
= db_nslcmop
.get("operationParams")
2556 if ns_params
and ns_params
.get("timeout_ns_deploy"):
2557 timeout_ns_deploy
= ns_params
["timeout_ns_deploy"]
2560 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
2561 self
.logger
.debug(logging_text
+ stage
[1])
2562 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
2563 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
2564 self
.logger
.debug(logging_text
+ stage
[1])
2565 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
2566 self
.fs
.sync(db_nsr
["nsd-id"])
2568 # nsr_name = db_nsr["name"] # TODO short-name??
2570 # read from db: vnf's of this ns
2571 stage
[1] = "Getting vnfrs from db."
2572 self
.logger
.debug(logging_text
+ stage
[1])
2573 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
2575 # read from db: vnfd's for every vnf
2576 db_vnfds
= [] # every vnfd data
2578 # for each vnf in ns, read vnfd
2579 for vnfr
in db_vnfrs_list
:
2580 if vnfr
.get("kdur"):
2582 for kdur
in vnfr
["kdur"]:
2583 if kdur
.get("additionalParams"):
2584 kdur
["additionalParams"] = json
.loads(
2585 kdur
["additionalParams"]
2587 kdur_list
.append(kdur
)
2588 vnfr
["kdur"] = kdur_list
2590 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
2591 vnfd_id
= vnfr
["vnfd-id"]
2592 vnfd_ref
= vnfr
["vnfd-ref"]
2593 self
.fs
.sync(vnfd_id
)
2595 # if we haven't this vnfd, read it from db
2596 if vnfd_id
not in db_vnfds
:
2598 stage
[1] = "Getting vnfd={} id='{}' from db.".format(
2601 self
.logger
.debug(logging_text
+ stage
[1])
2602 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
2605 db_vnfds
.append(vnfd
)
2607 # Get or generates the _admin.deployed.VCA list
2608 vca_deployed_list
= None
2609 if db_nsr
["_admin"].get("deployed"):
2610 vca_deployed_list
= db_nsr
["_admin"]["deployed"].get("VCA")
2611 if vca_deployed_list
is None:
2612 vca_deployed_list
= []
2613 configuration_status_list
= []
2614 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2615 db_nsr_update
["configurationStatus"] = configuration_status_list
2616 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2617 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2618 elif isinstance(vca_deployed_list
, dict):
2619 # maintain backward compatibility. Change a dict to list at database
2620 vca_deployed_list
= list(vca_deployed_list
.values())
2621 db_nsr_update
["_admin.deployed.VCA"] = vca_deployed_list
2622 populate_dict(db_nsr
, ("_admin", "deployed", "VCA"), vca_deployed_list
)
2625 deep_get(db_nsr
, ("_admin", "deployed", "RO", "vnfd")), list
2627 populate_dict(db_nsr
, ("_admin", "deployed", "RO", "vnfd"), [])
2628 db_nsr_update
["_admin.deployed.RO.vnfd"] = []
2630 # set state to INSTANTIATED. When instantiated NBI will not delete directly
2631 db_nsr_update
["_admin.nsState"] = "INSTANTIATED"
2632 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
2634 "vnfrs", {"nsr-id-ref": nsr_id
}, {"_admin.nsState": "INSTANTIATED"}
2637 # n2vc_redesign STEP 2 Deploy Network Scenario
2638 stage
[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
2639 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
2641 stage
[1] = "Deploying KDUs."
2642 # self.logger.debug(logging_text + "Before deploy_kdus")
2643 # Call to deploy_kdus in case exists the "vdu:kdu" param
2644 await self
.deploy_kdus(
2645 logging_text
=logging_text
,
2647 nslcmop_id
=nslcmop_id
,
2650 task_instantiation_info
=tasks_dict_info
,
2653 stage
[1] = "Getting VCA public key."
2654 # n2vc_redesign STEP 1 Get VCA public ssh-key
2655 # feature 1429. Add n2vc public key to needed VMs
2656 n2vc_key
= self
.n2vc
.get_public_key()
2657 n2vc_key_list
= [n2vc_key
]
2658 if self
.vca_config
.public_key
:
2659 n2vc_key_list
.append(self
.vca_config
.public_key
)
2661 stage
[1] = "Deploying NS at VIM."
2662 task_ro
= asyncio
.ensure_future(
2663 self
.instantiate_RO(
2664 logging_text
=logging_text
,
2668 db_nslcmop
=db_nslcmop
,
2671 n2vc_key_list
=n2vc_key_list
,
2675 self
.lcm_tasks
.register("ns", nsr_id
, nslcmop_id
, "instantiate_RO", task_ro
)
2676 tasks_dict_info
[task_ro
] = "Deploying at VIM"
2678 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2679 stage
[1] = "Deploying Execution Environments."
2680 self
.logger
.debug(logging_text
+ stage
[1])
2682 # create namespace and certificate if any helm based EE is present in the NS
2683 if check_helm_ee_in_ns(db_vnfds
):
2684 await self
.vca_map
["helm-v3"].setup_ns_namespace(
2687 # create TLS certificates
2688 await self
.vca_map
["helm-v3"].create_tls_certificate(
2689 secret_name
=self
.EE_TLS_NAME
,
2692 usage
="server auth",
2696 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
2697 for vnf_profile
in get_vnf_profiles(nsd
):
2698 vnfd_id
= vnf_profile
["vnfd-id"]
2699 vnfd
= find_in_list(db_vnfds
, lambda a_vnf
: a_vnf
["id"] == vnfd_id
)
2700 member_vnf_index
= str(vnf_profile
["id"])
2701 db_vnfr
= db_vnfrs
[member_vnf_index
]
2702 base_folder
= vnfd
["_admin"]["storage"]
2709 # Get additional parameters
2710 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
2711 if db_vnfr
.get("additionalParamsForVnf"):
2712 deploy_params
.update(
2713 parse_yaml_strings(db_vnfr
["additionalParamsForVnf"].copy())
2716 descriptor_config
= get_configuration(vnfd
, vnfd
["id"])
2717 if descriptor_config
:
2719 logging_text
=logging_text
2720 + "member_vnf_index={} ".format(member_vnf_index
),
2723 nslcmop_id
=nslcmop_id
,
2729 member_vnf_index
=member_vnf_index
,
2730 vdu_index
=vdu_index
,
2731 kdu_index
=kdu_index
,
2733 deploy_params
=deploy_params
,
2734 descriptor_config
=descriptor_config
,
2735 base_folder
=base_folder
,
2736 task_instantiation_info
=tasks_dict_info
,
2740 # Deploy charms for each VDU that supports one.
2741 for vdud
in get_vdu_list(vnfd
):
2743 descriptor_config
= get_configuration(vnfd
, vdu_id
)
2744 vdur
= find_in_list(
2745 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
2748 if vdur
.get("additionalParams"):
2749 deploy_params_vdu
= parse_yaml_strings(vdur
["additionalParams"])
2751 deploy_params_vdu
= deploy_params
2752 deploy_params_vdu
["OSM"] = get_osm_params(
2753 db_vnfr
, vdu_id
, vdu_count_index
=0
2755 vdud_count
= get_number_of_instances(vnfd
, vdu_id
)
2757 self
.logger
.debug("VDUD > {}".format(vdud
))
2759 "Descriptor config > {}".format(descriptor_config
)
2761 if descriptor_config
:
2765 for vdu_index
in range(vdud_count
):
2766 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2768 logging_text
=logging_text
2769 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2770 member_vnf_index
, vdu_id
, vdu_index
2774 nslcmop_id
=nslcmop_id
,
2780 kdu_index
=kdu_index
,
2781 member_vnf_index
=member_vnf_index
,
2782 vdu_index
=vdu_index
,
2784 deploy_params
=deploy_params_vdu
,
2785 descriptor_config
=descriptor_config
,
2786 base_folder
=base_folder
,
2787 task_instantiation_info
=tasks_dict_info
,
2790 for kdud
in get_kdu_list(vnfd
):
2791 kdu_name
= kdud
["name"]
2792 descriptor_config
= get_configuration(vnfd
, kdu_name
)
2793 if descriptor_config
:
2797 kdu_index
, kdur
= next(
2799 for x
in enumerate(db_vnfr
["kdur"])
2800 if x
[1]["kdu-name"] == kdu_name
2802 deploy_params_kdu
= {"OSM": get_osm_params(db_vnfr
)}
2803 if kdur
.get("additionalParams"):
2804 deploy_params_kdu
.update(
2805 parse_yaml_strings(kdur
["additionalParams"].copy())
2809 logging_text
=logging_text
,
2812 nslcmop_id
=nslcmop_id
,
2818 member_vnf_index
=member_vnf_index
,
2819 vdu_index
=vdu_index
,
2820 kdu_index
=kdu_index
,
2822 deploy_params
=deploy_params_kdu
,
2823 descriptor_config
=descriptor_config
,
2824 base_folder
=base_folder
,
2825 task_instantiation_info
=tasks_dict_info
,
2829 # Check if each vnf has exporter for metric collection if so update prometheus job records
2830 if "exporters-endpoints" in vnfd
.get("df")[0]:
2831 exporter_config
= vnfd
.get("df")[0].get("exporters-endpoints")
2832 self
.logger
.debug("exporter config :{}".format(exporter_config
))
2833 artifact_path
= "{}/{}/{}".format(
2834 base_folder
["folder"],
2835 base_folder
["pkg-dir"],
2836 "exporter-endpoint",
2839 ee_config_descriptor
= exporter_config
2840 vnfr_id
= db_vnfr
["id"]
2841 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
2850 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
2851 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
2852 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
2853 vdu_id_for_prom
= None
2854 vdu_index_for_prom
= None
2855 for x
in get_iterable(db_vnfr
, "vdur"):
2856 vdu_id_for_prom
= x
.get("vdu-id-ref")
2857 vdu_index_for_prom
= x
.get("count-index")
2858 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
2860 artifact_path
=artifact_path
,
2861 ee_config_descriptor
=ee_config_descriptor
,
2864 target_ip
=rw_mgmt_ip
,
2866 vdu_id
=vdu_id_for_prom
,
2867 vdu_index
=vdu_index_for_prom
,
2870 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
2872 db_nsr_update
["_admin.deployed.prometheus_jobs"] = prometheus_jobs
2879 for job
in prometheus_jobs
:
2882 {"job_name": job
["job_name"]},
2885 fail_on_empty
=False,
2888 # Check if this NS has a charm configuration
2889 descriptor_config
= nsd
.get("ns-configuration")
2890 if descriptor_config
and descriptor_config
.get("juju"):
2893 member_vnf_index
= None
2900 # Get additional parameters
2901 deploy_params
= {"OSM": {"vim_account_id": ns_params
["vimAccountId"]}}
2902 if db_nsr
.get("additionalParamsForNs"):
2903 deploy_params
.update(
2904 parse_yaml_strings(db_nsr
["additionalParamsForNs"].copy())
2906 base_folder
= nsd
["_admin"]["storage"]
2908 logging_text
=logging_text
,
2911 nslcmop_id
=nslcmop_id
,
2917 member_vnf_index
=member_vnf_index
,
2918 vdu_index
=vdu_index
,
2919 kdu_index
=kdu_index
,
2921 deploy_params
=deploy_params
,
2922 descriptor_config
=descriptor_config
,
2923 base_folder
=base_folder
,
2924 task_instantiation_info
=tasks_dict_info
,
2928 # rest of staff will be done at finally
2931 ROclient
.ROClientException
,
2937 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
)
2940 except asyncio
.CancelledError
:
2942 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
2944 exc
= "Operation was cancelled"
2945 except Exception as e
:
2946 exc
= traceback
.format_exc()
2947 self
.logger
.critical(
2948 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
2953 error_list
.append(str(exc
))
2955 # wait for pending tasks
2957 stage
[1] = "Waiting for instantiate pending tasks."
2958 self
.logger
.debug(logging_text
+ stage
[1])
2959 error_list
+= await self
._wait
_for
_tasks
(
2967 stage
[1] = stage
[2] = ""
2968 except asyncio
.CancelledError
:
2969 error_list
.append("Cancelled")
2970 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
2971 await self
._wait
_for
_tasks
(
2979 except Exception as exc
:
2980 error_list
.append(str(exc
))
2982 # update operation-status
2983 db_nsr_update
["operational-status"] = "running"
2984 # let's begin with VCA 'configured' status (later we can change it)
2985 db_nsr_update
["config-status"] = "configured"
2986 for task
, task_name
in tasks_dict_info
.items():
2987 if not task
.done() or task
.cancelled() or task
.exception():
2988 if task_name
.startswith(self
.task_name_deploy_vca
):
2989 # A N2VC task is pending
2990 db_nsr_update
["config-status"] = "failed"
2992 # RO or KDU task is pending
2993 db_nsr_update
["operational-status"] = "failed"
2995 # update status at database
2997 error_detail
= ". ".join(error_list
)
2998 self
.logger
.error(logging_text
+ error_detail
)
2999 error_description_nslcmop
= "{} Detail: {}".format(
3000 stage
[0], error_detail
3002 error_description_nsr
= "Operation: INSTANTIATING.{}, {}".format(
3003 nslcmop_id
, stage
[0]
3006 db_nsr_update
["detailed-status"] = (
3007 error_description_nsr
+ " Detail: " + error_detail
3009 db_nslcmop_update
["detailed-status"] = error_detail
3010 nslcmop_operation_state
= "FAILED"
3014 error_description_nsr
= error_description_nslcmop
= None
3016 db_nsr_update
["detailed-status"] = "Done"
3017 db_nslcmop_update
["detailed-status"] = "Done"
3018 nslcmop_operation_state
= "COMPLETED"
3019 # Gather auto-healing and auto-scaling alerts for each vnfr
3022 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
3024 (sub
for sub
in db_vnfds
if sub
["_id"] == vnfr
["vnfd-id"]), None
3026 healing_alerts
= self
._gather
_vnfr
_healing
_alerts
(vnfr
, vnfd
)
3027 for alert
in healing_alerts
:
3028 self
.logger
.info(f
"Storing healing alert in MongoDB: {alert}")
3029 self
.db
.create("alerts", alert
)
3031 scaling_alerts
= self
._gather
_vnfr
_scaling
_alerts
(vnfr
, vnfd
)
3032 for alert
in scaling_alerts
:
3033 self
.logger
.info(f
"Storing scaling alert in MongoDB: {alert}")
3034 self
.db
.create("alerts", alert
)
3036 alarm_alerts
= self
._gather
_vnfr
_alarm
_alerts
(vnfr
, vnfd
)
3037 for alert
in alarm_alerts
:
3038 self
.logger
.info(f
"Storing VNF alarm alert in MongoDB: {alert}")
3039 self
.db
.create("alerts", alert
)
3041 self
._write
_ns
_status
(
3044 current_operation
="IDLE",
3045 current_operation_id
=None,
3046 error_description
=error_description_nsr
,
3047 error_detail
=error_detail
,
3048 other_update
=db_nsr_update
,
3050 self
._write
_op
_status
(
3053 error_message
=error_description_nslcmop
,
3054 operation_state
=nslcmop_operation_state
,
3055 other_update
=db_nslcmop_update
,
3058 if nslcmop_operation_state
:
3060 await self
.msg
.aiowrite(
3065 "nslcmop_id": nslcmop_id
,
3066 "operationState": nslcmop_operation_state
,
3067 "startTime": db_nslcmop
["startTime"],
3068 "links": db_nslcmop
["links"],
3069 "operationParams": {
3070 "nsInstanceId": nsr_id
,
3071 "nsdId": db_nsr
["nsd-id"],
3075 except Exception as e
:
3077 logging_text
+ "kafka_write notification Exception {}".format(e
)
3080 self
.logger
.debug(logging_text
+ "Exit")
3081 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_instantiate")
3083 def _get_vnfd(self
, vnfd_id
: str, projects_read
: str, cached_vnfds
: Dict
[str, Any
]):
3084 if vnfd_id
not in cached_vnfds
:
3085 cached_vnfds
[vnfd_id
] = self
.db
.get_one(
3086 "vnfds", {"id": vnfd_id
, "_admin.projects_read": projects_read
}
3088 return cached_vnfds
[vnfd_id
]
3090 def _get_vnfr(self
, nsr_id
: str, vnf_profile_id
: str, cached_vnfrs
: Dict
[str, Any
]):
3091 if vnf_profile_id
not in cached_vnfrs
:
3092 cached_vnfrs
[vnf_profile_id
] = self
.db
.get_one(
3095 "member-vnf-index-ref": vnf_profile_id
,
3096 "nsr-id-ref": nsr_id
,
3099 return cached_vnfrs
[vnf_profile_id
]
def _is_deployed_vca_in_relation(
    self, vca: DeployedVCA, relation: Relation
) -> bool:
    """Tell whether *vca* is one of the non-KDU endpoints of *relation*.

    An endpoint matches when its vnf-profile, vdu-profile and
    execution-environment-ref all equal the deployed VCA's.
    """
    found = False
    for endpoint in (relation.provider, relation.requirer):
        # KDU endpoints are resolved elsewhere; skip them here
        if endpoint["kdu-resource-profile-id"]:
            continue
        found = (
            vca.vnf_profile_id == endpoint.vnf_profile_id
            and vca.vdu_profile_id == endpoint.vdu_profile_id
            and vca.execution_environment_ref == endpoint.execution_environment_ref
        )
        if found:
            break
    return found
def _update_ee_relation_data_with_implicit_data(
    self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
):
    """Complete an EE relation endpoint with its implicit EE reference.

    For VNF/VDU level endpoints that lack "execution-environment-ref",
    the juju execution environment is looked up in the VNFD and filled in.

    :raises Exception: when no execution environment exists for the endpoint.
    :return: the normalized endpoint dict.
    """
    ee_relation_data = safe_get_ee_relation(
        nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
    )
    ee_relation_level = EELevel.get_level(ee_relation_data)
    if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
        "execution-environment-ref"
    ]:
        vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
        vnfd_id = vnf_profile["vnfd-id"]
        project = nsd["_admin"]["projects_read"][0]
        db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
        # VNF-level endpoints resolve by vnfd id; VDU-level by vdu-profile-id
        entity_id = (
            vnfd_id
            if ee_relation_level == EELevel.VNF
            else ee_relation_data["vdu-profile-id"]
        )
        ee = get_juju_ee_ref(db_vnfd, entity_id)
        if not ee:
            raise Exception(
                f"not execution environments found for ee_relation {ee_relation_data}"
            )
        ee_relation_data["execution-environment-ref"] = ee["id"]
    return ee_relation_data
def _get_ns_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the NS-level configured relations that involve *vca*.

    Relations may be declared either with explicit "provider"/"requirer"
    dicts or with the two-element "entities" shorthand; both forms are
    normalized before checking VCA membership.
    """
    relations = []
    db_ns_relations = get_ns_configuration_relation_list(nsd)
    for r in db_ns_relations:
        provider_dict = None
        requirer_dict = None
        if all(key in r for key in ("provider", "requirer")):
            provider_dict = r["provider"]
            requirer_dict = r["requirer"]
        elif "entities" in r:
            provider_id = r["entities"][0]["id"]
            provider_dict = {
                "nsr-id": nsr_id,
                "endpoint": r["entities"][0]["endpoint"],
            }
            # An entity id different from the NS id denotes a VNF endpoint
            if provider_id != nsd["id"]:
                provider_dict["vnf-profile-id"] = provider_id
            requirer_id = r["entities"][1]["id"]
            requirer_dict = {
                "nsr-id": nsr_id,
                "endpoint": r["entities"][1]["endpoint"],
            }
            if requirer_id != nsd["id"]:
                requirer_dict["vnf-profile-id"] = requirer_id
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        relation_provider = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, provider_dict, cached_vnfds
        )
        relation_requirer = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, requirer_dict, cached_vnfds
        )
        provider = EERelation(relation_provider)
        requirer = EERelation(relation_requirer)
        relation = Relation(r["name"], provider, requirer)
        vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
        if vca_in_relation:
            relations.append(relation)
    return relations
def _get_vnf_relations(
    self,
    nsr_id: str,
    nsd: Dict[str, Any],
    vca: DeployedVCA,
    cached_vnfds: Dict[str, Any],
) -> List[Relation]:
    """Return the VNFD-defined relations that involve *vca*.

    NS-level charms have no VNF relations, so an empty list is returned
    for them. Relations use either explicit "provider"/"requirer" dicts
    or the two-element "entities" shorthand.
    """
    relations = []
    if vca.target_element == "ns":
        self.logger.debug("VCA is a NS charm, not a VNF.")
        return relations
    vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
    vnf_profile_id = vnf_profile["id"]
    vnfd_id = vnf_profile["vnfd-id"]
    project = nsd["_admin"]["projects_read"][0]
    db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
    db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
    for r in db_vnf_relations:
        provider_dict = None
        requirer_dict = None
        if all(key in r for key in ("provider", "requirer")):
            provider_dict = r["provider"]
            requirer_dict = r["requirer"]
        elif "entities" in r:
            provider_id = r["entities"][0]["id"]
            provider_dict = {
                "nsr-id": nsr_id,
                "vnf-profile-id": vnf_profile_id,
                "endpoint": r["entities"][0]["endpoint"],
            }
            # An entity id different from the vnfd id denotes a VDU endpoint
            if provider_id != vnfd_id:
                provider_dict["vdu-profile-id"] = provider_id
            requirer_id = r["entities"][1]["id"]
            requirer_dict = {
                "nsr-id": nsr_id,
                "vnf-profile-id": vnf_profile_id,
                "endpoint": r["entities"][1]["endpoint"],
            }
            if requirer_id != vnfd_id:
                requirer_dict["vdu-profile-id"] = requirer_id
        else:
            raise Exception(
                "provider/requirer or entities must be included in the relation."
            )
        relation_provider = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
        )
        relation_requirer = self._update_ee_relation_data_with_implicit_data(
            nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
        )
        provider = EERelation(relation_provider)
        requirer = EERelation(relation_requirer)
        relation = Relation(r["name"], provider, requirer)
        vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
        if vca_in_relation:
            relations.append(relation)
    return relations
def _get_kdu_resource_data(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedK8sResource:
    """Locate the deployed KDU referenced by a KDU-level relation endpoint.

    Resolves the VNFD through the endpoint's vnf-profile, reads the
    kdu-resource-profile and returns the matching deployed KDU record,
    annotated with its "resource-name".
    """
    nsd = get_nsd(db_nsr)
    vnf_profiles = get_vnf_profiles(nsd)
    vnfd_id = find_in_list(
        vnf_profiles,
        lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
    )["vnfd-id"]
    project = nsd["_admin"]["projects_read"][0]
    db_vnfd = self._get_vnfd(vnfd_id, project, cached_vnfds)
    kdu_resource_profile = get_kdu_resource_profile(
        db_vnfd, ee_relation.kdu_resource_profile_id
    )
    kdu_name = kdu_resource_profile["kdu-name"]
    deployed_kdu, _ = get_deployed_kdu(
        db_nsr.get("_admin", ()).get("deployed", ()),
        kdu_name,
        ee_relation.vnf_profile_id,
    )
    deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
    return deployed_kdu
def _get_deployed_component(
    self,
    ee_relation: EERelation,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
) -> DeployedComponent:
    """Resolve a relation endpoint to its deployed component.

    NS/VNF/VDU endpoints map to a deployed VCA (matched by vdu id,
    member-vnf-index and ee descriptor id as applicable); KDU endpoints
    map to a deployed K8s resource. Returns None when nothing matches.
    """
    nsr_id = db_nsr["_id"]
    deployed_component = None
    ee_level = EELevel.get_level(ee_relation)
    if ee_level == EELevel.NS:
        # NS-level charm: no vdu nor member-vnf-index
        vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.VNF:
        vca = get_deployed_vca(
            db_nsr,
            {
                "vdu_id": None,
                "member-vnf-index": ee_relation.vnf_profile_id,
                "ee_descriptor_id": ee_relation.execution_environment_ref,
            },
        )
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.VDU:
        vca = get_deployed_vca(
            db_nsr,
            {
                "vdu_id": ee_relation.vdu_profile_id,
                "member-vnf-index": ee_relation.vnf_profile_id,
                "ee_descriptor_id": ee_relation.execution_environment_ref,
            },
        )
        if vca:
            deployed_component = DeployedVCA(nsr_id, vca)
    elif ee_level == EELevel.KDU:
        kdu_resource_data = self._get_kdu_resource_data(
            ee_relation, db_nsr, cached_vnfds
        )
        if kdu_resource_data:
            deployed_component = DeployedK8sResource(kdu_resource_data)
    return deployed_component
async def _add_relation(
    self,
    relation: Relation,
    vca_type: str,
    db_nsr: Dict[str, Any],
    cached_vnfds: Dict[str, Any],
    cached_vnfrs: Dict[str, Any],
) -> bool:
    """Try to add one juju relation between its two deployed endpoints.

    :return: True when the relation was added; False when either peer is
        not yet deployed/configured (caller retries later).
    :raises LcmException: when the VCA connector reports an N2VCException.
    """
    deployed_provider = self._get_deployed_component(
        relation.provider, db_nsr, cached_vnfds
    )
    deployed_requirer = self._get_deployed_component(
        relation.requirer, db_nsr, cached_vnfds
    )
    if (
        deployed_provider
        and deployed_requirer
        and deployed_provider.config_sw_installed
        and deployed_requirer.config_sw_installed
    ):
        # KDU endpoints have no vnf_profile_id, hence no VNFR to look up
        provider_db_vnfr = (
            self._get_vnfr(
                relation.provider.nsr_id,
                relation.provider.vnf_profile_id,
                cached_vnfrs,
            )
            if relation.provider.vnf_profile_id
            else None
        )
        requirer_db_vnfr = (
            self._get_vnfr(
                relation.requirer.nsr_id,
                relation.requirer.vnf_profile_id,
                cached_vnfrs,
            )
            if relation.requirer.vnf_profile_id
            else None
        )
        provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
        requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
        provider_relation_endpoint = RelationEndpoint(
            deployed_provider.ee_id,
            provider_vca_id,
            relation.provider.endpoint,
        )
        requirer_relation_endpoint = RelationEndpoint(
            deployed_requirer.ee_id,
            requirer_vca_id,
            relation.requirer.endpoint,
        )
        try:
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
        except N2VCException as exception:
            self.logger.error(exception)
            raise LcmException(exception)
        return True
    return False
async def _add_vca_relations(
    self,
    logging_text,
    nsr_id,
    vca_type: str,
    vca_index: int,
    timeout: int = 3600,
) -> bool:
    """Find and add all relations involving the VCA at *vca_index*.

    Polls until every relation is added (peers become ready), the
    *timeout* (seconds) expires, or an error occurs.

    :return: True on success (also when there are no relations);
        False on timeout or error.
    """
    # steps:
    # 1. find all relations for this VCA
    # 2. wait for other peers related
    # 3. add relations
    try:
        # STEP 1: find all relations for this VCA
        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = get_nsd(db_nsr)

        # this VCA data
        deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
        my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

        cached_vnfds = {}
        cached_vnfrs = {}
        relations = []
        relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
        relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

        # if no relations, terminate
        if not relations:
            self.logger.debug(logging_text + " No relations")
            return True

        self.logger.debug(logging_text + " adding relations {}".format(relations))

        # add all relations
        start = time()
        while True:
            # check timeout
            now = time()
            if now - start >= timeout:
                self.logger.error(logging_text + " : timeout adding relations")
                return False

            # reload nsr from database (we need to update record: _admin.deployed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            # for each relation, find the VCA's related
            for relation in relations.copy():
                added = await self._add_relation(
                    relation,
                    vca_type,
                    db_nsr,
                    cached_vnfds,
                    cached_vnfrs,
                )
                if added:
                    relations.remove(relation)

            if not relations:
                self.logger.debug("Relations added")
                break
            await asyncio.sleep(5.0)

        return True

    except Exception as e:
        self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
        return False
3452 async def _install_kdu(
3460 k8s_instance_info
: dict,
3461 k8params
: dict = None,
3466 k8sclustertype
= k8s_instance_info
["k8scluster-type"]
3469 "collection": "nsrs",
3470 "filter": {"_id": nsr_id
},
3471 "path": nsr_db_path
,
3474 if k8s_instance_info
.get("kdu-deployment-name"):
3475 kdu_instance
= k8s_instance_info
.get("kdu-deployment-name")
3477 kdu_instance
= self
.k8scluster_map
[
3479 ].generate_kdu_instance_name(
3480 db_dict
=db_dict_install
,
3481 kdu_model
=k8s_instance_info
["kdu-model"],
3482 kdu_name
=k8s_instance_info
["kdu-name"],
3485 # Update the nsrs table with the kdu-instance value
3489 _desc
={nsr_db_path
+ ".kdu-instance": kdu_instance
},
3492 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3493 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3494 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3495 # namespace, this first verification could be removed, and the next step would be done for any kind
3497 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3498 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3499 if k8sclustertype
in ("juju", "juju-bundle"):
3500 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3501 # that the user passed a namespace which he wants its KDU to be deployed in)
3507 "_admin.projects_write": k8s_instance_info
["namespace"],
3508 "_admin.projects_read": k8s_instance_info
["namespace"],
3514 f
"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3519 _desc
={f
"{nsr_db_path}.namespace": kdu_instance
},
3521 k8s_instance_info
["namespace"] = kdu_instance
3523 await self
.k8scluster_map
[k8sclustertype
].install(
3524 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3525 kdu_model
=k8s_instance_info
["kdu-model"],
3528 db_dict
=db_dict_install
,
3530 kdu_name
=k8s_instance_info
["kdu-name"],
3531 namespace
=k8s_instance_info
["namespace"],
3532 kdu_instance
=kdu_instance
,
3536 # Obtain services to obtain management service ip
3537 services
= await self
.k8scluster_map
[k8sclustertype
].get_services(
3538 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3539 kdu_instance
=kdu_instance
,
3540 namespace
=k8s_instance_info
["namespace"],
3543 # Obtain management service info (if exists)
3544 vnfr_update_dict
= {}
3545 kdu_config
= get_configuration(vnfd
, kdud
["name"])
3547 target_ee_list
= kdu_config
.get("execution-environment-list", [])
3552 vnfr_update_dict
["kdur.{}.services".format(kdu_index
)] = services
3555 for service
in kdud
.get("service", [])
3556 if service
.get("mgmt-service")
3558 for mgmt_service
in mgmt_services
:
3559 for service
in services
:
3560 if service
["name"].startswith(mgmt_service
["name"]):
3561 # Mgmt service found, Obtain service ip
3562 ip
= service
.get("external_ip", service
.get("cluster_ip"))
3563 if isinstance(ip
, list) and len(ip
) == 1:
3567 "kdur.{}.ip-address".format(kdu_index
)
3570 # Check if must update also mgmt ip at the vnf
3571 service_external_cp
= mgmt_service
.get(
3572 "external-connection-point-ref"
3574 if service_external_cp
:
3576 deep_get(vnfd
, ("mgmt-interface", "cp"))
3577 == service_external_cp
3579 vnfr_update_dict
["ip-address"] = ip
3584 "external-connection-point-ref", ""
3586 == service_external_cp
,
3589 "kdur.{}.ip-address".format(kdu_index
)
3594 "Mgmt service name: {} not found".format(
3595 mgmt_service
["name"]
3599 vnfr_update_dict
["kdur.{}.status".format(kdu_index
)] = "READY"
3600 self
.update_db_2("vnfrs", vnfr_data
.get("_id"), vnfr_update_dict
)
3602 kdu_config
= get_configuration(vnfd
, k8s_instance_info
["kdu-name"])
3605 and kdu_config
.get("initial-config-primitive")
3606 and get_juju_ee_ref(vnfd
, k8s_instance_info
["kdu-name"]) is None
3608 initial_config_primitive_list
= kdu_config
.get(
3609 "initial-config-primitive"
3611 initial_config_primitive_list
.sort(key
=lambda val
: int(val
["seq"]))
3613 for initial_config_primitive
in initial_config_primitive_list
:
3614 primitive_params_
= self
._map
_primitive
_params
(
3615 initial_config_primitive
, {}, {}
3618 await asyncio
.wait_for(
3619 self
.k8scluster_map
[k8sclustertype
].exec_primitive(
3620 cluster_uuid
=k8s_instance_info
["k8scluster-uuid"],
3621 kdu_instance
=kdu_instance
,
3622 primitive_name
=initial_config_primitive
["name"],
3623 params
=primitive_params_
,
3624 db_dict
=db_dict_install
,
3630 except Exception as e
:
3631 # Prepare update db with error and raise exception
3634 "nsrs", nsr_id
, {nsr_db_path
+ ".detailed-status": str(e
)}
3638 vnfr_data
.get("_id"),
3639 {"kdur.{}.status".format(kdu_index
): "ERROR"},
3641 except Exception as error
:
3642 # ignore to keep original exception
3643 self
.logger
.warning(
3644 f
"An exception occurred while updating DB: {str(error)}"
3646 # reraise original error
3651 async def deploy_kdus(
3658 task_instantiation_info
,
3660 # Launch kdus if present in the descriptor
3662 k8scluster_id_2_uuic
= {
3663 "helm-chart-v3": {},
3667 async def _get_cluster_id(cluster_id
, cluster_type
):
3668 nonlocal k8scluster_id_2_uuic
3669 if cluster_id
in k8scluster_id_2_uuic
[cluster_type
]:
3670 return k8scluster_id_2_uuic
[cluster_type
][cluster_id
]
3672 # check if K8scluster is creating and wait look if previous tasks in process
3673 task_name
, task_dependency
= self
.lcm_tasks
.lookfor_related(
3674 "k8scluster", cluster_id
3677 text
= "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
3678 task_name
, cluster_id
3680 self
.logger
.debug(logging_text
+ text
)
3681 await asyncio
.wait(task_dependency
, timeout
=3600)
3683 db_k8scluster
= self
.db
.get_one(
3684 "k8sclusters", {"_id": cluster_id
}, fail_on_empty
=False
3686 if not db_k8scluster
:
3687 raise LcmException("K8s cluster {} cannot be found".format(cluster_id
))
3689 k8s_id
= deep_get(db_k8scluster
, ("_admin", cluster_type
, "id"))
3691 if cluster_type
== "helm-chart-v3":
3693 # backward compatibility for existing clusters that have not been initialized for helm v3
3694 k8s_credentials
= yaml
.safe_dump(
3695 db_k8scluster
.get("credentials")
3697 k8s_id
, uninstall_sw
= await self
.k8sclusterhelm3
.init_env(
3698 k8s_credentials
, reuse_cluster_uuid
=cluster_id
3700 db_k8scluster_update
= {}
3701 db_k8scluster_update
["_admin.helm-chart-v3.error_msg"] = None
3702 db_k8scluster_update
["_admin.helm-chart-v3.id"] = k8s_id
3703 db_k8scluster_update
[
3704 "_admin.helm-chart-v3.created"
3706 db_k8scluster_update
[
3707 "_admin.helm-chart-v3.operationalState"
3710 "k8sclusters", cluster_id
, db_k8scluster_update
3712 except Exception as e
:
3715 + "error initializing helm-v3 cluster: {}".format(str(e
))
3718 "K8s cluster '{}' has not been initialized for '{}'".format(
3719 cluster_id
, cluster_type
3724 "K8s cluster '{}' has not been initialized for '{}'".format(
3725 cluster_id
, cluster_type
3728 k8scluster_id_2_uuic
[cluster_type
][cluster_id
] = k8s_id
3731 logging_text
+= "Deploy kdus: "
3734 db_nsr_update
= {"_admin.deployed.K8s": []}
3735 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3738 updated_cluster_list
= []
3739 updated_v3_cluster_list
= []
3741 for vnfr_data
in db_vnfrs
.values():
3742 vca_id
= self
.get_vca_id(vnfr_data
, {})
3743 for kdu_index
, kdur
in enumerate(get_iterable(vnfr_data
, "kdur")):
3744 # Step 0: Prepare and set parameters
3745 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
3746 vnfd_id
= vnfr_data
.get("vnfd-id")
3747 vnfd_with_id
= find_in_list(
3748 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3752 for kdud
in vnfd_with_id
["kdu"]
3753 if kdud
["name"] == kdur
["kdu-name"]
3755 namespace
= kdur
.get("k8s-namespace")
3756 kdu_deployment_name
= kdur
.get("kdu-deployment-name")
3757 if kdur
.get("helm-chart"):
3758 kdumodel
= kdur
["helm-chart"]
3759 # Default version: helm3, if helm-version is v2 assign v2
3760 k8sclustertype
= "helm-chart-v3"
3761 self
.logger
.debug("kdur: {}".format(kdur
))
3762 elif kdur
.get("juju-bundle"):
3763 kdumodel
= kdur
["juju-bundle"]
3764 k8sclustertype
= "juju-bundle"
3767 "kdu type for kdu='{}.{}' is neither helm-chart nor "
3768 "juju-bundle. Maybe an old NBI version is running".format(
3769 vnfr_data
["member-vnf-index-ref"], kdur
["kdu-name"]
3772 # check if kdumodel is a file and exists
3774 vnfd_with_id
= find_in_list(
3775 db_vnfds
, lambda vnfd
: vnfd
["_id"] == vnfd_id
3777 storage
= deep_get(vnfd_with_id
, ("_admin", "storage"))
3778 if storage
: # may be not present if vnfd has not artifacts
3779 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
3780 if storage
["pkg-dir"]:
3781 filename
= "{}/{}/{}s/{}".format(
3788 filename
= "{}/Scripts/{}s/{}".format(
3793 if self
.fs
.file_exists(
3794 filename
, mode
="file"
3795 ) or self
.fs
.file_exists(filename
, mode
="dir"):
3796 kdumodel
= self
.fs
.path
+ filename
3797 except (asyncio
.TimeoutError
, asyncio
.CancelledError
):
3799 except Exception as e
: # it is not a file
3800 self
.logger
.warning(f
"An exception occurred: {str(e)}")
3802 k8s_cluster_id
= kdur
["k8s-cluster"]["id"]
3803 step
= "Synchronize repos for k8s cluster '{}'".format(
3806 cluster_uuid
= await _get_cluster_id(k8s_cluster_id
, k8sclustertype
)
3810 k8sclustertype
== "helm-chart"
3811 and cluster_uuid
not in updated_cluster_list
3813 k8sclustertype
== "helm-chart-v3"
3814 and cluster_uuid
not in updated_v3_cluster_list
3816 del_repo_list
, added_repo_dict
= await asyncio
.ensure_future(
3817 self
.k8scluster_map
[k8sclustertype
].synchronize_repos(
3818 cluster_uuid
=cluster_uuid
3821 if del_repo_list
or added_repo_dict
:
3822 if k8sclustertype
== "helm-chart":
3824 "_admin.helm_charts_added." + item
: None
3825 for item
in del_repo_list
3828 "_admin.helm_charts_added." + item
: name
3829 for item
, name
in added_repo_dict
.items()
3831 updated_cluster_list
.append(cluster_uuid
)
3832 elif k8sclustertype
== "helm-chart-v3":
3834 "_admin.helm_charts_v3_added." + item
: None
3835 for item
in del_repo_list
3838 "_admin.helm_charts_v3_added." + item
: name
3839 for item
, name
in added_repo_dict
.items()
3841 updated_v3_cluster_list
.append(cluster_uuid
)
3843 logging_text
+ "repos synchronized on k8s cluster "
3844 "'{}' to_delete: {}, to_add: {}".format(
3845 k8s_cluster_id
, del_repo_list
, added_repo_dict
3850 {"_id": k8s_cluster_id
},
3856 step
= "Instantiating KDU {}.{} in k8s cluster {}".format(
3857 vnfr_data
["member-vnf-index-ref"],
3861 k8s_instance_info
= {
3862 "kdu-instance": None,
3863 "k8scluster-uuid": cluster_uuid
,
3864 "k8scluster-type": k8sclustertype
,
3865 "member-vnf-index": vnfr_data
["member-vnf-index-ref"],
3866 "kdu-name": kdur
["kdu-name"],
3867 "kdu-model": kdumodel
,
3868 "namespace": namespace
,
3869 "kdu-deployment-name": kdu_deployment_name
,
3871 db_path
= "_admin.deployed.K8s.{}".format(index
)
3872 db_nsr_update
[db_path
] = k8s_instance_info
3873 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3874 vnfd_with_id
= find_in_list(
3875 db_vnfds
, lambda vnf
: vnf
["_id"] == vnfd_id
3877 task
= asyncio
.ensure_future(
3886 k8params
=desc_params
,
3891 self
.lcm_tasks
.register(
3895 "instantiate_KDU-{}".format(index
),
3898 task_instantiation_info
[task
] = "Deploying KDU {}".format(
3904 except (LcmException
, asyncio
.CancelledError
):
3906 except Exception as e
:
3907 msg
= "Exception {} while {}: {}".format(type(e
).__name
__, step
, e
)
3908 if isinstance(e
, (N2VCException
, DbException
)):
3909 self
.logger
.error(logging_text
+ msg
)
3911 self
.logger
.critical(logging_text
+ msg
, exc_info
=True)
3912 raise LcmException(msg
)
3915 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
3935 task_instantiation_info
,
3938 # launch instantiate_N2VC in a asyncio task and register task object
3939 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
3940 # if not found, create one entry and update database
3941 # fill db_nsr._admin.deployed.VCA.<index>
3944 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
3948 get_charm_name
= False
3949 if "execution-environment-list" in descriptor_config
:
3950 ee_list
= descriptor_config
.get("execution-environment-list", [])
3951 elif "juju" in descriptor_config
:
3952 ee_list
= [descriptor_config
] # ns charms
3953 if "execution-environment-list" not in descriptor_config
:
3954 # charm name is only required for ns charms
3955 get_charm_name
= True
3956 else: # other types as script are not supported
3959 for ee_item
in ee_list
:
3962 + "_deploy_n2vc ee_item juju={}, helm={}".format(
3963 ee_item
.get("juju"), ee_item
.get("helm-chart")
3966 ee_descriptor_id
= ee_item
.get("id")
3967 if ee_item
.get("juju"):
3968 vca_name
= ee_item
["juju"].get("charm")
3970 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
3973 if ee_item
["juju"].get("charm") is not None
3976 if ee_item
["juju"].get("cloud") == "k8s":
3977 vca_type
= "k8s_proxy_charm"
3978 elif ee_item
["juju"].get("proxy") is False:
3979 vca_type
= "native_charm"
3980 elif ee_item
.get("helm-chart"):
3981 vca_name
= ee_item
["helm-chart"]
3982 vca_type
= "helm-v3"
3985 logging_text
+ "skipping non juju neither charm configuration"
3990 for vca_index
, vca_deployed
in enumerate(
3991 db_nsr
["_admin"]["deployed"]["VCA"]
3993 if not vca_deployed
:
3996 vca_deployed
.get("member-vnf-index") == member_vnf_index
3997 and vca_deployed
.get("vdu_id") == vdu_id
3998 and vca_deployed
.get("kdu_name") == kdu_name
3999 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
4000 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
4004 # not found, create one.
4006 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
4009 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
4011 target
+= "/kdu/{}".format(kdu_name
)
4013 "target_element": target
,
4014 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
4015 "member-vnf-index": member_vnf_index
,
4017 "kdu_name": kdu_name
,
4018 "vdu_count_index": vdu_index
,
4019 "operational-status": "init", # TODO revise
4020 "detailed-status": "", # TODO revise
4021 "step": "initial-deploy", # TODO revise
4023 "vdu_name": vdu_name
,
4025 "ee_descriptor_id": ee_descriptor_id
,
4026 "charm_name": charm_name
,
4030 # create VCA and configurationStatus in db
4032 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
4033 "configurationStatus.{}".format(vca_index
): dict(),
4035 self
.update_db_2("nsrs", nsr_id
, db_dict
)
4037 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
4039 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
4040 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
4041 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
4044 task_n2vc
= asyncio
.ensure_future(
4045 self
.instantiate_N2VC(
4046 logging_text
=logging_text
,
4047 vca_index
=vca_index
,
4053 vdu_index
=vdu_index
,
4054 kdu_index
=kdu_index
,
4055 deploy_params
=deploy_params
,
4056 config_descriptor
=descriptor_config
,
4057 base_folder
=base_folder
,
4058 nslcmop_id
=nslcmop_id
,
4062 ee_config_descriptor
=ee_item
,
4065 self
.lcm_tasks
.register(
4069 "instantiate_N2VC-{}".format(vca_index
),
4072 task_instantiation_info
[
4074 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
4075 member_vnf_index
or "", vdu_id
or ""
4078 def _format_additional_params(self
, params
):
4079 params
= params
or {}
4080 for key
, value
in params
.items():
4081 if str(value
).startswith("!!yaml "):
4082 params
[key
] = yaml
.safe_load(value
[7:])
4085 def _get_terminate_primitive_params(self
, seq
, vnf_index
):
4086 primitive
= seq
.get("name")
4087 primitive_params
= {}
4089 "member_vnf_index": vnf_index
,
4090 "primitive": primitive
,
4091 "primitive_params": primitive_params
,
4094 return self
._map
_primitive
_params
(seq
, params
, desc_params
)
4098 def _retry_or_skip_suboperation(self
, db_nslcmop
, op_index
):
4099 op
= deep_get(db_nslcmop
, ("_admin", "operations"), [])[op_index
]
4100 if op
.get("operationState") == "COMPLETED":
4101 # b. Skip sub-operation
4102 # _ns_execute_primitive() or RO.create_action() will NOT be executed
4103 return self
.SUBOPERATION_STATUS_SKIP
4105 # c. retry executing sub-operation
4106 # The sub-operation exists, and operationState != 'COMPLETED'
4107 # Update operationState = 'PROCESSING' to indicate a retry.
4108 operationState
= "PROCESSING"
4109 detailed_status
= "In progress"
4110 self
._update
_suboperation
_status
(
4111 db_nslcmop
, op_index
, operationState
, detailed_status
4113 # Return the sub-operation index
4114 # _ns_execute_primitive() or RO.create_action() will be called from scale()
4115 # with arguments extracted from the sub-operation
4118 # Find a sub-operation where all keys in a matching dictionary must match
4119 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
4120 def _find_suboperation(self
, db_nslcmop
, match
):
4121 if db_nslcmop
and match
:
4122 op_list
= db_nslcmop
.get("_admin", {}).get("operations", [])
4123 for i
, op
in enumerate(op_list
):
4124 if all(op
.get(k
) == match
[k
] for k
in match
):
4126 return self
.SUBOPERATION_STATUS_NOT_FOUND
4128 # Update status for a sub-operation given its index
4129 def _update_suboperation_status(
4130 self
, db_nslcmop
, op_index
, operationState
, detailed_status
4132 # Update DB for HA tasks
4133 q_filter
= {"_id": db_nslcmop
["_id"]}
4135 "_admin.operations.{}.operationState".format(op_index
): operationState
,
4136 "_admin.operations.{}.detailed-status".format(op_index
): detailed_status
,
4139 "nslcmops", q_filter
=q_filter
, update_dict
=update_dict
, fail_on_empty
=False
4142 # Add sub-operation, return the index of the added sub-operation
4143 # Optionally, set operationState, detailed-status, and operationType
4144 # Status and type are currently set for 'scale' sub-operations:
4145 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
4146 # 'detailed-status' : status message
4147 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
4148 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
4149 def _add_suboperation(
4157 mapped_primitive_params
,
4158 operationState
=None,
4159 detailed_status
=None,
4162 RO_scaling_info
=None,
4165 return self
.SUBOPERATION_STATUS_NOT_FOUND
4166 # Get the "_admin.operations" list, if it exists
4167 db_nslcmop_admin
= db_nslcmop
.get("_admin", {})
4168 op_list
= db_nslcmop_admin
.get("operations")
4169 # Create or append to the "_admin.operations" list
4171 "member_vnf_index": vnf_index
,
4173 "vdu_count_index": vdu_count_index
,
4174 "primitive": primitive
,
4175 "primitive_params": mapped_primitive_params
,
4178 new_op
["operationState"] = operationState
4180 new_op
["detailed-status"] = detailed_status
4182 new_op
["lcmOperationType"] = operationType
4184 new_op
["RO_nsr_id"] = RO_nsr_id
4186 new_op
["RO_scaling_info"] = RO_scaling_info
4188 # No existing operations, create key 'operations' with current operation as first list element
4189 db_nslcmop_admin
.update({"operations": [new_op
]})
4190 op_list
= db_nslcmop_admin
.get("operations")
4192 # Existing operations, append operation to list
4193 op_list
.append(new_op
)
4195 db_nslcmop_update
= {"_admin.operations": op_list
}
4196 self
.update_db_2("nslcmops", db_nslcmop
["_id"], db_nslcmop_update
)
4197 op_index
= len(op_list
) - 1
4200 # Helper methods for scale() sub-operations
4202 # pre-scale/post-scale:
4203 # Check for 3 different cases:
4204 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4205 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4206 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4207 def _check_or_add_scale_suboperation(
4211 vnf_config_primitive
,
4215 RO_scaling_info
=None,
4217 # Find this sub-operation
4218 if RO_nsr_id
and RO_scaling_info
:
4219 operationType
= "SCALE-RO"
4221 "member_vnf_index": vnf_index
,
4222 "RO_nsr_id": RO_nsr_id
,
4223 "RO_scaling_info": RO_scaling_info
,
4227 "member_vnf_index": vnf_index
,
4228 "primitive": vnf_config_primitive
,
4229 "primitive_params": primitive_params
,
4230 "lcmOperationType": operationType
,
4232 op_index
= self
._find
_suboperation
(db_nslcmop
, match
)
4233 if op_index
== self
.SUBOPERATION_STATUS_NOT_FOUND
:
4234 # a. New sub-operation
4235 # The sub-operation does not exist, add it.
4236 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4237 # The following parameters are set to None for all kind of scaling:
4239 vdu_count_index
= None
4241 if RO_nsr_id
and RO_scaling_info
:
4242 vnf_config_primitive
= None
4243 primitive_params
= None
4246 RO_scaling_info
= None
4247 # Initial status for sub-operation
4248 operationState
= "PROCESSING"
4249 detailed_status
= "In progress"
4250 # Add sub-operation for pre/post-scaling (zero or more operations)
4251 self
._add
_suboperation
(
4257 vnf_config_primitive
,
4265 return self
.SUBOPERATION_STATUS_NEW
4267 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4268 # or op_index (operationState != 'COMPLETED')
4269 return self
._retry
_or
_skip
_suboperation
(db_nslcmop
, op_index
)
4271 # Function to return execution_environment id
4273 async def destroy_N2VC(
4281 exec_primitives
=True,
4286 Execute the terminate primitives and destroy the execution environment (if destroy_ee=False
4287 :param logging_text:
4289 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.depoloyed.VCA.<INDEX>
4290 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
4291 :param vca_index: index in the database _admin.deployed.VCA
4292 :param destroy_ee: False to do not destroy, because it will be destroyed all of then at once
4293 :param exec_primitives: False to do not execute terminate primitives, because the config is not completed or has
4294 not executed properly
4295 :param scaling_in: True destroys the application, False destroys the model
4296 :return: None or exception
4301 + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
4302 vca_index
, vca_deployed
, config_descriptor
, destroy_ee
4306 vca_type
= vca_deployed
.get("type", "lxc_proxy_charm")
4308 # execute terminate_primitives
4310 terminate_primitives
= get_ee_sorted_terminate_config_primitive_list(
4311 config_descriptor
.get("terminate-config-primitive"),
4312 vca_deployed
.get("ee_descriptor_id"),
4314 vdu_id
= vca_deployed
.get("vdu_id")
4315 vdu_count_index
= vca_deployed
.get("vdu_count_index")
4316 vdu_name
= vca_deployed
.get("vdu_name")
4317 vnf_index
= vca_deployed
.get("member-vnf-index")
4318 if terminate_primitives
and vca_deployed
.get("needed_terminate"):
4319 for seq
in terminate_primitives
:
4320 # For each sequence in list, get primitive and call _ns_execute_primitive()
4321 step
= "Calling terminate action for vnf_member_index={} primitive={}".format(
4322 vnf_index
, seq
.get("name")
4324 self
.logger
.debug(logging_text
+ step
)
4325 # Create the primitive for each sequence, i.e. "primitive": "touch"
4326 primitive
= seq
.get("name")
4327 mapped_primitive_params
= self
._get
_terminate
_primitive
_params
(
4332 self
._add
_suboperation
(
4339 mapped_primitive_params
,
4341 # Sub-operations: Call _ns_execute_primitive() instead of action()
4343 result
, result_detail
= await self
._ns
_execute
_primitive
(
4344 vca_deployed
["ee_id"],
4346 mapped_primitive_params
,
4350 except LcmException
:
4351 # this happens when VCA is not deployed. In this case it is not needed to terminate
4353 result_ok
= ["COMPLETED", "PARTIALLY_COMPLETED"]
4354 if result
not in result_ok
:
4356 "terminate_primitive {} for vnf_member_index={} fails with "
4357 "error {}".format(seq
.get("name"), vnf_index
, result_detail
)
4359 # set that this VCA do not need terminated
4360 db_update_entry
= "_admin.deployed.VCA.{}.needed_terminate".format(
4364 "nsrs", db_nslcmop
["nsInstanceId"], {db_update_entry
: False}
4367 # Delete Prometheus Jobs if any
4368 # This uses NSR_ID, so it will destroy any jobs under this index
4369 self
.db
.del_list("prometheus_jobs", {"nsr_id": db_nslcmop
["nsInstanceId"]})
4372 await self
.vca_map
[vca_type
].delete_execution_environment(
4373 vca_deployed
["ee_id"],
4374 scaling_in
=scaling_in
,
4379 async def _delete_all_N2VC(self
, db_nsr
: dict, vca_id
: str = None):
4380 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="TERMINATING")
4381 namespace
= "." + db_nsr
["_id"]
4383 await self
.n2vc
.delete_namespace(
4384 namespace
=namespace
,
4385 total_timeout
=self
.timeout
.charm_delete
,
4388 except N2VCNotFound
: # already deleted. Skip
4390 self
._write
_all
_config
_status
(db_nsr
=db_nsr
, status
="DELETED")
4392 async def terminate(self
, nsr_id
, nslcmop_id
):
4393 # Try to lock HA task here
4394 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
4395 if not task_is_locked_by_me
:
4398 logging_text
= "Task ns={} terminate={} ".format(nsr_id
, nslcmop_id
)
4399 self
.logger
.debug(logging_text
+ "Enter")
4400 timeout_ns_terminate
= self
.timeout
.ns_terminate
4403 operation_params
= None
4405 error_list
= [] # annotates all failed error messages
4406 db_nslcmop_update
= {}
4407 autoremove
= False # autoremove after terminated
4408 tasks_dict_info
= {}
4411 "Stage 1/3: Preparing task.",
4412 "Waiting for previous operations to terminate.",
4415 # ^ contains [stage, step, VIM-status]
4417 # wait for any previous tasks in process
4418 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
4420 stage
[1] = "Getting nslcmop={} from db.".format(nslcmop_id
)
4421 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
4422 operation_params
= db_nslcmop
.get("operationParams") or {}
4423 if operation_params
.get("timeout_ns_terminate"):
4424 timeout_ns_terminate
= operation_params
["timeout_ns_terminate"]
4425 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
4426 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
4428 db_nsr_update
["operational-status"] = "terminating"
4429 db_nsr_update
["config-status"] = "terminating"
4430 self
._write
_ns
_status
(
4432 ns_state
="TERMINATING",
4433 current_operation
="TERMINATING",
4434 current_operation_id
=nslcmop_id
,
4435 other_update
=db_nsr_update
,
4437 self
._write
_op
_status
(op_id
=nslcmop_id
, queuePosition
=0, stage
=stage
)
4438 nsr_deployed
= deepcopy(db_nsr
["_admin"].get("deployed")) or {}
4439 if db_nsr
["_admin"]["nsState"] == "NOT_INSTANTIATED":
4442 stage
[1] = "Getting vnf descriptors from db."
4443 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
4445 db_vnfr
["member-vnf-index-ref"]: db_vnfr
for db_vnfr
in db_vnfrs_list
4447 db_vnfds_from_id
= {}
4448 db_vnfds_from_member_index
= {}
4450 for vnfr
in db_vnfrs_list
:
4451 vnfd_id
= vnfr
["vnfd-id"]
4452 if vnfd_id
not in db_vnfds_from_id
:
4453 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
4454 db_vnfds_from_id
[vnfd_id
] = vnfd
4455 db_vnfds_from_member_index
[
4456 vnfr
["member-vnf-index-ref"]
4457 ] = db_vnfds_from_id
[vnfd_id
]
4459 # Destroy individual execution environments when there are terminating primitives.
4460 # Rest of EE will be deleted at once
4461 # TODO - check before calling _destroy_N2VC
4462 # if not operation_params.get("skip_terminate_primitives"):#
4463 # or not vca.get("needed_terminate"):
4464 stage
[0] = "Stage 2/3 execute terminating primitives."
4465 self
.logger
.debug(logging_text
+ stage
[0])
4466 stage
[1] = "Looking execution environment that needs terminate."
4467 self
.logger
.debug(logging_text
+ stage
[1])
4469 for vca_index
, vca
in enumerate(get_iterable(nsr_deployed
, "VCA")):
4470 config_descriptor
= None
4471 vca_member_vnf_index
= vca
.get("member-vnf-index")
4472 vca_id
= self
.get_vca_id(
4473 db_vnfrs_dict
.get(vca_member_vnf_index
)
4474 if vca_member_vnf_index
4478 if not vca
or not vca
.get("ee_id"):
4480 if not vca
.get("member-vnf-index"):
4482 config_descriptor
= db_nsr
.get("ns-configuration")
4483 elif vca
.get("vdu_id"):
4484 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4485 config_descriptor
= get_configuration(db_vnfd
, vca
.get("vdu_id"))
4486 elif vca
.get("kdu_name"):
4487 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4488 config_descriptor
= get_configuration(db_vnfd
, vca
.get("kdu_name"))
4490 db_vnfd
= db_vnfds_from_member_index
[vca
["member-vnf-index"]]
4491 config_descriptor
= get_configuration(db_vnfd
, db_vnfd
["id"])
4492 vca_type
= vca
.get("type")
4493 exec_terminate_primitives
= not operation_params
.get(
4494 "skip_terminate_primitives"
4495 ) and vca
.get("needed_terminate")
4496 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
4497 # pending native charms
4498 destroy_ee
= True if vca_type
in ("helm-v3", "native_charm") else False
4499 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
4500 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
4501 task
= asyncio
.ensure_future(
4509 exec_terminate_primitives
,
4513 tasks_dict_info
[task
] = "Terminating VCA {}".format(vca
.get("ee_id"))
4515 # wait for pending tasks of terminate primitives
4519 + "Waiting for tasks {}".format(list(tasks_dict_info
.keys()))
4521 error_list
= await self
._wait
_for
_tasks
(
4524 min(self
.timeout
.charm_delete
, timeout_ns_terminate
),
4528 tasks_dict_info
.clear()
4530 return # raise LcmException("; ".join(error_list))
4532 # remove All execution environments at once
4533 stage
[0] = "Stage 3/3 delete all."
4535 if nsr_deployed
.get("VCA"):
4536 stage
[1] = "Deleting all execution environments."
4537 self
.logger
.debug(logging_text
+ stage
[1])
4538 vca_id
= self
.get_vca_id({}, db_nsr
)
4539 task_delete_ee
= asyncio
.ensure_future(
4541 self
._delete
_all
_N
2VC
(db_nsr
=db_nsr
, vca_id
=vca_id
),
4542 timeout
=self
.timeout
.charm_delete
,
4545 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
4546 tasks_dict_info
[task_delete_ee
] = "Terminating all VCA"
4548 # Delete Namespace and Certificates if necessary
4549 if check_helm_ee_in_ns(list(db_vnfds_from_member_index
.values())):
4550 await self
.vca_map
["helm-v3"].delete_tls_certificate(
4551 namespace
=db_nslcmop
["nsInstanceId"],
4552 certificate_name
=self
.EE_TLS_NAME
,
4554 await self
.vca_map
["helm-v3"].delete_namespace(
4555 namespace
=db_nslcmop
["nsInstanceId"],
4558 # Delete from k8scluster
4559 stage
[1] = "Deleting KDUs."
4560 self
.logger
.debug(logging_text
+ stage
[1])
4561 # print(nsr_deployed)
4562 for kdu
in get_iterable(nsr_deployed
, "K8s"):
4563 if not kdu
or not kdu
.get("kdu-instance"):
4565 kdu_instance
= kdu
.get("kdu-instance")
4566 if kdu
.get("k8scluster-type") in self
.k8scluster_map
:
4567 # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
4568 vca_id
= self
.get_vca_id({}, db_nsr
)
4569 task_delete_kdu_instance
= asyncio
.ensure_future(
4570 self
.k8scluster_map
[kdu
["k8scluster-type"]].uninstall(
4571 cluster_uuid
=kdu
.get("k8scluster-uuid"),
4572 kdu_instance
=kdu_instance
,
4574 namespace
=kdu
.get("namespace"),
4580 + "Unknown k8s deployment type {}".format(
4581 kdu
.get("k8scluster-type")
4586 task_delete_kdu_instance
4587 ] = "Terminating KDU '{}'".format(kdu
.get("kdu-name"))
4590 stage
[1] = "Deleting ns from VIM."
4591 if self
.ro_config
.ng
:
4592 task_delete_ro
= asyncio
.ensure_future(
4593 self
._terminate
_ng
_ro
(
4594 logging_text
, nsr_deployed
, nsr_id
, nslcmop_id
, stage
4597 tasks_dict_info
[task_delete_ro
] = "Removing deployment from VIM"
4599 # rest of staff will be done at finally
4602 ROclient
.ROClientException
,
4607 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
4609 except asyncio
.CancelledError
:
4611 logging_text
+ "Cancelled Exception while '{}'".format(stage
[1])
4613 exc
= "Operation was cancelled"
4614 except Exception as e
:
4615 exc
= traceback
.format_exc()
4616 self
.logger
.critical(
4617 logging_text
+ "Exit Exception while '{}': {}".format(stage
[1], e
),
4622 error_list
.append(str(exc
))
4624 # wait for pending tasks
4626 stage
[1] = "Waiting for terminate pending tasks."
4627 self
.logger
.debug(logging_text
+ stage
[1])
4628 error_list
+= await self
._wait
_for
_tasks
(
4631 timeout_ns_terminate
,
4635 stage
[1] = stage
[2] = ""
4636 except asyncio
.CancelledError
:
4637 error_list
.append("Cancelled")
4638 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
4639 await self
._wait
_for
_tasks
(
4642 timeout_ns_terminate
,
4646 except Exception as exc
:
4647 error_list
.append(str(exc
))
4648 # update status at database
4650 error_detail
= "; ".join(error_list
)
4651 # self.logger.error(logging_text + error_detail)
4652 error_description_nslcmop
= "{} Detail: {}".format(
4653 stage
[0], error_detail
4655 error_description_nsr
= "Operation: TERMINATING.{}, {}.".format(
4656 nslcmop_id
, stage
[0]
4659 db_nsr_update
["operational-status"] = "failed"
4660 db_nsr_update
["detailed-status"] = (
4661 error_description_nsr
+ " Detail: " + error_detail
4663 db_nslcmop_update
["detailed-status"] = error_detail
4664 nslcmop_operation_state
= "FAILED"
4668 error_description_nsr
= error_description_nslcmop
= None
4669 ns_state
= "NOT_INSTANTIATED"
4670 db_nsr_update
["operational-status"] = "terminated"
4671 db_nsr_update
["detailed-status"] = "Done"
4672 db_nsr_update
["_admin.nsState"] = "NOT_INSTANTIATED"
4673 db_nslcmop_update
["detailed-status"] = "Done"
4674 nslcmop_operation_state
= "COMPLETED"
4677 self
._write
_ns
_status
(
4680 current_operation
="IDLE",
4681 current_operation_id
=None,
4682 error_description
=error_description_nsr
,
4683 error_detail
=error_detail
,
4684 other_update
=db_nsr_update
,
4686 self
._write
_op
_status
(
4689 error_message
=error_description_nslcmop
,
4690 operation_state
=nslcmop_operation_state
,
4691 other_update
=db_nslcmop_update
,
4693 if ns_state
== "NOT_INSTANTIATED":
4697 {"nsr-id-ref": nsr_id
},
4698 {"_admin.nsState": "NOT_INSTANTIATED"},
4700 except DbException
as e
:
4703 + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
4707 if operation_params
:
4708 autoremove
= operation_params
.get("autoremove", False)
4709 if nslcmop_operation_state
:
4711 await self
.msg
.aiowrite(
4716 "nslcmop_id": nslcmop_id
,
4717 "operationState": nslcmop_operation_state
,
4718 "autoremove": autoremove
,
4721 except Exception as e
:
4723 logging_text
+ "kafka_write notification Exception {}".format(e
)
4725 self
.logger
.debug(f
"Deleting alerts: ns_id={nsr_id}")
4726 self
.db
.del_list("alerts", {"tags.ns_id": nsr_id
})
4728 self
.logger
.debug(logging_text
+ "Exit")
4729 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_terminate")
4731 async def _wait_for_tasks(
4732 self
, logging_text
, created_tasks_info
, timeout
, stage
, nslcmop_id
, nsr_id
=None
4735 error_detail_list
= []
4737 pending_tasks
= list(created_tasks_info
.keys())
4738 num_tasks
= len(pending_tasks
)
4740 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4741 self
._write
_op
_status
(nslcmop_id
, stage
)
4742 while pending_tasks
:
4744 _timeout
= timeout
+ time_start
- time()
4745 done
, pending_tasks
= await asyncio
.wait(
4746 pending_tasks
, timeout
=_timeout
, return_when
=asyncio
.FIRST_COMPLETED
4748 num_done
+= len(done
)
4749 if not done
: # Timeout
4750 for task
in pending_tasks
:
4751 new_error
= created_tasks_info
[task
] + ": Timeout"
4752 error_detail_list
.append(new_error
)
4753 error_list
.append(new_error
)
4756 if task
.cancelled():
4759 exc
= task
.exception()
4761 if isinstance(exc
, asyncio
.TimeoutError
):
4763 new_error
= created_tasks_info
[task
] + ": {}".format(exc
)
4764 error_list
.append(created_tasks_info
[task
])
4765 error_detail_list
.append(new_error
)
4772 ROclient
.ROClientException
,
4778 self
.logger
.error(logging_text
+ new_error
)
4780 exc_traceback
= "".join(
4781 traceback
.format_exception(None, exc
, exc
.__traceback
__)
4785 + created_tasks_info
[task
]
4791 logging_text
+ created_tasks_info
[task
] + ": Done"
4793 stage
[1] = "{}/{}.".format(num_done
, num_tasks
)
4795 stage
[1] += " Errors: " + ". ".join(error_detail_list
) + "."
4796 if nsr_id
: # update also nsr
4801 "errorDescription": "Error at: " + ", ".join(error_list
),
4802 "errorDetail": ". ".join(error_detail_list
),
4805 self
._write
_op
_status
(nslcmop_id
, stage
)
4806 return error_detail_list
4808 async def _cancel_pending_tasks(self
, logging_text
, created_tasks_info
):
4809 for task
, name
in created_tasks_info
.items():
4810 self
.logger
.debug(logging_text
+ "Cancelling task: " + name
)
4814 def _map_primitive_params(primitive_desc
, params
, instantiation_params
):
4816 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4817 The default-value is used. If it is between < > it look for a value at instantiation_params
4818 :param primitive_desc: portion of VNFD/NSD that describes primitive
4819 :param params: Params provided by user
4820 :param instantiation_params: Instantiation params provided by user
4821 :return: a dictionary with the calculated params
4823 calculated_params
= {}
4824 for parameter
in primitive_desc
.get("parameter", ()):
4825 param_name
= parameter
["name"]
4826 if param_name
in params
:
4827 calculated_params
[param_name
] = params
[param_name
]
4828 elif "default-value" in parameter
or "value" in parameter
:
4829 if "value" in parameter
:
4830 calculated_params
[param_name
] = parameter
["value"]
4832 calculated_params
[param_name
] = parameter
["default-value"]
4834 isinstance(calculated_params
[param_name
], str)
4835 and calculated_params
[param_name
].startswith("<")
4836 and calculated_params
[param_name
].endswith(">")
4838 if calculated_params
[param_name
][1:-1] in instantiation_params
:
4839 calculated_params
[param_name
] = instantiation_params
[
4840 calculated_params
[param_name
][1:-1]
4844 "Parameter {} needed to execute primitive {} not provided".format(
4845 calculated_params
[param_name
], primitive_desc
["name"]
4850 "Parameter {} needed to execute primitive {} not provided".format(
4851 param_name
, primitive_desc
["name"]
4855 if isinstance(calculated_params
[param_name
], (dict, list, tuple)):
4856 calculated_params
[param_name
] = yaml
.safe_dump(
4857 calculated_params
[param_name
], default_flow_style
=True, width
=256
4859 elif isinstance(calculated_params
[param_name
], str) and calculated_params
[
4861 ].startswith("!!yaml "):
4862 calculated_params
[param_name
] = calculated_params
[param_name
][7:]
4863 if parameter
.get("data-type") == "INTEGER":
4865 calculated_params
[param_name
] = int(calculated_params
[param_name
])
4866 except ValueError: # error converting string to int
4868 "Parameter {} of primitive {} must be integer".format(
4869 param_name
, primitive_desc
["name"]
4872 elif parameter
.get("data-type") == "BOOLEAN":
4873 calculated_params
[param_name
] = not (
4874 (str(calculated_params
[param_name
])).lower() == "false"
4877 # add always ns_config_info if primitive name is config
4878 if primitive_desc
["name"] == "config":
4879 if "ns_config_info" in instantiation_params
:
4880 calculated_params
["ns_config_info"] = instantiation_params
[
4883 return calculated_params
4885 def _look_for_deployed_vca(
4892 ee_descriptor_id
=None,
4894 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4895 for vca
in deployed_vca
:
4898 if member_vnf_index
!= vca
["member-vnf-index"] or vdu_id
!= vca
["vdu_id"]:
4901 vdu_count_index
is not None
4902 and vdu_count_index
!= vca
["vdu_count_index"]
4905 if kdu_name
and kdu_name
!= vca
["kdu_name"]:
4907 if ee_descriptor_id
and ee_descriptor_id
!= vca
["ee_descriptor_id"]:
4911 # vca_deployed not found
4913 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4914 " is not deployed".format(
4923 ee_id
= vca
.get("ee_id")
4925 "type", "lxc_proxy_charm"
4926 ) # default value for backward compatibility - proxy charm
4929 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4930 "execution environment".format(
4931 member_vnf_index
, vdu_id
, kdu_name
, vdu_count_index
4934 return ee_id
, vca_type
4936 async def _ns_execute_primitive(
4942 retries_interval
=30,
4949 if primitive
== "config":
4950 primitive_params
= {"params": primitive_params
}
4952 vca_type
= vca_type
or "lxc_proxy_charm"
4956 output
= await asyncio
.wait_for(
4957 self
.vca_map
[vca_type
].exec_primitive(
4959 primitive_name
=primitive
,
4960 params_dict
=primitive_params
,
4961 progress_timeout
=self
.timeout
.progress_primitive
,
4962 total_timeout
=self
.timeout
.primitive
,
4967 timeout
=timeout
or self
.timeout
.primitive
,
4971 except asyncio
.CancelledError
:
4973 except Exception as e
:
4977 "Error executing action {} on {} -> {}".format(
4982 await asyncio
.sleep(retries_interval
)
4984 if isinstance(e
, asyncio
.TimeoutError
):
4986 message
="Timed out waiting for action to complete"
4988 return "FAILED", getattr(e
, "message", repr(e
))
4990 return "COMPLETED", output
4992 except (LcmException
, asyncio
.CancelledError
):
4994 except Exception as e
:
4995 return "FAIL", "Error executing action {}: {}".format(primitive
, e
)
4997 async def vca_status_refresh(self
, nsr_id
, nslcmop_id
):
4999 Updating the vca_status with latest juju information in nsrs record
5000 :param: nsr_id: Id of the nsr
5001 :param: nslcmop_id: Id of the nslcmop
5005 self
.logger
.debug("Task ns={} action={} Enter".format(nsr_id
, nslcmop_id
))
5006 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5007 vca_id
= self
.get_vca_id({}, db_nsr
)
5008 if db_nsr
["_admin"]["deployed"]["K8s"]:
5009 for _
, k8s
in enumerate(db_nsr
["_admin"]["deployed"]["K8s"]):
5010 cluster_uuid
, kdu_instance
, cluster_type
= (
5011 k8s
["k8scluster-uuid"],
5012 k8s
["kdu-instance"],
5013 k8s
["k8scluster-type"],
5015 await self
._on
_update
_k
8s
_db
(
5016 cluster_uuid
=cluster_uuid
,
5017 kdu_instance
=kdu_instance
,
5018 filter={"_id": nsr_id
},
5020 cluster_type
=cluster_type
,
5023 for vca_index
, _
in enumerate(db_nsr
["_admin"]["deployed"]["VCA"]):
5024 table
, filter = "nsrs", {"_id": nsr_id
}
5025 path
= "_admin.deployed.VCA.{}.".format(vca_index
)
5026 await self
._on
_update
_n
2vc
_db
(table
, filter, path
, {})
5028 self
.logger
.debug("Task ns={} action={} Exit".format(nsr_id
, nslcmop_id
))
5029 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_vca_status_refresh")
5031 async def action(self
, nsr_id
, nslcmop_id
):
5032 # Try to lock HA task here
5033 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5034 if not task_is_locked_by_me
:
5037 logging_text
= "Task ns={} action={} ".format(nsr_id
, nslcmop_id
)
5038 self
.logger
.debug(logging_text
+ "Enter")
5039 # get all needed from database
5043 db_nslcmop_update
= {}
5044 nslcmop_operation_state
= None
5045 error_description_nslcmop
= None
5049 # wait for any previous tasks in process
5050 step
= "Waiting for previous operations to terminate"
5051 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5053 self
._write
_ns
_status
(
5056 current_operation
="RUNNING ACTION",
5057 current_operation_id
=nslcmop_id
,
5060 step
= "Getting information from database"
5061 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5062 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5063 if db_nslcmop
["operationParams"].get("primitive_params"):
5064 db_nslcmop
["operationParams"]["primitive_params"] = json
.loads(
5065 db_nslcmop
["operationParams"]["primitive_params"]
5068 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5069 vnf_index
= db_nslcmop
["operationParams"].get("member_vnf_index")
5070 vdu_id
= db_nslcmop
["operationParams"].get("vdu_id")
5071 kdu_name
= db_nslcmop
["operationParams"].get("kdu_name")
5072 vdu_count_index
= db_nslcmop
["operationParams"].get("vdu_count_index")
5073 primitive
= db_nslcmop
["operationParams"]["primitive"]
5074 primitive_params
= db_nslcmop
["operationParams"]["primitive_params"]
5075 timeout_ns_action
= db_nslcmop
["operationParams"].get(
5076 "timeout_ns_action", self
.timeout
.primitive
5080 step
= "Getting vnfr from database"
5081 db_vnfr
= self
.db
.get_one(
5082 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
5084 if db_vnfr
.get("kdur"):
5086 for kdur
in db_vnfr
["kdur"]:
5087 if kdur
.get("additionalParams"):
5088 kdur
["additionalParams"] = json
.loads(
5089 kdur
["additionalParams"]
5091 kdur_list
.append(kdur
)
5092 db_vnfr
["kdur"] = kdur_list
5093 step
= "Getting vnfd from database"
5094 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
5096 # Sync filesystem before running a primitive
5097 self
.fs
.sync(db_vnfr
["vnfd-id"])
5099 step
= "Getting nsd from database"
5100 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
5102 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5103 # for backward compatibility
5104 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
5105 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
5106 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
5107 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5109 # look for primitive
5110 config_primitive_desc
= descriptor_configuration
= None
5112 descriptor_configuration
= get_configuration(db_vnfd
, vdu_id
)
5114 descriptor_configuration
= get_configuration(db_vnfd
, kdu_name
)
5116 descriptor_configuration
= get_configuration(db_vnfd
, db_vnfd
["id"])
5118 descriptor_configuration
= db_nsd
.get("ns-configuration")
5120 if descriptor_configuration
and descriptor_configuration
.get(
5123 for config_primitive
in descriptor_configuration
["config-primitive"]:
5124 if config_primitive
["name"] == primitive
:
5125 config_primitive_desc
= config_primitive
5128 if not config_primitive_desc
:
5129 if not (kdu_name
and primitive
in ("upgrade", "rollback", "status")):
5131 "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
5135 primitive_name
= primitive
5136 ee_descriptor_id
= None
5138 primitive_name
= config_primitive_desc
.get(
5139 "execution-environment-primitive", primitive
5141 ee_descriptor_id
= config_primitive_desc
.get(
5142 "execution-environment-ref"
5148 (x
for x
in db_vnfr
["vdur"] if x
["vdu-id-ref"] == vdu_id
), None
5150 desc_params
= parse_yaml_strings(vdur
.get("additionalParams"))
5153 (x
for x
in db_vnfr
["kdur"] if x
["kdu-name"] == kdu_name
), None
5155 desc_params
= parse_yaml_strings(kdur
.get("additionalParams"))
5157 desc_params
= parse_yaml_strings(
5158 db_vnfr
.get("additionalParamsForVnf")
5161 desc_params
= parse_yaml_strings(db_nsr
.get("additionalParamsForNs"))
5162 if kdu_name
and get_configuration(db_vnfd
, kdu_name
):
5163 kdu_configuration
= get_configuration(db_vnfd
, kdu_name
)
5165 for primitive
in kdu_configuration
.get("initial-config-primitive", []):
5166 actions
.add(primitive
["name"])
5167 for primitive
in kdu_configuration
.get("config-primitive", []):
5168 actions
.add(primitive
["name"])
5170 nsr_deployed
["K8s"],
5171 lambda kdu
: kdu_name
== kdu
["kdu-name"]
5172 and kdu
["member-vnf-index"] == vnf_index
,
5176 if primitive_name
in actions
5177 and kdu
["k8scluster-type"] != "helm-chart-v3"
5181 # TODO check if ns is in a proper status
5183 primitive_name
in ("upgrade", "rollback", "status") or kdu_action
5185 # kdur and desc_params already set from before
5186 if primitive_params
:
5187 desc_params
.update(primitive_params
)
5188 # TODO Check if we will need something at vnf level
5189 for index
, kdu
in enumerate(get_iterable(nsr_deployed
, "K8s")):
5191 kdu_name
== kdu
["kdu-name"]
5192 and kdu
["member-vnf-index"] == vnf_index
5197 "KDU '{}' for vnf '{}' not deployed".format(kdu_name
, vnf_index
)
5200 if kdu
.get("k8scluster-type") not in self
.k8scluster_map
:
5201 msg
= "unknown k8scluster-type '{}'".format(
5202 kdu
.get("k8scluster-type")
5204 raise LcmException(msg
)
5207 "collection": "nsrs",
5208 "filter": {"_id": nsr_id
},
5209 "path": "_admin.deployed.K8s.{}".format(index
),
5213 + "Exec k8s {} on {}.{}".format(primitive_name
, vnf_index
, kdu_name
)
5215 step
= "Executing kdu {}".format(primitive_name
)
5216 if primitive_name
== "upgrade":
5217 if desc_params
.get("kdu_model"):
5218 kdu_model
= desc_params
.get("kdu_model")
5219 del desc_params
["kdu_model"]
5221 kdu_model
= kdu
.get("kdu-model")
5222 if kdu_model
.count("/") < 2: # helm chart is not embedded
5223 parts
= kdu_model
.split(sep
=":")
5225 kdu_model
= parts
[0]
5226 if desc_params
.get("kdu_atomic_upgrade"):
5227 atomic_upgrade
= desc_params
.get(
5228 "kdu_atomic_upgrade"
5229 ).lower() in ("yes", "true", "1")
5230 del desc_params
["kdu_atomic_upgrade"]
5232 atomic_upgrade
= True
5234 detailed_status
= await asyncio
.wait_for(
5235 self
.k8scluster_map
[kdu
["k8scluster-type"]].upgrade(
5236 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5237 kdu_instance
=kdu
.get("kdu-instance"),
5238 atomic
=atomic_upgrade
,
5239 kdu_model
=kdu_model
,
5242 timeout
=timeout_ns_action
,
5244 timeout
=timeout_ns_action
+ 10,
5247 logging_text
+ " Upgrade of kdu {} done".format(detailed_status
)
5249 elif primitive_name
== "rollback":
5250 detailed_status
= await asyncio
.wait_for(
5251 self
.k8scluster_map
[kdu
["k8scluster-type"]].rollback(
5252 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5253 kdu_instance
=kdu
.get("kdu-instance"),
5256 timeout
=timeout_ns_action
,
5258 elif primitive_name
== "status":
5259 detailed_status
= await asyncio
.wait_for(
5260 self
.k8scluster_map
[kdu
["k8scluster-type"]].status_kdu(
5261 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5262 kdu_instance
=kdu
.get("kdu-instance"),
5265 timeout
=timeout_ns_action
,
5268 kdu_instance
= kdu
.get("kdu-instance") or "{}-{}".format(
5269 kdu
["kdu-name"], nsr_id
5271 params
= self
._map
_primitive
_params
(
5272 config_primitive_desc
, primitive_params
, desc_params
5275 detailed_status
= await asyncio
.wait_for(
5276 self
.k8scluster_map
[kdu
["k8scluster-type"]].exec_primitive(
5277 cluster_uuid
=kdu
.get("k8scluster-uuid"),
5278 kdu_instance
=kdu_instance
,
5279 primitive_name
=primitive_name
,
5282 timeout
=timeout_ns_action
,
5285 timeout
=timeout_ns_action
,
5289 nslcmop_operation_state
= "COMPLETED"
5291 detailed_status
= ""
5292 nslcmop_operation_state
= "FAILED"
5294 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
5295 nsr_deployed
["VCA"],
5296 member_vnf_index
=vnf_index
,
5298 vdu_count_index
=vdu_count_index
,
5299 ee_descriptor_id
=ee_descriptor_id
,
5301 for vca_index
, vca_deployed
in enumerate(
5302 db_nsr
["_admin"]["deployed"]["VCA"]
5304 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5306 "collection": "nsrs",
5307 "filter": {"_id": nsr_id
},
5308 "path": "_admin.deployed.VCA.{}.".format(vca_index
),
5312 nslcmop_operation_state
,
5314 ) = await self
._ns
_execute
_primitive
(
5316 primitive
=primitive_name
,
5317 primitive_params
=self
._map
_primitive
_params
(
5318 config_primitive_desc
, primitive_params
, desc_params
5320 timeout
=timeout_ns_action
,
5326 db_nslcmop_update
["detailed-status"] = detailed_status
5327 error_description_nslcmop
= (
5328 detailed_status
if nslcmop_operation_state
== "FAILED" else ""
5332 + "Done with result {} {}".format(
5333 nslcmop_operation_state
, detailed_status
5336 return # database update is called inside finally
5338 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
5339 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
5341 except asyncio
.CancelledError
:
5343 logging_text
+ "Cancelled Exception while '{}'".format(step
)
5345 exc
= "Operation was cancelled"
5346 except asyncio
.TimeoutError
:
5347 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
5349 except Exception as e
:
5350 exc
= traceback
.format_exc()
5351 self
.logger
.critical(
5352 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
5361 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
5362 nslcmop_operation_state
= "FAILED"
5364 self
._write
_ns
_status
(
5368 ], # TODO check if degraded. For the moment use previous status
5369 current_operation
="IDLE",
5370 current_operation_id
=None,
5371 # error_description=error_description_nsr,
5372 # error_detail=error_detail,
5373 other_update
=db_nsr_update
,
5376 self
._write
_op
_status
(
5379 error_message
=error_description_nslcmop
,
5380 operation_state
=nslcmop_operation_state
,
5381 other_update
=db_nslcmop_update
,
5384 if nslcmop_operation_state
:
5386 await self
.msg
.aiowrite(
5391 "nslcmop_id": nslcmop_id
,
5392 "operationState": nslcmop_operation_state
,
5395 except Exception as e
:
5397 logging_text
+ "kafka_write notification Exception {}".format(e
)
5399 self
.logger
.debug(logging_text
+ "Exit")
5400 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_action")
5401 return nslcmop_operation_state
, detailed_status
5403 async def terminate_vdus(
5404 self
, db_vnfr
, member_vnf_index
, db_nsr
, update_db_nslcmops
, stage
, logging_text
5406 """This method terminates VDUs
5409 db_vnfr: VNF instance record
5410 member_vnf_index: VNF index to identify the VDUs to be removed
5411 db_nsr: NS instance record
5412 update_db_nslcmops: Nslcmop update record
5414 vca_scaling_info
= []
5415 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5416 scaling_info
["scaling_direction"] = "IN"
5417 scaling_info
["vdu-delete"] = {}
5418 scaling_info
["kdu-delete"] = {}
5419 db_vdur
= db_vnfr
.get("vdur")
5420 vdur_list
= copy(db_vdur
)
5422 for index
, vdu
in enumerate(vdur_list
):
5423 vca_scaling_info
.append(
5425 "osm_vdu_id": vdu
["vdu-id-ref"],
5426 "member-vnf-index": member_vnf_index
,
5428 "vdu_index": count_index
,
5431 scaling_info
["vdu-delete"][vdu
["vdu-id-ref"]] = count_index
5432 scaling_info
["vdu"].append(
5434 "name": vdu
.get("name") or vdu
.get("vdu-name"),
5435 "vdu_id": vdu
["vdu-id-ref"],
5439 for interface
in vdu
["interfaces"]:
5440 scaling_info
["vdu"][index
]["interface"].append(
5442 "name": interface
["name"],
5443 "ip_address": interface
["ip-address"],
5444 "mac_address": interface
.get("mac-address"),
5447 self
.logger
.info("NS update scaling info{}".format(scaling_info
))
5448 stage
[2] = "Terminating VDUs"
5449 if scaling_info
.get("vdu-delete"):
5450 # scale_process = "RO"
5451 if self
.ro_config
.ng
:
5452 await self
._scale
_ng
_ro
(
5461 async def remove_vnf(self
, nsr_id
, nslcmop_id
, vnf_instance_id
):
5462 """This method is to Remove VNF instances from NS.
5465 nsr_id: NS instance id
5466 nslcmop_id: nslcmop id of update
5467 vnf_instance_id: id of the VNF instance to be removed
5470 result: (str, str) COMPLETED/FAILED, details
5474 logging_text
= "Task ns={} update ".format(nsr_id
)
5475 check_vnfr_count
= len(self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}))
5476 self
.logger
.info("check_vnfr_count {}".format(check_vnfr_count
))
5477 if check_vnfr_count
> 1:
5478 stage
= ["", "", ""]
5479 step
= "Getting nslcmop from database"
5481 step
+ " after having waited for previous tasks to be completed"
5483 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5484 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5485 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
5486 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5487 """ db_vnfr = self.db.get_one(
5488 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5490 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5491 await self
.terminate_vdus(
5500 constituent_vnfr
= db_nsr
.get("constituent-vnfr-ref")
5501 constituent_vnfr
.remove(db_vnfr
.get("_id"))
5502 db_nsr_update
["constituent-vnfr-ref"] = db_nsr
.get(
5503 "constituent-vnfr-ref"
5505 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5506 self
.db
.del_one("vnfrs", {"_id": db_vnfr
.get("_id")})
5507 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5508 return "COMPLETED", "Done"
5510 step
= "Terminate VNF Failed with"
5512 "{} Cannot terminate the last VNF in this NS.".format(
5516 except (LcmException
, asyncio
.CancelledError
):
5518 except Exception as e
:
5519 self
.logger
.debug("Error removing VNF {}".format(e
))
5520 return "FAILED", "Error removing VNF {}".format(e
)
5522 async def _ns_redeploy_vnf(
5530 """This method updates and redeploys VNF instances
5533 nsr_id: NS instance id
5534 nslcmop_id: nslcmop id
5535 db_vnfd: VNF descriptor
5536 db_vnfr: VNF instance record
5537 db_nsr: NS instance record
5540 result: (str, str) COMPLETED/FAILED, details
5544 stage
= ["", "", ""]
5545 logging_text
= "Task ns={} update ".format(nsr_id
)
5546 latest_vnfd_revision
= db_vnfd
["_admin"].get("revision")
5547 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5549 # Terminate old VNF resources
5550 update_db_nslcmops
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
5551 await self
.terminate_vdus(
5560 # old_vnfd_id = db_vnfr["vnfd-id"]
5561 # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
5562 new_db_vnfd
= db_vnfd
5563 # new_vnfd_ref = new_db_vnfd["id"]
5564 # new_vnfd_id = vnfd_id
5568 for cp
in new_db_vnfd
.get("ext-cpd", ()):
5570 "name": cp
.get("id"),
5571 "connection-point-id": cp
.get("int-cpd", {}).get("cpd"),
5572 "connection-point-vdu-id": cp
.get("int-cpd", {}).get("vdu-id"),
5575 new_vnfr_cp
.append(vnf_cp
)
5576 new_vdur
= update_db_nslcmops
["operationParams"]["newVdur"]
5577 # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
5578 # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
5580 "revision": latest_vnfd_revision
,
5581 "connection-point": new_vnfr_cp
,
5585 self
.update_db_2("vnfrs", db_vnfr
["_id"], new_vnfr_update
)
5586 updated_db_vnfr
= self
.db
.get_one(
5588 {"member-vnf-index-ref": member_vnf_index
, "nsr-id-ref": nsr_id
},
5591 # Instantiate new VNF resources
5592 # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5593 vca_scaling_info
= []
5594 scaling_info
= {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5595 scaling_info
["scaling_direction"] = "OUT"
5596 scaling_info
["vdu-create"] = {}
5597 scaling_info
["kdu-create"] = {}
5598 vdud_instantiate_list
= db_vnfd
["vdu"]
5599 for index
, vdud
in enumerate(vdud_instantiate_list
):
5600 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(vdud
, db_vnfd
)
5602 additional_params
= (
5603 self
._get
_vdu
_additional
_params
(updated_db_vnfr
, vdud
["id"])
5606 cloud_init_list
= []
5608 # TODO Information of its own ip is not available because db_vnfr is not updated.
5609 additional_params
["OSM"] = get_osm_params(
5610 updated_db_vnfr
, vdud
["id"], 1
5612 cloud_init_list
.append(
5613 self
._parse
_cloud
_init
(
5620 vca_scaling_info
.append(
5622 "osm_vdu_id": vdud
["id"],
5623 "member-vnf-index": member_vnf_index
,
5625 "vdu_index": count_index
,
5628 scaling_info
["vdu-create"][vdud
["id"]] = count_index
5629 if self
.ro_config
.ng
:
5631 "New Resources to be deployed: {}".format(scaling_info
)
5633 await self
._scale
_ng
_ro
(
5641 return "COMPLETED", "Done"
5642 except (LcmException
, asyncio
.CancelledError
):
5644 except Exception as e
:
5645 self
.logger
.debug("Error updating VNF {}".format(e
))
5646 return "FAILED", "Error updating VNF {}".format(e
)
5648 async def _ns_charm_upgrade(
5654 timeout
: float = None,
5656 """This method upgrade charms in VNF instances
5659 ee_id: Execution environment id
5660 path: Local path to the charm
5662 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5663 timeout: (Float) Timeout for the ns update operation
5666 result: (str, str) COMPLETED/FAILED, details
5669 charm_type
= charm_type
or "lxc_proxy_charm"
5670 output
= await self
.vca_map
[charm_type
].upgrade_charm(
5674 charm_type
=charm_type
,
5675 timeout
=timeout
or self
.timeout
.ns_update
,
5679 return "COMPLETED", output
5681 except (LcmException
, asyncio
.CancelledError
):
5684 except Exception as e
:
5685 self
.logger
.debug("Error upgrading charm {}".format(path
))
5687 return "FAILED", "Error upgrading charm {}: {}".format(path
, e
)
5689 async def update(self
, nsr_id
, nslcmop_id
):
5690 """Update NS according to different update types
5692 This method performs upgrade of VNF instances then updates the revision
5693 number in VNF record
5696 nsr_id: Network service will be updated
5697 nslcmop_id: ns lcm operation id
5700 It may raise DbException, LcmException, N2VCException, K8sException
5703 # Try to lock HA task here
5704 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
5705 if not task_is_locked_by_me
:
5708 logging_text
= "Task ns={} update={} ".format(nsr_id
, nslcmop_id
)
5709 self
.logger
.debug(logging_text
+ "Enter")
5711 # Set the required variables to be filled up later
5713 db_nslcmop_update
= {}
5715 nslcmop_operation_state
= None
5717 error_description_nslcmop
= ""
5719 change_type
= "updated"
5720 detailed_status
= ""
5721 member_vnf_index
= None
5724 # wait for any previous tasks in process
5725 step
= "Waiting for previous operations to terminate"
5726 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
5727 self
._write
_ns
_status
(
5730 current_operation
="UPDATING",
5731 current_operation_id
=nslcmop_id
,
5734 step
= "Getting nslcmop from database"
5735 db_nslcmop
= self
.db
.get_one(
5736 "nslcmops", {"_id": nslcmop_id
}, fail_on_empty
=False
5738 update_type
= db_nslcmop
["operationParams"]["updateType"]
5740 step
= "Getting nsr from database"
5741 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
5742 old_operational_status
= db_nsr
["operational-status"]
5743 db_nsr_update
["operational-status"] = "updating"
5744 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
5745 nsr_deployed
= db_nsr
["_admin"].get("deployed")
5747 if update_type
== "CHANGE_VNFPKG":
5748 # Get the input parameters given through update request
5749 vnf_instance_id
= db_nslcmop
["operationParams"][
5750 "changeVnfPackageData"
5751 ].get("vnfInstanceId")
5753 vnfd_id
= db_nslcmop
["operationParams"]["changeVnfPackageData"].get(
5756 timeout_seconds
= db_nslcmop
["operationParams"].get("timeout_ns_update")
5758 step
= "Getting vnfr from database"
5759 db_vnfr
= self
.db
.get_one(
5760 "vnfrs", {"_id": vnf_instance_id
}, fail_on_empty
=False
5763 step
= "Getting vnfds from database"
5765 latest_vnfd
= self
.db
.get_one(
5766 "vnfds", {"_id": vnfd_id
}, fail_on_empty
=False
5768 latest_vnfd_revision
= latest_vnfd
["_admin"].get("revision")
5771 current_vnf_revision
= db_vnfr
.get("revision", 1)
5772 current_vnfd
= self
.db
.get_one(
5774 {"_id": vnfd_id
+ ":" + str(current_vnf_revision
)},
5775 fail_on_empty
=False,
5777 # Charm artifact paths will be filled up later
5779 current_charm_artifact_path
,
5780 target_charm_artifact_path
,
5781 charm_artifact_paths
,
5783 ) = ([], [], [], [])
5785 step
= "Checking if revision has changed in VNFD"
5786 if current_vnf_revision
!= latest_vnfd_revision
:
5787 change_type
= "policy_updated"
5789 # There is new revision of VNFD, update operation is required
5790 current_vnfd_path
= vnfd_id
+ ":" + str(current_vnf_revision
)
5791 latest_vnfd_path
= vnfd_id
+ ":" + str(latest_vnfd_revision
)
5793 step
= "Removing the VNFD packages if they exist in the local path"
5794 shutil
.rmtree(self
.fs
.path
+ current_vnfd_path
, ignore_errors
=True)
5795 shutil
.rmtree(self
.fs
.path
+ latest_vnfd_path
, ignore_errors
=True)
5797 step
= "Get the VNFD packages from FSMongo"
5798 self
.fs
.sync(from_path
=latest_vnfd_path
)
5799 self
.fs
.sync(from_path
=current_vnfd_path
)
5802 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5804 current_base_folder
= current_vnfd
["_admin"]["storage"]
5805 latest_base_folder
= latest_vnfd
["_admin"]["storage"]
5807 for vca_index
, vca_deployed
in enumerate(
5808 get_iterable(nsr_deployed
, "VCA")
5810 vnf_index
= db_vnfr
.get("member-vnf-index-ref")
5812 # Getting charm-id and charm-type
5813 if vca_deployed
.get("member-vnf-index") == vnf_index
:
5814 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
5815 vca_type
= vca_deployed
.get("type")
5816 vdu_count_index
= vca_deployed
.get("vdu_count_index")
5819 ee_id
= vca_deployed
.get("ee_id")
5821 step
= "Getting descriptor config"
5822 if current_vnfd
.get("kdu"):
5823 search_key
= "kdu_name"
5825 search_key
= "vnfd_id"
5827 entity_id
= vca_deployed
.get(search_key
)
5829 descriptor_config
= get_configuration(
5830 current_vnfd
, entity_id
5833 if "execution-environment-list" in descriptor_config
:
5834 ee_list
= descriptor_config
.get(
5835 "execution-environment-list", []
5840 # There could be several charm used in the same VNF
5841 for ee_item
in ee_list
:
5842 if ee_item
.get("juju"):
5843 step
= "Getting charm name"
5844 charm_name
= ee_item
["juju"].get("charm")
5846 step
= "Setting Charm artifact paths"
5847 current_charm_artifact_path
.append(
5848 get_charm_artifact_path(
5849 current_base_folder
,
5852 current_vnf_revision
,
5855 target_charm_artifact_path
.append(
5856 get_charm_artifact_path(
5860 latest_vnfd_revision
,
5863 elif ee_item
.get("helm-chart"):
5864 # add chart to list and all parameters
5865 step
= "Getting helm chart name"
5866 chart_name
= ee_item
.get("helm-chart")
5867 vca_type
= "helm-v3"
5868 step
= "Setting Helm chart artifact paths"
5870 helm_artifacts
.append(
5872 "current_artifact_path": get_charm_artifact_path(
5873 current_base_folder
,
5876 current_vnf_revision
,
5878 "target_artifact_path": get_charm_artifact_path(
5882 latest_vnfd_revision
,
5885 "vca_index": vca_index
,
5886 "vdu_index": vdu_count_index
,
5890 charm_artifact_paths
= zip(
5891 current_charm_artifact_path
, target_charm_artifact_path
5894 step
= "Checking if software version has changed in VNFD"
5895 if find_software_version(current_vnfd
) != find_software_version(
5898 step
= "Checking if existing VNF has charm"
5899 for current_charm_path
, target_charm_path
in list(
5900 charm_artifact_paths
5902 if current_charm_path
:
5904 "Software version change is not supported as VNF instance {} has charm.".format(
5909 step
= "Checking whether the descriptor has SFC"
5910 if db_nsr
.get("nsd", {}).get("vnffgd"):
5912 "Ns update is not allowed for NS with SFC"
5915 # There is no change in the charm package, then redeploy the VNF
5916 # based on new descriptor
5917 step
= "Redeploying VNF"
5918 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5919 (result
, detailed_status
) = await self
._ns
_redeploy
_vnf
(
5920 nsr_id
, nslcmop_id
, latest_vnfd
, db_vnfr
, db_nsr
5922 if result
== "FAILED":
5923 nslcmop_operation_state
= result
5924 error_description_nslcmop
= detailed_status
5925 old_operational_status
= "failed"
5926 db_nslcmop_update
["detailed-status"] = detailed_status
5927 db_nsr_update
["detailed-status"] = detailed_status
5928 scaling_aspect
= get_scaling_aspect(latest_vnfd
)
5929 scaling_group_desc
= db_nsr
.get("_admin").get(
5930 "scaling-group", None
5932 if scaling_group_desc
:
5933 for aspect
in scaling_aspect
:
5934 scaling_group_id
= aspect
.get("id")
5935 for scale_index
, scaling_group
in enumerate(
5938 if scaling_group
.get("name") == scaling_group_id
:
5940 "_admin.scaling-group.{}.nb-scale-op".format(
5946 + " step {} Done with result {} {}".format(
5947 step
, nslcmop_operation_state
, detailed_status
5952 step
= "Checking if any charm package has changed or not"
5953 for current_charm_path
, target_charm_path
in list(
5954 charm_artifact_paths
5958 and target_charm_path
5959 and self
.check_charm_hash_changed(
5960 current_charm_path
, target_charm_path
5963 step
= "Checking whether VNF uses juju bundle"
5964 if check_juju_bundle_existence(current_vnfd
):
5966 "Charm upgrade is not supported for the instance which"
5967 " uses juju-bundle: {}".format(
5968 check_juju_bundle_existence(current_vnfd
)
5972 step
= "Upgrading Charm"
5976 ) = await self
._ns
_charm
_upgrade
(
5979 charm_type
=vca_type
,
5980 path
=self
.fs
.path
+ target_charm_path
,
5981 timeout
=timeout_seconds
,
5984 if result
== "FAILED":
5985 nslcmop_operation_state
= result
5986 error_description_nslcmop
= detailed_status
5988 db_nslcmop_update
["detailed-status"] = detailed_status
5991 + " step {} Done with result {} {}".format(
5992 step
, nslcmop_operation_state
, detailed_status
5996 step
= "Updating policies"
5997 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
5998 result
= "COMPLETED"
5999 detailed_status
= "Done"
6000 db_nslcmop_update
["detailed-status"] = "Done"
6003 for item
in helm_artifacts
:
6005 item
["current_artifact_path"]
6006 and item
["target_artifact_path"]
6007 and self
.check_charm_hash_changed(
6008 item
["current_artifact_path"],
6009 item
["target_artifact_path"],
6013 db_update_entry
= "_admin.deployed.VCA.{}.".format(
6016 vnfr_id
= db_vnfr
["_id"]
6017 osm_config
= {"osm": {"ns_id": nsr_id
, "vnf_id": vnfr_id
}}
6019 "collection": "nsrs",
6020 "filter": {"_id": nsr_id
},
6021 "path": db_update_entry
,
6023 vca_type
, namespace
, helm_id
= get_ee_id_parts(item
["ee_id"])
6024 await self
.vca_map
[vca_type
].upgrade_execution_environment(
6025 namespace
=namespace
,
6029 artifact_path
=item
["target_artifact_path"],
6032 vnf_id
= db_vnfr
.get("vnfd-ref")
6033 config_descriptor
= get_configuration(latest_vnfd
, vnf_id
)
6034 self
.logger
.debug("get ssh key block")
6038 ("config-access", "ssh-access", "required"),
6040 # Needed to inject a ssh key
6043 ("config-access", "ssh-access", "default-user"),
6046 "Install configuration Software, getting public ssh key"
6048 pub_key
= await self
.vca_map
[
6050 ].get_ee_ssh_public__key(
6051 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
6055 "Insert public key into VM user={} ssh_key={}".format(
6059 self
.logger
.debug(logging_text
+ step
)
6061 # wait for RO (ip-address) Insert pub_key into VM
6062 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
6072 initial_config_primitive_list
= config_descriptor
.get(
6073 "initial-config-primitive"
6075 config_primitive
= next(
6078 for p
in initial_config_primitive_list
6079 if p
["name"] == "config"
6083 if not config_primitive
:
6086 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6088 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
6089 if db_vnfr
.get("additionalParamsForVnf"):
6090 deploy_params
.update(
6092 db_vnfr
["additionalParamsForVnf"].copy()
6095 primitive_params_
= self
._map
_primitive
_params
(
6096 config_primitive
, {}, deploy_params
6099 step
= "execute primitive '{}' params '{}'".format(
6100 config_primitive
["name"], primitive_params_
6102 self
.logger
.debug(logging_text
+ step
)
6103 await self
.vca_map
[vca_type
].exec_primitive(
6105 primitive_name
=config_primitive
["name"],
6106 params_dict
=primitive_params_
,
6112 step
= "Updating policies"
6113 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6114 detailed_status
= "Done"
6115 db_nslcmop_update
["detailed-status"] = "Done"
6117 # If nslcmop_operation_state is None, so any operation is not failed.
6118 if not nslcmop_operation_state
:
6119 nslcmop_operation_state
= "COMPLETED"
6121 # If update CHANGE_VNFPKG nslcmop_operation is successful
6122 # vnf revision need to be updated
6123 vnfr_update
["revision"] = latest_vnfd_revision
6124 self
.update_db_2("vnfrs", db_vnfr
["_id"], vnfr_update
)
6128 + " task Done with result {} {}".format(
6129 nslcmop_operation_state
, detailed_status
6132 elif update_type
== "REMOVE_VNF":
6133 # This part is included in https://osm.etsi.org/gerrit/11876
6134 vnf_instance_id
= db_nslcmop
["operationParams"]["removeVnfInstanceId"]
6135 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_instance_id
})
6136 member_vnf_index
= db_vnfr
["member-vnf-index-ref"]
6137 step
= "Removing VNF"
6138 (result
, detailed_status
) = await self
.remove_vnf(
6139 nsr_id
, nslcmop_id
, vnf_instance_id
6141 if result
== "FAILED":
6142 nslcmop_operation_state
= result
6143 error_description_nslcmop
= detailed_status
6144 db_nslcmop_update
["detailed-status"] = detailed_status
6145 change_type
= "vnf_terminated"
6146 if not nslcmop_operation_state
:
6147 nslcmop_operation_state
= "COMPLETED"
6150 + " task Done with result {} {}".format(
6151 nslcmop_operation_state
, detailed_status
6155 elif update_type
== "OPERATE_VNF":
6156 vnf_id
= db_nslcmop
["operationParams"]["operateVnfData"][
6159 operation_type
= db_nslcmop
["operationParams"]["operateVnfData"][
6162 additional_param
= db_nslcmop
["operationParams"]["operateVnfData"][
6165 (result
, detailed_status
) = await self
.rebuild_start_stop(
6166 nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
6168 if result
== "FAILED":
6169 nslcmop_operation_state
= result
6170 error_description_nslcmop
= detailed_status
6171 db_nslcmop_update
["detailed-status"] = detailed_status
6172 if not nslcmop_operation_state
:
6173 nslcmop_operation_state
= "COMPLETED"
6176 + " task Done with result {} {}".format(
6177 nslcmop_operation_state
, detailed_status
6181 # If nslcmop_operation_state is None, so any operation is not failed.
6182 # All operations are executed in overall.
6183 if not nslcmop_operation_state
:
6184 nslcmop_operation_state
= "COMPLETED"
6185 db_nsr_update
["operational-status"] = old_operational_status
6187 except (DbException
, LcmException
, N2VCException
, K8sException
) as e
:
6188 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
6190 except asyncio
.CancelledError
:
6192 logging_text
+ "Cancelled Exception while '{}'".format(step
)
6194 exc
= "Operation was cancelled"
6195 except asyncio
.TimeoutError
:
6196 self
.logger
.error(logging_text
+ "Timeout while '{}'".format(step
))
6198 except Exception as e
:
6199 exc
= traceback
.format_exc()
6200 self
.logger
.critical(
6201 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
6210 ) = error_description_nslcmop
= "FAILED {}: {}".format(step
, exc
)
6211 nslcmop_operation_state
= "FAILED"
6212 db_nsr_update
["operational-status"] = old_operational_status
6214 self
._write
_ns
_status
(
6216 ns_state
=db_nsr
["nsState"],
6217 current_operation
="IDLE",
6218 current_operation_id
=None,
6219 other_update
=db_nsr_update
,
6222 self
._write
_op
_status
(
6225 error_message
=error_description_nslcmop
,
6226 operation_state
=nslcmop_operation_state
,
6227 other_update
=db_nslcmop_update
,
6230 if nslcmop_operation_state
:
6234 "nslcmop_id": nslcmop_id
,
6235 "operationState": nslcmop_operation_state
,
6238 change_type
in ("vnf_terminated", "policy_updated")
6239 and member_vnf_index
6241 msg
.update({"vnf_member_index": member_vnf_index
})
6242 await self
.msg
.aiowrite("ns", change_type
, msg
)
6243 except Exception as e
:
6245 logging_text
+ "kafka_write notification Exception {}".format(e
)
6247 self
.logger
.debug(logging_text
+ "Exit")
6248 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_update")
6249 return nslcmop_operation_state
, detailed_status
6251 async def scale(self
, nsr_id
, nslcmop_id
):
6252 # Try to lock HA task here
6253 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
6254 if not task_is_locked_by_me
:
6257 logging_text
= "Task ns={} scale={} ".format(nsr_id
, nslcmop_id
)
6258 stage
= ["", "", ""]
6259 tasks_dict_info
= {}
6260 # ^ stage, step, VIM progress
6261 self
.logger
.debug(logging_text
+ "Enter")
6262 # get all needed from database
6264 db_nslcmop_update
= {}
6267 # in case of error, indicates what part of scale was failed to put nsr at error status
6268 scale_process
= None
6269 old_operational_status
= ""
6270 old_config_status
= ""
6274 # wait for any previous tasks in process
6275 step
= "Waiting for previous operations to terminate"
6276 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
6277 self
._write
_ns
_status
(
6280 current_operation
="SCALING",
6281 current_operation_id
=nslcmop_id
,
6284 step
= "Getting nslcmop from database"
6286 step
+ " after having waited for previous tasks to be completed"
6288 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
6290 step
= "Getting nsr from database"
6291 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
6292 old_operational_status
= db_nsr
["operational-status"]
6293 old_config_status
= db_nsr
["config-status"]
6295 step
= "Checking whether the descriptor has SFC"
6296 if db_nsr
.get("nsd", {}).get("vnffgd"):
6297 raise LcmException("Scaling is not allowed for NS with SFC")
6299 step
= "Parsing scaling parameters"
6300 db_nsr_update
["operational-status"] = "scaling"
6301 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6302 nsr_deployed
= db_nsr
["_admin"].get("deployed")
6304 vnf_index
= db_nslcmop
["operationParams"]["scaleVnfData"][
6306 ]["member-vnf-index"]
6307 scaling_group
= db_nslcmop
["operationParams"]["scaleVnfData"][
6309 ]["scaling-group-descriptor"]
6310 scaling_type
= db_nslcmop
["operationParams"]["scaleVnfData"]["scaleVnfType"]
6311 # for backward compatibility
6312 if nsr_deployed
and isinstance(nsr_deployed
.get("VCA"), dict):
6313 nsr_deployed
["VCA"] = list(nsr_deployed
["VCA"].values())
6314 db_nsr_update
["_admin.deployed.VCA"] = nsr_deployed
["VCA"]
6315 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6317 step
= "Getting vnfr from database"
6318 db_vnfr
= self
.db
.get_one(
6319 "vnfrs", {"member-vnf-index-ref": vnf_index
, "nsr-id-ref": nsr_id
}
6322 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
6324 step
= "Getting vnfd from database"
6325 db_vnfd
= self
.db
.get_one("vnfds", {"_id": db_vnfr
["vnfd-id"]})
6327 base_folder
= db_vnfd
["_admin"]["storage"]
6329 step
= "Getting scaling-group-descriptor"
6330 scaling_descriptor
= find_in_list(
6331 get_scaling_aspect(db_vnfd
),
6332 lambda scale_desc
: scale_desc
["name"] == scaling_group
,
6334 if not scaling_descriptor
:
6336 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6337 "at vnfd:scaling-group-descriptor".format(scaling_group
)
6340 step
= "Sending scale order to VIM"
6341 # TODO check if ns is in a proper status
6343 if not db_nsr
["_admin"].get("scaling-group"):
6348 "_admin.scaling-group": [
6349 {"name": scaling_group
, "nb-scale-op": 0}
6353 admin_scale_index
= 0
6355 for admin_scale_index
, admin_scale_info
in enumerate(
6356 db_nsr
["_admin"]["scaling-group"]
6358 if admin_scale_info
["name"] == scaling_group
:
6359 nb_scale_op
= admin_scale_info
.get("nb-scale-op", 0)
6361 else: # not found, set index one plus last element and add new entry with the name
6362 admin_scale_index
+= 1
6364 "_admin.scaling-group.{}.name".format(admin_scale_index
)
6367 vca_scaling_info
= []
6368 scaling_info
= {"scaling_group_name": scaling_group
, "vdu": [], "kdu": []}
6369 if scaling_type
== "SCALE_OUT":
6370 if "aspect-delta-details" not in scaling_descriptor
:
6372 "Aspect delta details not fount in scaling descriptor {}".format(
6373 scaling_descriptor
["name"]
6376 # count if max-instance-count is reached
6377 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6379 scaling_info
["scaling_direction"] = "OUT"
6380 scaling_info
["vdu-create"] = {}
6381 scaling_info
["kdu-create"] = {}
6382 for delta
in deltas
:
6383 for vdu_delta
in delta
.get("vdu-delta", {}):
6384 vdud
= get_vdu(db_vnfd
, vdu_delta
["id"])
6385 # vdu_index also provides the number of instance of the targeted vdu
6386 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6387 if vdu_index
<= len(db_vnfr
["vdur"]):
6388 vdu_name_id
= db_vnfr
["vdur"][vdu_index
- 1]["vdu-name"]
6390 db_vnfr
["_id"] + vdu_name_id
+ str(vdu_index
- 1)
6392 prom_job_name
= prom_job_name
.replace("_", "")
6393 prom_job_name
= prom_job_name
.replace("-", "")
6395 prom_job_name
= None
6396 cloud_init_text
= self
._get
_vdu
_cloud
_init
_content
(
6400 additional_params
= (
6401 self
._get
_vdu
_additional
_params
(db_vnfr
, vdud
["id"])
6404 cloud_init_list
= []
6406 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6407 max_instance_count
= 10
6408 if vdu_profile
and "max-number-of-instances" in vdu_profile
:
6409 max_instance_count
= vdu_profile
.get(
6410 "max-number-of-instances", 10
6413 default_instance_num
= get_number_of_instances(
6416 instances_number
= vdu_delta
.get("number-of-instances", 1)
6417 nb_scale_op
+= instances_number
6419 new_instance_count
= nb_scale_op
+ default_instance_num
6420 # Control if new count is over max and vdu count is less than max.
6421 # Then assign new instance count
6422 if new_instance_count
> max_instance_count
> vdu_count
:
6423 instances_number
= new_instance_count
- max_instance_count
6425 instances_number
= instances_number
6427 if new_instance_count
> max_instance_count
:
6429 "reached the limit of {} (max-instance-count) "
6430 "scaling-out operations for the "
6431 "scaling-group-descriptor '{}'".format(
6432 nb_scale_op
, scaling_group
6435 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6437 # TODO Information of its own ip is not available because db_vnfr is not updated.
6438 additional_params
["OSM"] = get_osm_params(
6439 db_vnfr
, vdu_delta
["id"], vdu_index
+ x
6441 cloud_init_list
.append(
6442 self
._parse
_cloud
_init
(
6449 vca_scaling_info
.append(
6451 "osm_vdu_id": vdu_delta
["id"],
6452 "member-vnf-index": vnf_index
,
6454 "vdu_index": vdu_index
+ x
,
6457 scaling_info
["vdu-create"][vdu_delta
["id"]] = instances_number
6458 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6459 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6460 kdu_name
= kdu_profile
["kdu-name"]
6461 resource_name
= kdu_profile
.get("resource-name", "")
6463 # Might have different kdus in the same delta
6464 # Should have list for each kdu
6465 if not scaling_info
["kdu-create"].get(kdu_name
, None):
6466 scaling_info
["kdu-create"][kdu_name
] = []
6468 kdur
= get_kdur(db_vnfr
, kdu_name
)
6469 if kdur
.get("helm-chart"):
6470 k8s_cluster_type
= "helm-chart-v3"
6471 self
.logger
.debug("kdur: {}".format(kdur
))
6472 elif kdur
.get("juju-bundle"):
6473 k8s_cluster_type
= "juju-bundle"
6476 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6477 "juju-bundle. Maybe an old NBI version is running".format(
6478 db_vnfr
["member-vnf-index-ref"], kdu_name
6482 max_instance_count
= 10
6483 if kdu_profile
and "max-number-of-instances" in kdu_profile
:
6484 max_instance_count
= kdu_profile
.get(
6485 "max-number-of-instances", 10
6488 nb_scale_op
+= kdu_delta
.get("number-of-instances", 1)
6489 deployed_kdu
, _
= get_deployed_kdu(
6490 nsr_deployed
, kdu_name
, vnf_index
6492 if deployed_kdu
is None:
6494 "KDU '{}' for vnf '{}' not deployed".format(
6498 kdu_instance
= deployed_kdu
.get("kdu-instance")
6499 instance_num
= await self
.k8scluster_map
[
6505 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6506 kdu_model
=deployed_kdu
.get("kdu-model"),
6508 kdu_replica_count
= instance_num
+ kdu_delta
.get(
6509 "number-of-instances", 1
6512 # Control if new count is over max and instance_num is less than max.
6513 # Then assign max instance number to kdu replica count
6514 if kdu_replica_count
> max_instance_count
> instance_num
:
6515 kdu_replica_count
= max_instance_count
6516 if kdu_replica_count
> max_instance_count
:
6518 "reached the limit of {} (max-instance-count) "
6519 "scaling-out operations for the "
6520 "scaling-group-descriptor '{}'".format(
6521 instance_num
, scaling_group
6525 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6526 vca_scaling_info
.append(
6528 "osm_kdu_id": kdu_name
,
6529 "member-vnf-index": vnf_index
,
6531 "kdu_index": instance_num
+ x
- 1,
6534 scaling_info
["kdu-create"][kdu_name
].append(
6536 "member-vnf-index": vnf_index
,
6538 "k8s-cluster-type": k8s_cluster_type
,
6539 "resource-name": resource_name
,
6540 "scale": kdu_replica_count
,
6543 elif scaling_type
== "SCALE_IN":
6544 deltas
= scaling_descriptor
.get("aspect-delta-details")["deltas"]
6546 scaling_info
["scaling_direction"] = "IN"
6547 scaling_info
["vdu-delete"] = {}
6548 scaling_info
["kdu-delete"] = {}
6550 for delta
in deltas
:
6551 for vdu_delta
in delta
.get("vdu-delta", {}):
6552 vdu_count
= vdu_index
= get_vdur_index(db_vnfr
, vdu_delta
)
6553 min_instance_count
= 0
6554 vdu_profile
= get_vdu_profile(db_vnfd
, vdu_delta
["id"])
6555 if vdu_profile
and "min-number-of-instances" in vdu_profile
:
6556 min_instance_count
= vdu_profile
["min-number-of-instances"]
6558 default_instance_num
= get_number_of_instances(
6559 db_vnfd
, vdu_delta
["id"]
6561 instance_num
= vdu_delta
.get("number-of-instances", 1)
6562 nb_scale_op
-= instance_num
6564 new_instance_count
= nb_scale_op
+ default_instance_num
6566 if new_instance_count
< min_instance_count
< vdu_count
:
6567 instances_number
= min_instance_count
- new_instance_count
6569 instances_number
= instance_num
6571 if new_instance_count
< min_instance_count
:
6573 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6574 "scaling-group-descriptor '{}'".format(
6575 nb_scale_op
, scaling_group
6578 for x
in range(vdu_delta
.get("number-of-instances", 1)):
6579 vca_scaling_info
.append(
6581 "osm_vdu_id": vdu_delta
["id"],
6582 "member-vnf-index": vnf_index
,
6584 "vdu_index": vdu_index
- 1 - x
,
6587 scaling_info
["vdu-delete"][vdu_delta
["id"]] = instances_number
6588 for kdu_delta
in delta
.get("kdu-resource-delta", {}):
6589 kdu_profile
= get_kdu_resource_profile(db_vnfd
, kdu_delta
["id"])
6590 kdu_name
= kdu_profile
["kdu-name"]
6591 resource_name
= kdu_profile
.get("resource-name", "")
6593 if not scaling_info
["kdu-delete"].get(kdu_name
, None):
6594 scaling_info
["kdu-delete"][kdu_name
] = []
6596 kdur
= get_kdur(db_vnfr
, kdu_name
)
6597 if kdur
.get("helm-chart"):
6598 k8s_cluster_type
= "helm-chart-v3"
6599 self
.logger
.debug("kdur: {}".format(kdur
))
6600 elif kdur
.get("juju-bundle"):
6601 k8s_cluster_type
= "juju-bundle"
6604 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6605 "juju-bundle. Maybe an old NBI version is running".format(
6606 db_vnfr
["member-vnf-index-ref"], kdur
["kdu-name"]
6610 min_instance_count
= 0
6611 if kdu_profile
and "min-number-of-instances" in kdu_profile
:
6612 min_instance_count
= kdu_profile
["min-number-of-instances"]
6614 nb_scale_op
-= kdu_delta
.get("number-of-instances", 1)
6615 deployed_kdu
, _
= get_deployed_kdu(
6616 nsr_deployed
, kdu_name
, vnf_index
6618 if deployed_kdu
is None:
6620 "KDU '{}' for vnf '{}' not deployed".format(
6624 kdu_instance
= deployed_kdu
.get("kdu-instance")
6625 instance_num
= await self
.k8scluster_map
[
6631 cluster_uuid
=deployed_kdu
.get("k8scluster-uuid"),
6632 kdu_model
=deployed_kdu
.get("kdu-model"),
6634 kdu_replica_count
= instance_num
- kdu_delta
.get(
6635 "number-of-instances", 1
6638 if kdu_replica_count
< min_instance_count
< instance_num
:
6639 kdu_replica_count
= min_instance_count
6640 if kdu_replica_count
< min_instance_count
:
6642 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6643 "scaling-group-descriptor '{}'".format(
6644 instance_num
, scaling_group
6648 for x
in range(kdu_delta
.get("number-of-instances", 1)):
6649 vca_scaling_info
.append(
6651 "osm_kdu_id": kdu_name
,
6652 "member-vnf-index": vnf_index
,
6654 "kdu_index": instance_num
- x
- 1,
6657 scaling_info
["kdu-delete"][kdu_name
].append(
6659 "member-vnf-index": vnf_index
,
6661 "k8s-cluster-type": k8s_cluster_type
,
6662 "resource-name": resource_name
,
6663 "scale": kdu_replica_count
,
6667 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6668 vdu_delete
= copy(scaling_info
.get("vdu-delete"))
6669 if scaling_info
["scaling_direction"] == "IN":
6670 for vdur
in reversed(db_vnfr
["vdur"]):
6671 if vdu_delete
.get(vdur
["vdu-id-ref"]):
6672 vdu_delete
[vdur
["vdu-id-ref"]] -= 1
6673 scaling_info
["vdu"].append(
6675 "name": vdur
.get("name") or vdur
.get("vdu-name"),
6676 "vdu_id": vdur
["vdu-id-ref"],
6680 for interface
in vdur
["interfaces"]:
6681 scaling_info
["vdu"][-1]["interface"].append(
6683 "name": interface
["name"],
6684 "ip_address": interface
["ip-address"],
6685 "mac_address": interface
.get("mac-address"),
6688 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6691 step
= "Executing pre-scale vnf-config-primitive"
6692 if scaling_descriptor
.get("scaling-config-action"):
6693 for scaling_config_action
in scaling_descriptor
[
6694 "scaling-config-action"
6697 scaling_config_action
.get("trigger") == "pre-scale-in"
6698 and scaling_type
== "SCALE_IN"
6700 scaling_config_action
.get("trigger") == "pre-scale-out"
6701 and scaling_type
== "SCALE_OUT"
6703 vnf_config_primitive
= scaling_config_action
[
6704 "vnf-config-primitive-name-ref"
6706 step
= db_nslcmop_update
[
6708 ] = "executing pre-scale scaling-config-action '{}'".format(
6709 vnf_config_primitive
6712 # look for primitive
6713 for config_primitive
in (
6714 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
6715 ).get("config-primitive", ()):
6716 if config_primitive
["name"] == vnf_config_primitive
:
6720 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6721 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6722 "primitive".format(scaling_group
, vnf_config_primitive
)
6725 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
6726 if db_vnfr
.get("additionalParamsForVnf"):
6727 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
6729 scale_process
= "VCA"
6730 db_nsr_update
["config-status"] = "configuring pre-scaling"
6731 primitive_params
= self
._map
_primitive
_params
(
6732 config_primitive
, {}, vnfr_params
6735 # Pre-scale retry check: Check if this sub-operation has been executed before
6736 op_index
= self
._check
_or
_add
_scale
_suboperation
(
6739 vnf_config_primitive
,
6743 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
6744 # Skip sub-operation
6745 result
= "COMPLETED"
6746 result_detail
= "Done"
6749 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6750 vnf_config_primitive
, result
, result_detail
6754 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
6755 # New sub-operation: Get index of this sub-operation
6757 len(db_nslcmop
.get("_admin", {}).get("operations"))
6762 + "vnf_config_primitive={} New sub-operation".format(
6763 vnf_config_primitive
6767 # retry: Get registered params for this existing sub-operation
6768 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
6771 vnf_index
= op
.get("member_vnf_index")
6772 vnf_config_primitive
= op
.get("primitive")
6773 primitive_params
= op
.get("primitive_params")
6776 + "vnf_config_primitive={} Sub-operation retry".format(
6777 vnf_config_primitive
6780 # Execute the primitive, either with new (first-time) or registered (reintent) args
6781 ee_descriptor_id
= config_primitive
.get(
6782 "execution-environment-ref"
6784 primitive_name
= config_primitive
.get(
6785 "execution-environment-primitive", vnf_config_primitive
6787 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
6788 nsr_deployed
["VCA"],
6789 member_vnf_index
=vnf_index
,
6791 vdu_count_index
=None,
6792 ee_descriptor_id
=ee_descriptor_id
,
6794 result
, result_detail
= await self
._ns
_execute
_primitive
(
6803 + "vnf_config_primitive={} Done with result {} {}".format(
6804 vnf_config_primitive
, result
, result_detail
6807 # Update operationState = COMPLETED | FAILED
6808 self
._update
_suboperation
_status
(
6809 db_nslcmop
, op_index
, result
, result_detail
6812 if result
== "FAILED":
6813 raise LcmException(result_detail
)
6814 db_nsr_update
["config-status"] = old_config_status
6815 scale_process
= None
6819 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index
)
6822 "_admin.scaling-group.{}.time".format(admin_scale_index
)
6825 # SCALE-IN VCA - BEGIN
6826 if vca_scaling_info
:
6827 step
= db_nslcmop_update
[
6829 ] = "Deleting the execution environments"
6830 scale_process
= "VCA"
6831 for vca_info
in vca_scaling_info
:
6832 if vca_info
["type"] == "delete" and not vca_info
.get("osm_kdu_id"):
6833 member_vnf_index
= str(vca_info
["member-vnf-index"])
6835 logging_text
+ "vdu info: {}".format(vca_info
)
6837 if vca_info
.get("osm_vdu_id"):
6838 vdu_id
= vca_info
["osm_vdu_id"]
6839 vdu_index
= int(vca_info
["vdu_index"])
6842 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6843 member_vnf_index
, vdu_id
, vdu_index
6845 stage
[2] = step
= "Scaling in VCA"
6846 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
6847 vca_update
= db_nsr
["_admin"]["deployed"]["VCA"]
6848 config_update
= db_nsr
["configurationStatus"]
6849 for vca_index
, vca
in enumerate(vca_update
):
6851 (vca
or vca
.get("ee_id"))
6852 and vca
["member-vnf-index"] == member_vnf_index
6853 and vca
["vdu_count_index"] == vdu_index
6855 if vca
.get("vdu_id"):
6856 config_descriptor
= get_configuration(
6857 db_vnfd
, vca
.get("vdu_id")
6859 elif vca
.get("kdu_name"):
6860 config_descriptor
= get_configuration(
6861 db_vnfd
, vca
.get("kdu_name")
6864 config_descriptor
= get_configuration(
6865 db_vnfd
, db_vnfd
["id"]
6867 operation_params
= (
6868 db_nslcmop
.get("operationParams") or {}
6870 exec_terminate_primitives
= not operation_params
.get(
6871 "skip_terminate_primitives"
6872 ) and vca
.get("needed_terminate")
6873 task
= asyncio
.ensure_future(
6882 exec_primitives
=exec_terminate_primitives
,
6886 timeout
=self
.timeout
.charm_delete
,
6889 tasks_dict_info
[task
] = "Terminating VCA {}".format(
6892 del vca_update
[vca_index
]
6893 del config_update
[vca_index
]
6894 # wait for pending tasks of terminate primitives
6898 + "Waiting for tasks {}".format(
6899 list(tasks_dict_info
.keys())
6902 error_list
= await self
._wait
_for
_tasks
(
6906 self
.timeout
.charm_delete
, self
.timeout
.ns_terminate
6911 tasks_dict_info
.clear()
6913 raise LcmException("; ".join(error_list
))
6915 db_vca_and_config_update
= {
6916 "_admin.deployed.VCA": vca_update
,
6917 "configurationStatus": config_update
,
6920 "nsrs", db_nsr
["_id"], db_vca_and_config_update
6922 scale_process
= None
6923 # SCALE-IN VCA - END
6926 if scaling_info
.get("vdu-create") or scaling_info
.get("vdu-delete"):
6927 scale_process
= "RO"
6928 if self
.ro_config
.ng
:
6929 await self
._scale
_ng
_ro
(
6930 logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, scaling_info
, stage
6932 scaling_info
.pop("vdu-create", None)
6933 scaling_info
.pop("vdu-delete", None)
6935 scale_process
= None
6939 if scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete"):
6940 scale_process
= "KDU"
6941 await self
._scale
_kdu
(
6942 logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
6944 scaling_info
.pop("kdu-create", None)
6945 scaling_info
.pop("kdu-delete", None)
6947 scale_process
= None
6951 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
6953 # SCALE-UP VCA - BEGIN
6954 if vca_scaling_info
:
6955 step
= db_nslcmop_update
[
6957 ] = "Creating new execution environments"
6958 scale_process
= "VCA"
6959 for vca_info
in vca_scaling_info
:
6960 if vca_info
["type"] == "create" and not vca_info
.get("osm_kdu_id"):
6961 member_vnf_index
= str(vca_info
["member-vnf-index"])
6963 logging_text
+ "vdu info: {}".format(vca_info
)
6965 vnfd_id
= db_vnfr
["vnfd-ref"]
6966 if vca_info
.get("osm_vdu_id"):
6967 vdu_index
= int(vca_info
["vdu_index"])
6968 deploy_params
= {"OSM": get_osm_params(db_vnfr
)}
6969 if db_vnfr
.get("additionalParamsForVnf"):
6970 deploy_params
.update(
6972 db_vnfr
["additionalParamsForVnf"].copy()
6975 descriptor_config
= get_configuration(
6976 db_vnfd
, db_vnfd
["id"]
6978 if descriptor_config
:
6984 logging_text
=logging_text
6985 + "member_vnf_index={} ".format(member_vnf_index
),
6988 nslcmop_id
=nslcmop_id
,
6994 kdu_index
=kdu_index
,
6995 member_vnf_index
=member_vnf_index
,
6996 vdu_index
=vdu_index
,
6998 deploy_params
=deploy_params
,
6999 descriptor_config
=descriptor_config
,
7000 base_folder
=base_folder
,
7001 task_instantiation_info
=tasks_dict_info
,
7004 vdu_id
= vca_info
["osm_vdu_id"]
7005 vdur
= find_in_list(
7006 db_vnfr
["vdur"], lambda vdu
: vdu
["vdu-id-ref"] == vdu_id
7008 descriptor_config
= get_configuration(db_vnfd
, vdu_id
)
7009 if vdur
.get("additionalParams"):
7010 deploy_params_vdu
= parse_yaml_strings(
7011 vdur
["additionalParams"]
7014 deploy_params_vdu
= deploy_params
7015 deploy_params_vdu
["OSM"] = get_osm_params(
7016 db_vnfr
, vdu_id
, vdu_count_index
=vdu_index
7018 if descriptor_config
:
7024 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7025 member_vnf_index
, vdu_id
, vdu_index
7027 stage
[2] = step
= "Scaling out VCA"
7028 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
7030 logging_text
=logging_text
7031 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
7032 member_vnf_index
, vdu_id
, vdu_index
7036 nslcmop_id
=nslcmop_id
,
7042 member_vnf_index
=member_vnf_index
,
7043 vdu_index
=vdu_index
,
7044 kdu_index
=kdu_index
,
7046 deploy_params
=deploy_params_vdu
,
7047 descriptor_config
=descriptor_config
,
7048 base_folder
=base_folder
,
7049 task_instantiation_info
=tasks_dict_info
,
7052 # SCALE-UP VCA - END
7053 scale_process
= None
7056 # execute primitive service POST-SCALING
7057 step
= "Executing post-scale vnf-config-primitive"
7058 if scaling_descriptor
.get("scaling-config-action"):
7059 for scaling_config_action
in scaling_descriptor
[
7060 "scaling-config-action"
7063 scaling_config_action
.get("trigger") == "post-scale-in"
7064 and scaling_type
== "SCALE_IN"
7066 scaling_config_action
.get("trigger") == "post-scale-out"
7067 and scaling_type
== "SCALE_OUT"
7069 vnf_config_primitive
= scaling_config_action
[
7070 "vnf-config-primitive-name-ref"
7072 step
= db_nslcmop_update
[
7074 ] = "executing post-scale scaling-config-action '{}'".format(
7075 vnf_config_primitive
7078 vnfr_params
= {"VDU_SCALE_INFO": scaling_info
}
7079 if db_vnfr
.get("additionalParamsForVnf"):
7080 vnfr_params
.update(db_vnfr
["additionalParamsForVnf"])
7082 # look for primitive
7083 for config_primitive
in (
7084 get_configuration(db_vnfd
, db_vnfd
["id"]) or {}
7085 ).get("config-primitive", ()):
7086 if config_primitive
["name"] == vnf_config_primitive
:
7090 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
7091 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
7092 "config-primitive".format(
7093 scaling_group
, vnf_config_primitive
7096 scale_process
= "VCA"
7097 db_nsr_update
["config-status"] = "configuring post-scaling"
7098 primitive_params
= self
._map
_primitive
_params
(
7099 config_primitive
, {}, vnfr_params
7102 # Post-scale retry check: Check if this sub-operation has been executed before
7103 op_index
= self
._check
_or
_add
_scale
_suboperation
(
7106 vnf_config_primitive
,
7110 if op_index
== self
.SUBOPERATION_STATUS_SKIP
:
7111 # Skip sub-operation
7112 result
= "COMPLETED"
7113 result_detail
= "Done"
7116 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
7117 vnf_config_primitive
, result
, result_detail
7121 if op_index
== self
.SUBOPERATION_STATUS_NEW
:
7122 # New sub-operation: Get index of this sub-operation
7124 len(db_nslcmop
.get("_admin", {}).get("operations"))
7129 + "vnf_config_primitive={} New sub-operation".format(
7130 vnf_config_primitive
7134 # retry: Get registered params for this existing sub-operation
7135 op
= db_nslcmop
.get("_admin", {}).get("operations", [])[
7138 vnf_index
= op
.get("member_vnf_index")
7139 vnf_config_primitive
= op
.get("primitive")
7140 primitive_params
= op
.get("primitive_params")
7143 + "vnf_config_primitive={} Sub-operation retry".format(
7144 vnf_config_primitive
7147 # Execute the primitive, either with new (first-time) or registered (reintent) args
7148 ee_descriptor_id
= config_primitive
.get(
7149 "execution-environment-ref"
7151 primitive_name
= config_primitive
.get(
7152 "execution-environment-primitive", vnf_config_primitive
7154 ee_id
, vca_type
= self
._look
_for
_deployed
_vca
(
7155 nsr_deployed
["VCA"],
7156 member_vnf_index
=vnf_index
,
7158 vdu_count_index
=None,
7159 ee_descriptor_id
=ee_descriptor_id
,
7161 result
, result_detail
= await self
._ns
_execute
_primitive
(
7170 + "vnf_config_primitive={} Done with result {} {}".format(
7171 vnf_config_primitive
, result
, result_detail
7174 # Update operationState = COMPLETED | FAILED
7175 self
._update
_suboperation
_status
(
7176 db_nslcmop
, op_index
, result
, result_detail
7179 if result
== "FAILED":
7180 raise LcmException(result_detail
)
7181 db_nsr_update
["config-status"] = old_config_status
7182 scale_process
= None
7184 # Check if each vnf has exporter for metric collection if so update prometheus job records
7185 if scaling_type
== "SCALE_OUT":
7186 if "exporters-endpoints" in db_vnfd
.get("df")[0]:
7187 vnfr_id
= db_vnfr
["id"]
7188 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7189 exporter_config
= db_vnfd
.get("df")[0].get("exporters-endpoints")
7190 self
.logger
.debug("exporter config :{}".format(exporter_config
))
7191 artifact_path
= "{}/{}/{}".format(
7192 base_folder
["folder"],
7193 base_folder
["pkg-dir"],
7194 "exporter-endpoint",
7197 ee_config_descriptor
= exporter_config
7198 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
7202 vdu_id
=db_vnfr
["vdur"][-1]["vdu-id-ref"],
7203 vdu_index
=db_vnfr
["vdur"][-1]["count-index"],
7207 self
.logger
.debug("rw_mgmt_ip:{}".format(rw_mgmt_ip
))
7208 self
.logger
.debug("Artifact_path:{}".format(artifact_path
))
7209 vdu_id_for_prom
= None
7210 vdu_index_for_prom
= None
7211 for x
in get_iterable(db_vnfr
, "vdur"):
7212 vdu_id_for_prom
= x
.get("vdu-id-ref")
7213 vdu_index_for_prom
= x
.get("count-index")
7214 vnfr_id
= vnfr_id
+ vdu_id
+ str(vdu_index
)
7215 vnfr_id
= vnfr_id
.replace("_", "")
7216 prometheus_jobs
= await self
.extract_prometheus_scrape_jobs(
7218 artifact_path
=artifact_path
,
7219 ee_config_descriptor
=ee_config_descriptor
,
7222 target_ip
=rw_mgmt_ip
,
7224 vdu_id
=vdu_id_for_prom
,
7225 vdu_index
=vdu_index_for_prom
,
7228 self
.logger
.debug("Prometheus job:{}".format(prometheus_jobs
))
7231 "_admin.deployed.prometheus_jobs"
7239 for job
in prometheus_jobs
:
7245 fail_on_empty
=False,
7249 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
7250 db_nsr_update
["operational-status"] = (
7252 if old_operational_status
== "failed"
7253 else old_operational_status
7255 db_nsr_update
["config-status"] = old_config_status
7258 ROclient
.ROClientException
,
7263 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
7265 except asyncio
.CancelledError
:
7267 logging_text
+ "Cancelled Exception while '{}'".format(step
)
7269 exc
= "Operation was cancelled"
7270 except Exception as e
:
7271 exc
= traceback
.format_exc()
7272 self
.logger
.critical(
7273 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
7279 error_list
.append(str(exc
))
7280 self
._write
_ns
_status
(
7283 current_operation
="IDLE",
7284 current_operation_id
=None,
7288 stage
[1] = "Waiting for instantiate pending tasks."
7289 self
.logger
.debug(logging_text
+ stage
[1])
7290 exc
= await self
._wait
_for
_tasks
(
7293 self
.timeout
.ns_deploy
,
7298 except asyncio
.CancelledError
:
7299 error_list
.append("Cancelled")
7300 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
7301 await self
._wait
_for
_tasks
(
7304 self
.timeout
.ns_deploy
,
7310 error_detail
= "; ".join(error_list
)
7313 ] = error_description_nslcmop
= "FAILED {}: {}".format(
7316 nslcmop_operation_state
= "FAILED"
7318 db_nsr_update
["operational-status"] = old_operational_status
7319 db_nsr_update
["config-status"] = old_config_status
7320 db_nsr_update
["detailed-status"] = ""
7322 if "VCA" in scale_process
:
7323 db_nsr_update
["config-status"] = "failed"
7324 if "RO" in scale_process
:
7325 db_nsr_update
["operational-status"] = "failed"
7328 ] = "FAILED scaling nslcmop={} {}: {}".format(
7329 nslcmop_id
, step
, error_detail
7332 error_description_nslcmop
= None
7333 nslcmop_operation_state
= "COMPLETED"
7334 db_nslcmop_update
["detailed-status"] = "Done"
7335 if scaling_type
== "SCALE_IN" and prom_job_name
is not None:
7338 {"job_name": prom_job_name
},
7339 fail_on_empty
=False,
7342 self
._write
_op
_status
(
7345 error_message
=error_description_nslcmop
,
7346 operation_state
=nslcmop_operation_state
,
7347 other_update
=db_nslcmop_update
,
7350 self
._write
_ns
_status
(
7353 current_operation
="IDLE",
7354 current_operation_id
=None,
7355 other_update
=db_nsr_update
,
7358 if nslcmop_operation_state
:
7362 "nslcmop_id": nslcmop_id
,
7363 "operationState": nslcmop_operation_state
,
7365 await self
.msg
.aiowrite("ns", "scaled", msg
)
7366 except Exception as e
:
7368 logging_text
+ "kafka_write notification Exception {}".format(e
)
7370 self
.logger
.debug(logging_text
+ "Exit")
7371 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_scale")
7373 async def _scale_kdu(
7374 self
, logging_text
, nsr_id
, nsr_deployed
, db_vnfd
, vca_id
, scaling_info
7376 _scaling_info
= scaling_info
.get("kdu-create") or scaling_info
.get("kdu-delete")
7377 for kdu_name
in _scaling_info
:
7378 for kdu_scaling_info
in _scaling_info
[kdu_name
]:
7379 deployed_kdu
, index
= get_deployed_kdu(
7380 nsr_deployed
, kdu_name
, kdu_scaling_info
["member-vnf-index"]
7382 cluster_uuid
= deployed_kdu
["k8scluster-uuid"]
7383 kdu_instance
= deployed_kdu
["kdu-instance"]
7384 kdu_model
= deployed_kdu
.get("kdu-model")
7385 scale
= int(kdu_scaling_info
["scale"])
7386 k8s_cluster_type
= kdu_scaling_info
["k8s-cluster-type"]
7389 "collection": "nsrs",
7390 "filter": {"_id": nsr_id
},
7391 "path": "_admin.deployed.K8s.{}".format(index
),
7394 step
= "scaling application {}".format(
7395 kdu_scaling_info
["resource-name"]
7397 self
.logger
.debug(logging_text
+ step
)
7399 if kdu_scaling_info
["type"] == "delete":
7400 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7403 and kdu_config
.get("terminate-config-primitive")
7404 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7406 terminate_config_primitive_list
= kdu_config
.get(
7407 "terminate-config-primitive"
7409 terminate_config_primitive_list
.sort(
7410 key
=lambda val
: int(val
["seq"])
7414 terminate_config_primitive
7415 ) in terminate_config_primitive_list
:
7416 primitive_params_
= self
._map
_primitive
_params
(
7417 terminate_config_primitive
, {}, {}
7419 step
= "execute terminate config primitive"
7420 self
.logger
.debug(logging_text
+ step
)
7421 await asyncio
.wait_for(
7422 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7423 cluster_uuid
=cluster_uuid
,
7424 kdu_instance
=kdu_instance
,
7425 primitive_name
=terminate_config_primitive
["name"],
7426 params
=primitive_params_
,
7428 total_timeout
=self
.timeout
.primitive
,
7431 timeout
=self
.timeout
.primitive
7432 * self
.timeout
.primitive_outer_factor
,
7435 await asyncio
.wait_for(
7436 self
.k8scluster_map
[k8s_cluster_type
].scale(
7437 kdu_instance
=kdu_instance
,
7439 resource_name
=kdu_scaling_info
["resource-name"],
7440 total_timeout
=self
.timeout
.scale_on_error
,
7442 cluster_uuid
=cluster_uuid
,
7443 kdu_model
=kdu_model
,
7447 timeout
=self
.timeout
.scale_on_error
7448 * self
.timeout
.scale_on_error_outer_factor
,
7451 if kdu_scaling_info
["type"] == "create":
7452 kdu_config
= get_configuration(db_vnfd
, kdu_name
)
7455 and kdu_config
.get("initial-config-primitive")
7456 and get_juju_ee_ref(db_vnfd
, kdu_name
) is None
7458 initial_config_primitive_list
= kdu_config
.get(
7459 "initial-config-primitive"
7461 initial_config_primitive_list
.sort(
7462 key
=lambda val
: int(val
["seq"])
7465 for initial_config_primitive
in initial_config_primitive_list
:
7466 primitive_params_
= self
._map
_primitive
_params
(
7467 initial_config_primitive
, {}, {}
7469 step
= "execute initial config primitive"
7470 self
.logger
.debug(logging_text
+ step
)
7471 await asyncio
.wait_for(
7472 self
.k8scluster_map
[k8s_cluster_type
].exec_primitive(
7473 cluster_uuid
=cluster_uuid
,
7474 kdu_instance
=kdu_instance
,
7475 primitive_name
=initial_config_primitive
["name"],
7476 params
=primitive_params_
,
7483 async def _scale_ng_ro(
7484 self
, logging_text
, db_nsr
, db_nslcmop
, db_vnfr
, vdu_scaling_info
, stage
7486 nsr_id
= db_nslcmop
["nsInstanceId"]
7487 db_nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7490 # read from db: vnfd's for every vnf
7493 # for each vnf in ns, read vnfd
7494 for vnfr
in self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
}):
7495 db_vnfrs
[vnfr
["member-vnf-index-ref"]] = vnfr
7496 vnfd_id
= vnfr
["vnfd-id"] # vnfd uuid for this vnf
7497 # if we haven't this vnfd, read it from db
7498 if not find_in_list(db_vnfds
, lambda a_vnfd
: a_vnfd
["id"] == vnfd_id
):
7500 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7501 db_vnfds
.append(vnfd
)
7502 n2vc_key
= self
.n2vc
.get_public_key()
7503 n2vc_key_list
= [n2vc_key
]
7506 vdu_scaling_info
.get("vdu-create"),
7507 vdu_scaling_info
.get("vdu-delete"),
7510 # db_vnfr has been updated, update db_vnfrs to use it
7511 db_vnfrs
[db_vnfr
["member-vnf-index-ref"]] = db_vnfr
7512 await self
._instantiate
_ng
_ro
(
7522 start_deploy
=time(),
7523 timeout_ns_deploy
=self
.timeout
.ns_deploy
,
7525 if vdu_scaling_info
.get("vdu-delete"):
7527 db_vnfr
, None, vdu_scaling_info
["vdu-delete"], mark_delete
=False
7530 async def extract_prometheus_scrape_jobs(
7534 ee_config_descriptor
: dict,
7539 vnf_member_index
: str = "",
7541 vdu_index
: int = None,
7543 kdu_index
: int = None,
7545 """Method to extract prometheus scrape jobs from EE's Prometheus template job file
7546 This method will wait until the corresponding VDU or KDU is fully instantiated
7549 ee_id (str): Execution Environment ID
7550 artifact_path (str): Path where the EE's content is (including the Prometheus template file)
7551 ee_config_descriptor (dict): Execution Environment's configuration descriptor
7552 vnfr_id (str): VNFR ID where this EE applies
7553 nsr_id (str): NSR ID where this EE applies
7554 target_ip (str): VDU/KDU instance IP address
7555 element_type (str): NS or VNF or VDU or KDU
7556 vnf_member_index (str, optional): VNF index where this EE applies. Defaults to "".
7557 vdu_id (str, optional): VDU ID where this EE applies. Defaults to "".
7558 vdu_index (int, optional): VDU index where this EE applies. Defaults to None.
7559 kdu_name (str, optional): KDU name where this EE applies. Defaults to "".
7560 kdu_index (int, optional): KDU index where this EE applies. Defaults to None.
7563 LcmException: When the VDU or KDU instance was not found in an hour
7566 _type_: Prometheus jobs
7568 # default the vdur and kdur names to an empty string, to avoid any later
7569 # problem with Prometheus when the element type is not VDU or KDU
7573 # look if exist a file called 'prometheus*.j2' and
7574 artifact_content
= self
.fs
.dir_ls(artifact_path
)
7578 for f
in artifact_content
7579 if f
.startswith("prometheus") and f
.endswith(".j2")
7585 self
.logger
.debug("Artifact path{}".format(artifact_path
))
7586 self
.logger
.debug("job file{}".format(job_file
))
7587 with self
.fs
.file_open((artifact_path
, job_file
), "r") as f
:
7590 # obtain the VDUR or KDUR, if the element type is VDU or KDU
7591 if element_type
in ("VDU", "KDU"):
7592 for _
in range(360):
7593 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnfr_id
})
7594 if vdu_id
and vdu_index
is not None:
7598 for x
in get_iterable(db_vnfr
, "vdur")
7600 x
.get("vdu-id-ref") == vdu_id
7601 and x
.get("count-index") == vdu_index
7606 if vdur
.get("name"):
7607 vdur_name
= vdur
.get("name")
7609 if kdu_name
and kdu_index
is not None:
7613 for x
in get_iterable(db_vnfr
, "kdur")
7615 x
.get("kdu-name") == kdu_name
7616 and x
.get("count-index") == kdu_index
7621 if kdur
.get("name"):
7622 kdur_name
= kdur
.get("name")
7625 await asyncio
.sleep(10)
7627 if vdu_id
and vdu_index
is not None:
7629 f
"Timeout waiting VDU with name={vdu_id} and index={vdu_index} to be intantiated"
7631 if kdu_name
and kdu_index
is not None:
7633 f
"Timeout waiting KDU with name={kdu_name} and index={kdu_index} to be intantiated"
7636 if ee_id
is not None:
7637 _
, namespace
, helm_id
= get_ee_id_parts(
7639 ) # get namespace and EE gRPC service name
7640 host_name
= f
'{helm_id}-{ee_config_descriptor["metric-service"]}.{namespace}.svc' # svc_name.namespace.svc
7642 vnfr_id
= vnfr_id
.replace("-", "")
7644 "JOB_NAME": vnfr_id
,
7645 "TARGET_IP": target_ip
,
7646 "EXPORTER_POD_IP": host_name
,
7647 "EXPORTER_POD_PORT": host_port
,
7649 "VNF_MEMBER_INDEX": vnf_member_index
,
7650 "VDUR_NAME": vdur_name
,
7651 "KDUR_NAME": kdur_name
,
7652 "ELEMENT_TYPE": element_type
,
7655 metric_path
= ee_config_descriptor
["metric-path"]
7656 target_port
= ee_config_descriptor
["metric-port"]
7657 vnfr_id
= vnfr_id
.replace("-", "")
7659 "JOB_NAME": vnfr_id
,
7660 "TARGET_IP": target_ip
,
7661 "TARGET_PORT": target_port
,
7662 "METRIC_PATH": metric_path
,
7665 job_list
= parse_job(job_data
, variables
)
7666 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7667 for job
in job_list
:
7669 not isinstance(job
.get("job_name"), str)
7670 or vnfr_id
not in job
["job_name"]
7672 job
["job_name"] = vnfr_id
+ "_" + str(SystemRandom().randint(1, 10000))
7673 job
["nsr_id"] = nsr_id
7674 job
["vnfr_id"] = vnfr_id
7677 async def rebuild_start_stop(
7678 self
, nsr_id
, nslcmop_id
, vnf_id
, additional_param
, operation_type
7680 logging_text
= "Task ns={} {}={} ".format(nsr_id
, operation_type
, nslcmop_id
)
7681 self
.logger
.info(logging_text
+ "Enter")
7682 stage
= ["Preparing the environment", ""]
7683 # database nsrs record
7687 # in case of error, indicates what part of scale was failed to put nsr at error status
7688 start_deploy
= time()
7690 db_vnfr
= self
.db
.get_one("vnfrs", {"_id": vnf_id
})
7691 vim_account_id
= db_vnfr
.get("vim-account-id")
7692 vim_info_key
= "vim:" + vim_account_id
7693 vdu_id
= additional_param
["vdu_id"]
7694 vdurs
= [item
for item
in db_vnfr
["vdur"] if item
["vdu-id-ref"] == vdu_id
]
7695 vdur
= find_in_list(
7696 vdurs
, lambda vdu
: vdu
["count-index"] == additional_param
["count-index"]
7699 vdu_vim_name
= vdur
["name"]
7700 vim_vm_id
= vdur
["vim_info"][vim_info_key
]["vim_id"]
7701 target_vim
, _
= next(k_v
for k_v
in vdur
["vim_info"].items())
7703 raise LcmException("Target vdu is not found")
7704 self
.logger
.info("vdu_vim_name >> {} ".format(vdu_vim_name
))
7705 # wait for any previous tasks in process
7706 stage
[1] = "Waiting for previous operations to terminate"
7707 self
.logger
.info(stage
[1])
7708 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7710 stage
[1] = "Reading from database."
7711 self
.logger
.info(stage
[1])
7712 self
._write
_ns
_status
(
7715 current_operation
=operation_type
.upper(),
7716 current_operation_id
=nslcmop_id
,
7718 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7721 stage
[1] = "Getting nsr={} from db.".format(nsr_id
)
7722 db_nsr_update
["operational-status"] = operation_type
7723 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7727 "vim_vm_id": vim_vm_id
,
7729 "vdu_index": additional_param
["count-index"],
7730 "vdu_id": vdur
["id"],
7731 "target_vim": target_vim
,
7732 "vim_account_id": vim_account_id
,
7735 stage
[1] = "Sending rebuild request to RO... {}".format(desc
)
7736 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
, queuePosition
=0)
7737 self
.logger
.info("ro nsr id: {}".format(nsr_id
))
7738 result_dict
= await self
.RO
.operate(nsr_id
, desc
, operation_type
)
7739 self
.logger
.info("response from RO: {}".format(result_dict
))
7740 action_id
= result_dict
["action_id"]
7741 await self
._wait
_ng
_ro
(
7746 self
.timeout
.operate
,
7748 "start_stop_rebuild",
7750 return "COMPLETED", "Done"
7751 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7752 self
.logger
.error("Exit Exception {}".format(e
))
7754 except asyncio
.CancelledError
:
7755 self
.logger
.error("Cancelled Exception while '{}'".format(stage
))
7756 exc
= "Operation was cancelled"
7757 except Exception as e
:
7758 exc
= traceback
.format_exc()
7759 self
.logger
.critical(
7760 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7762 return "FAILED", "Error in operate VNF {}".format(exc
)
7764 async def migrate(self
, nsr_id
, nslcmop_id
):
7766 Migrate VNFs and VDUs instances in a NS
7768 :param: nsr_id: NS Instance ID
7769 :param: nslcmop_id: nslcmop ID of migrate
7772 # Try to lock HA task here
7773 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7774 if not task_is_locked_by_me
:
7776 logging_text
= "Task ns={} migrate ".format(nsr_id
)
7777 self
.logger
.debug(logging_text
+ "Enter")
7778 # get all needed from database
7780 db_nslcmop_update
= {}
7781 nslcmop_operation_state
= None
7785 # in case of error, indicates what part of scale was failed to put nsr at error status
7786 start_deploy
= time()
7789 # wait for any previous tasks in process
7790 step
= "Waiting for previous operations to terminate"
7791 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7793 self
._write
_ns
_status
(
7796 current_operation
="MIGRATING",
7797 current_operation_id
=nslcmop_id
,
7799 step
= "Getting nslcmop from database"
7801 step
+ " after having waited for previous tasks to be completed"
7803 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7804 migrate_params
= db_nslcmop
.get("operationParams")
7807 target
.update(migrate_params
)
7808 desc
= await self
.RO
.migrate(nsr_id
, target
)
7809 self
.logger
.debug("RO return > {}".format(desc
))
7810 action_id
= desc
["action_id"]
7811 await self
._wait
_ng
_ro
(
7816 self
.timeout
.migrate
,
7817 operation
="migrate",
7819 except (ROclient
.ROClientException
, DbException
, LcmException
) as e
:
7820 self
.logger
.error("Exit Exception {}".format(e
))
7822 except asyncio
.CancelledError
:
7823 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
7824 exc
= "Operation was cancelled"
7825 except Exception as e
:
7826 exc
= traceback
.format_exc()
7827 self
.logger
.critical(
7828 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
7831 self
._write
_ns
_status
(
7834 current_operation
="IDLE",
7835 current_operation_id
=None,
7838 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
7839 nslcmop_operation_state
= "FAILED"
7841 nslcmop_operation_state
= "COMPLETED"
7842 db_nslcmop_update
["detailed-status"] = "Done"
7843 db_nsr_update
["detailed-status"] = "Done"
7845 self
._write
_op
_status
(
7849 operation_state
=nslcmop_operation_state
,
7850 other_update
=db_nslcmop_update
,
7852 if nslcmop_operation_state
:
7856 "nslcmop_id": nslcmop_id
,
7857 "operationState": nslcmop_operation_state
,
7859 await self
.msg
.aiowrite("ns", "migrated", msg
)
7860 except Exception as e
:
7862 logging_text
+ "kafka_write notification Exception {}".format(e
)
7864 self
.logger
.debug(logging_text
+ "Exit")
7865 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_migrate")
7867 async def heal(self
, nsr_id
, nslcmop_id
):
7871 :param nsr_id: ns instance to heal
7872 :param nslcmop_id: operation to run
7876 # Try to lock HA task here
7877 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
7878 if not task_is_locked_by_me
:
7881 logging_text
= "Task ns={} heal={} ".format(nsr_id
, nslcmop_id
)
7882 stage
= ["", "", ""]
7883 tasks_dict_info
= {}
7884 # ^ stage, step, VIM progress
7885 self
.logger
.debug(logging_text
+ "Enter")
7886 # get all needed from database
7888 db_nslcmop_update
= {}
7890 db_vnfrs
= {} # vnf's info indexed by _id
7892 old_operational_status
= ""
7893 old_config_status
= ""
7896 # wait for any previous tasks in process
7897 step
= "Waiting for previous operations to terminate"
7898 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
7899 self
._write
_ns
_status
(
7902 current_operation
="HEALING",
7903 current_operation_id
=nslcmop_id
,
7906 step
= "Getting nslcmop from database"
7908 step
+ " after having waited for previous tasks to be completed"
7910 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
7912 step
= "Getting nsr from database"
7913 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
7914 old_operational_status
= db_nsr
["operational-status"]
7915 old_config_status
= db_nsr
["config-status"]
7918 "_admin.deployed.RO.operational-status": "healing",
7920 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
7922 step
= "Sending heal order to VIM"
7924 logging_text
=logging_text
,
7926 db_nslcmop
=db_nslcmop
,
7931 stage
[1] = "Getting nsd={} from db.".format(db_nsr
["nsd-id"])
7932 self
.logger
.debug(logging_text
+ stage
[1])
7933 nsd
= self
.db
.get_one("nsds", {"_id": db_nsr
["nsd-id"]})
7934 self
.fs
.sync(db_nsr
["nsd-id"])
7936 # read from db: vnfr's of this ns
7937 step
= "Getting vnfrs from db"
7938 db_vnfrs_list
= self
.db
.get_list("vnfrs", {"nsr-id-ref": nsr_id
})
7939 for vnfr
in db_vnfrs_list
:
7940 db_vnfrs
[vnfr
["_id"]] = vnfr
7941 self
.logger
.debug("ns.heal db_vnfrs={}".format(db_vnfrs
))
7943 # Check for each target VNF
7944 target_list
= db_nslcmop
.get("operationParams", {}).get("healVnfData", {})
7945 for target_vnf
in target_list
:
7946 # Find this VNF in the list from DB
7947 vnfr_id
= target_vnf
.get("vnfInstanceId", None)
7949 db_vnfr
= db_vnfrs
[vnfr_id
]
7950 vnfd_id
= db_vnfr
.get("vnfd-id")
7951 vnfd_ref
= db_vnfr
.get("vnfd-ref")
7952 vnfd
= self
.db
.get_one("vnfds", {"_id": vnfd_id
})
7953 base_folder
= vnfd
["_admin"]["storage"]
7958 nsi_id
= None # TODO put nsi_id when this nsr belongs to a NSI
7959 member_vnf_index
= db_vnfr
.get("member-vnf-index-ref")
7961 # Check each target VDU and deploy N2VC
7962 target_vdu_list
= target_vnf
.get("additionalParams", {}).get(
7965 if not target_vdu_list
:
7966 # Codigo nuevo para crear diccionario
7967 target_vdu_list
= []
7968 for existing_vdu
in db_vnfr
.get("vdur"):
7969 vdu_name
= existing_vdu
.get("vdu-name", None)
7970 vdu_index
= existing_vdu
.get("count-index", 0)
7971 vdu_run_day1
= target_vnf
.get("additionalParams", {}).get(
7974 vdu_to_be_healed
= {
7976 "count-index": vdu_index
,
7977 "run-day1": vdu_run_day1
,
7979 target_vdu_list
.append(vdu_to_be_healed
)
7980 for target_vdu
in target_vdu_list
:
7981 deploy_params_vdu
= target_vdu
7982 # Set run-day1 vnf level value if not vdu level value exists
7983 if not deploy_params_vdu
.get("run-day1") and target_vnf
.get(
7984 "additionalParams", {}
7986 deploy_params_vdu
["run-day1"] = target_vnf
[
7989 vdu_name
= target_vdu
.get("vdu-id", None)
7990 # TODO: Get vdu_id from vdud.
7992 # For multi instance VDU count-index is mandatory
7993 # For single session VDU count-indes is 0
7994 vdu_index
= target_vdu
.get("count-index", 0)
7996 # n2vc_redesign STEP 3 to 6 Deploy N2VC
7997 stage
[1] = "Deploying Execution Environments."
7998 self
.logger
.debug(logging_text
+ stage
[1])
8000 # VNF Level charm. Normal case when proxy charms.
8001 # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
8002 descriptor_config
= get_configuration(vnfd
, vnfd_ref
)
8003 if descriptor_config
:
8004 # Continue if healed machine is management machine
8005 vnf_ip_address
= db_vnfr
.get("ip-address")
8006 target_instance
= None
8007 for instance
in db_vnfr
.get("vdur", None):
8009 instance
["vdu-name"] == vdu_name
8010 and instance
["count-index"] == vdu_index
8012 target_instance
= instance
8014 if vnf_ip_address
== target_instance
.get("ip-address"):
8016 logging_text
=logging_text
8017 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8018 member_vnf_index
, vdu_name
, vdu_index
8022 nslcmop_id
=nslcmop_id
,
8028 member_vnf_index
=member_vnf_index
,
8031 deploy_params
=deploy_params_vdu
,
8032 descriptor_config
=descriptor_config
,
8033 base_folder
=base_folder
,
8034 task_instantiation_info
=tasks_dict_info
,
8038 # VDU Level charm. Normal case with native charms.
8039 descriptor_config
= get_configuration(vnfd
, vdu_name
)
8040 if descriptor_config
:
8042 logging_text
=logging_text
8043 + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
8044 member_vnf_index
, vdu_name
, vdu_index
8048 nslcmop_id
=nslcmop_id
,
8054 member_vnf_index
=member_vnf_index
,
8055 vdu_index
=vdu_index
,
8057 deploy_params
=deploy_params_vdu
,
8058 descriptor_config
=descriptor_config
,
8059 base_folder
=base_folder
,
8060 task_instantiation_info
=tasks_dict_info
,
8065 ROclient
.ROClientException
,
8070 self
.logger
.error(logging_text
+ "Exit Exception {}".format(e
))
8072 except asyncio
.CancelledError
:
8074 logging_text
+ "Cancelled Exception while '{}'".format(step
)
8076 exc
= "Operation was cancelled"
8077 except Exception as e
:
8078 exc
= traceback
.format_exc()
8079 self
.logger
.critical(
8080 logging_text
+ "Exit Exception {} {}".format(type(e
).__name
__, e
),
8086 error_list
.append(str(exc
))
8089 stage
[1] = "Waiting for healing pending tasks."
8090 self
.logger
.debug(logging_text
+ stage
[1])
8091 exc
= await self
._wait
_for
_tasks
(
8094 self
.timeout
.ns_deploy
,
8099 except asyncio
.CancelledError
:
8100 error_list
.append("Cancelled")
8101 await self
._cancel
_pending
_tasks
(logging_text
, tasks_dict_info
)
8102 await self
._wait
_for
_tasks
(
8105 self
.timeout
.ns_deploy
,
8111 error_detail
= "; ".join(error_list
)
8114 ] = error_description_nslcmop
= "FAILED {}: {}".format(
8117 nslcmop_operation_state
= "FAILED"
8119 db_nsr_update
["operational-status"] = old_operational_status
8120 db_nsr_update
["config-status"] = old_config_status
8123 ] = "FAILED healing nslcmop={} {}: {}".format(
8124 nslcmop_id
, step
, error_detail
8126 for task
, task_name
in tasks_dict_info
.items():
8127 if not task
.done() or task
.cancelled() or task
.exception():
8128 if task_name
.startswith(self
.task_name_deploy_vca
):
8129 # A N2VC task is pending
8130 db_nsr_update
["config-status"] = "failed"
8132 # RO task is pending
8133 db_nsr_update
["operational-status"] = "failed"
8135 error_description_nslcmop
= None
8136 nslcmop_operation_state
= "COMPLETED"
8137 db_nslcmop_update
["detailed-status"] = "Done"
8138 db_nsr_update
["detailed-status"] = "Done"
8139 db_nsr_update
["operational-status"] = "running"
8140 db_nsr_update
["config-status"] = "configured"
8142 self
._write
_op
_status
(
8145 error_message
=error_description_nslcmop
,
8146 operation_state
=nslcmop_operation_state
,
8147 other_update
=db_nslcmop_update
,
8150 self
._write
_ns
_status
(
8153 current_operation
="IDLE",
8154 current_operation_id
=None,
8155 other_update
=db_nsr_update
,
8158 if nslcmop_operation_state
:
8162 "nslcmop_id": nslcmop_id
,
8163 "operationState": nslcmop_operation_state
,
8165 await self
.msg
.aiowrite("ns", "healed", msg
)
8166 except Exception as e
:
8168 logging_text
+ "kafka_write notification Exception {}".format(e
)
8170 self
.logger
.debug(logging_text
+ "Exit")
8171 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_heal")
8182 :param logging_text: preffix text to use at logging
8183 :param nsr_id: nsr identity
8184 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
8185 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
8186 :return: None or exception
8189 def get_vim_account(vim_account_id
):
8191 if vim_account_id
in db_vims
:
8192 return db_vims
[vim_account_id
]
8193 db_vim
= self
.db
.get_one("vim_accounts", {"_id": vim_account_id
})
8194 db_vims
[vim_account_id
] = db_vim
8199 ns_params
= db_nslcmop
.get("operationParams")
8200 if ns_params
and ns_params
.get("timeout_ns_heal"):
8201 timeout_ns_heal
= ns_params
["timeout_ns_heal"]
8203 timeout_ns_heal
= self
.timeout
.ns_heal
8207 nslcmop_id
= db_nslcmop
["_id"]
8209 "action_id": nslcmop_id
,
8211 self
.logger
.warning(
8212 "db_nslcmop={} and timeout_ns_heal={}".format(
8213 db_nslcmop
, timeout_ns_heal
8216 target
.update(db_nslcmop
.get("operationParams", {}))
8218 self
.logger
.debug("Send to RO > nsr_id={} target={}".format(nsr_id
, target
))
8219 desc
= await self
.RO
.recreate(nsr_id
, target
)
8220 self
.logger
.debug("RO return > {}".format(desc
))
8221 action_id
= desc
["action_id"]
8222 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
8223 await self
._wait
_ng
_ro
(
8230 operation
="healing",
8235 "_admin.deployed.RO.operational-status": "running",
8236 "detailed-status": " ".join(stage
),
8238 self
.update_db_2("nsrs", nsr_id
, db_nsr_update
)
8239 self
._write
_op
_status
(nslcmop_id
, stage
)
8241 logging_text
+ "ns healed at RO. RO_id={}".format(action_id
)
8244 except Exception as e
:
8245 stage
[2] = "ERROR healing at VIM"
8246 # self.set_vnfr_at_error(db_vnfrs, str(e))
8248 "Error healing at VIM {}".format(e
),
8249 exc_info
=not isinstance(
8252 ROclient
.ROClientException
,
8278 task_instantiation_info
,
8281 # launch instantiate_N2VC in a asyncio task and register task object
8282 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
8283 # if not found, create one entry and update database
8284 # fill db_nsr._admin.deployed.VCA.<index>
8287 logging_text
+ "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id
, vdu_id
)
8291 get_charm_name
= False
8292 if "execution-environment-list" in descriptor_config
:
8293 ee_list
= descriptor_config
.get("execution-environment-list", [])
8294 elif "juju" in descriptor_config
:
8295 ee_list
= [descriptor_config
] # ns charms
8296 if "execution-environment-list" not in descriptor_config
:
8297 # charm name is only required for ns charms
8298 get_charm_name
= True
8299 else: # other types as script are not supported
8302 for ee_item
in ee_list
:
8305 + "_deploy_n2vc ee_item juju={}, helm={}".format(
8306 ee_item
.get("juju"), ee_item
.get("helm-chart")
8309 ee_descriptor_id
= ee_item
.get("id")
8310 if ee_item
.get("juju"):
8311 vca_name
= ee_item
["juju"].get("charm")
8313 charm_name
= self
.find_charm_name(db_nsr
, str(vca_name
))
8316 if ee_item
["juju"].get("charm") is not None
8319 if ee_item
["juju"].get("cloud") == "k8s":
8320 vca_type
= "k8s_proxy_charm"
8321 elif ee_item
["juju"].get("proxy") is False:
8322 vca_type
= "native_charm"
8323 elif ee_item
.get("helm-chart"):
8324 vca_name
= ee_item
["helm-chart"]
8325 vca_type
= "helm-v3"
8328 logging_text
+ "skipping non juju neither charm configuration"
8333 for vca_index
, vca_deployed
in enumerate(
8334 db_nsr
["_admin"]["deployed"]["VCA"]
8336 if not vca_deployed
:
8339 vca_deployed
.get("member-vnf-index") == member_vnf_index
8340 and vca_deployed
.get("vdu_id") == vdu_id
8341 and vca_deployed
.get("kdu_name") == kdu_name
8342 and vca_deployed
.get("vdu_count_index", 0) == vdu_index
8343 and vca_deployed
.get("ee_descriptor_id") == ee_descriptor_id
8347 # not found, create one.
8349 "ns" if not member_vnf_index
else "vnf/{}".format(member_vnf_index
)
8352 target
+= "/vdu/{}/{}".format(vdu_id
, vdu_index
or 0)
8354 target
+= "/kdu/{}".format(kdu_name
)
8356 "target_element": target
,
8357 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
8358 "member-vnf-index": member_vnf_index
,
8360 "kdu_name": kdu_name
,
8361 "vdu_count_index": vdu_index
,
8362 "operational-status": "init", # TODO revise
8363 "detailed-status": "", # TODO revise
8364 "step": "initial-deploy", # TODO revise
8366 "vdu_name": vdu_name
,
8368 "ee_descriptor_id": ee_descriptor_id
,
8369 "charm_name": charm_name
,
8373 # create VCA and configurationStatus in db
8375 "_admin.deployed.VCA.{}".format(vca_index
): vca_deployed
,
8376 "configurationStatus.{}".format(vca_index
): dict(),
8378 self
.update_db_2("nsrs", nsr_id
, db_dict
)
8380 db_nsr
["_admin"]["deployed"]["VCA"].append(vca_deployed
)
8382 self
.logger
.debug("N2VC > NSR_ID > {}".format(nsr_id
))
8383 self
.logger
.debug("N2VC > DB_NSR > {}".format(db_nsr
))
8384 self
.logger
.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed
))
8387 task_n2vc
= asyncio
.ensure_future(
8389 logging_text
=logging_text
,
8390 vca_index
=vca_index
,
8396 vdu_index
=vdu_index
,
8397 deploy_params
=deploy_params
,
8398 config_descriptor
=descriptor_config
,
8399 base_folder
=base_folder
,
8400 nslcmop_id
=nslcmop_id
,
8404 ee_config_descriptor
=ee_item
,
8407 self
.lcm_tasks
.register(
8411 "instantiate_N2VC-{}".format(vca_index
),
8414 task_instantiation_info
[
8416 ] = self
.task_name_deploy_vca
+ " {}.{}".format(
8417 member_vnf_index
or "", vdu_id
or ""
# NOTE(review): this chunk is a mangled extraction -- statements are split
# across lines and many original lines are missing (gaps in the embedded
# numbering, e.g. 8421-8436). Code below is preserved byte-for-byte; only
# comment lines are added. Purpose, from the visible calls: re-run the
# N2VC/VCA configuration steps for one execution environment while healing
# an NS -- build the Juju model namespace, (re)register the execution
# environment for native charms, install configuration software, wait for
# RO healing to complete, then execute Day-1 initial config primitives and
# update the "nsrs" status records. Confirm details against the intact file.
8420 async def heal_N2VC(
8437 ee_config_descriptor
,
# --- resolve identifiers and db paths for this VCA entry ---
8439 nsr_id
= db_nsr
["_id"]
8440 db_update_entry
= "_admin.deployed.VCA.{}.".format(vca_index
)
8441 vca_deployed_list
= db_nsr
["_admin"]["deployed"]["VCA"]
8442 vca_deployed
= db_nsr
["_admin"]["deployed"]["VCA"][vca_index
]
8443 osm_config
= {"osm": {"ns_id": db_nsr
["_id"]}}
# db_dict fragments below: where VCA status is written inside the "nsrs" record
8445 "collection": "nsrs",
8446 "filter": {"_id": nsr_id
},
8447 "path": db_update_entry
,
8452 element_under_configuration
= nsr_id
8456 vnfr_id
= db_vnfr
["_id"]
8457 osm_config
["osm"]["vnf_id"] = vnfr_id
# Juju model namespace "<nsi>.<ns>" -- extended below depending on whether the
# configured element is a VNF, a VDU or a KDU.
8459 namespace
= "{nsi}.{ns}".format(nsi
=nsi_id
if nsi_id
else "", ns
=nsr_id
)
8461 if vca_type
== "native_charm":
8464 index_number
= vdu_index
or 0
8467 element_type
= "VNF"
8468 element_under_configuration
= vnfr_id
8469 namespace
+= ".{}-{}".format(vnfr_id
, index_number
)
8471 namespace
+= ".{}-{}".format(vdu_id
, index_number
)
8472 element_type
= "VDU"
8473 element_under_configuration
= "{}-{}".format(vdu_id
, index_number
)
8474 osm_config
["osm"]["vdu_id"] = vdu_id
8476 namespace
+= ".{}".format(kdu_name
)
8477 element_type
= "KDU"
8478 element_under_configuration
= kdu_name
8479 osm_config
["osm"]["kdu_name"] = kdu_name
# locate the charm artifact inside the package: "pkg-dir" layout vs the
# flat Scripts/ layout (charm dir name depends on the proxy/native vca_type)
8482 if base_folder
["pkg-dir"]:
8483 artifact_path
= "{}/{}/{}/{}".format(
8484 base_folder
["folder"],
8485 base_folder
["pkg-dir"],
8488 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8493 artifact_path
= "{}/Scripts/{}/{}/".format(
8494 base_folder
["folder"],
8497 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
8502 self
.logger
.debug("Artifact path > {}".format(artifact_path
))
8504 # get initial_config_primitive_list that applies to this element
8505 initial_config_primitive_list
= config_descriptor
.get(
8506 "initial-config-primitive"
8510 "Initial config primitive list > {}".format(
8511 initial_config_primitive_list
8515 # add config if not present for NS charm
8516 ee_descriptor_id
= ee_config_descriptor
.get("id")
8517 self
.logger
.debug("EE Descriptor > {}".format(ee_descriptor_id
))
8518 initial_config_primitive_list
= get_ee_sorted_initial_config_primitive_list(
8519 initial_config_primitive_list
, vca_deployed
, ee_descriptor_id
8523 "Initial config primitive list #2 > {}".format(
8524 initial_config_primitive_list
8527 # n2vc_redesign STEP 3.1
8528 # find old ee_id if exists
8529 ee_id
= vca_deployed
.get("ee_id")
8531 vca_id
= self
.get_vca_id(db_vnfr
, db_nsr
)
8532 # create or register execution environment in VCA. Only for native charms when healing
8533 if vca_type
== "native_charm":
8534 step
= "Waiting to VM being up and getting IP address"
8535 self
.logger
.debug(logging_text
+ step
)
8536 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
# build SSH credentials used to register the execution environment; the
# username falls back to the initial-config-primitive "ssh-username" param
8545 credentials
= {"hostname": rw_mgmt_ip
}
8547 username
= deep_get(
8548 config_descriptor
, ("config-access", "ssh-access", "default-user")
8550 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
8551 # merged. Meanwhile let's get username from initial-config-primitive
8552 if not username
and initial_config_primitive_list
:
8553 for config_primitive
in initial_config_primitive_list
:
8554 for param
in config_primitive
.get("parameter", ()):
8555 if param
["name"] == "ssh-username":
8556 username
= param
["value"]
8560 "Cannot determine the username neither with 'initial-config-primitive' nor with "
8561 "'config-access.ssh-access.default-user'"
8563 credentials
["username"] = username
8565 # n2vc_redesign STEP 3.2
8566 # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
8567 self
._write
_configuration
_status
(
8569 vca_index
=vca_index
,
8570 status
="REGISTERING",
8571 element_under_configuration
=element_under_configuration
,
8572 element_type
=element_type
,
8575 step
= "register execution environment {}".format(credentials
)
8576 self
.logger
.debug(logging_text
+ step
)
8577 ee_id
= await self
.vca_map
[vca_type
].register_execution_environment(
8578 credentials
=credentials
,
8579 namespace
=namespace
,
8584 # update ee_id en db
8586 "_admin.deployed.VCA.{}.ee_id".format(vca_index
): ee_id
,
8588 self
.update_db_2("nsrs", nsr_id
, db_dict_ee_id
)
8590 # for compatibility with MON/POL modules, the need model and application name at database
8591 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
8592 # Not sure if this need to be done when healing
8594 ee_id_parts = ee_id.split(".")
8595 db_nsr_update = {db_update_entry + "ee_id": ee_id}
8596 if len(ee_id_parts) >= 2:
8597 model_name = ee_id_parts[0]
8598 application_name = ee_id_parts[1]
8599 db_nsr_update[db_update_entry + "model"] = model_name
8600 db_nsr_update[db_update_entry + "application"] = application_name
8603 # n2vc_redesign STEP 3.3
8604 # Install configuration software. Only for native charms.
8605 step
= "Install configuration Software"
8607 self
._write
_configuration
_status
(
8609 vca_index
=vca_index
,
8610 status
="INSTALLING SW",
8611 element_under_configuration
=element_under_configuration
,
8612 element_type
=element_type
,
8613 # other_update=db_nsr_update,
8617 # TODO check if already done
8618 self
.logger
.debug(logging_text
+ step
)
8620 if vca_type
== "native_charm":
8621 config_primitive
= next(
8622 (p
for p
in initial_config_primitive_list
if p
["name"] == "config"),
8625 if config_primitive
:
8626 config
= self
._map
_primitive
_params
(
8627 config_primitive
, {}, deploy_params
8629 await self
.vca_map
[vca_type
].install_configuration_sw(
8631 artifact_path
=artifact_path
,
8639 # write in db flag of configuration_sw already installed
8641 "nsrs", nsr_id
, {db_update_entry
+ "config_sw_installed": True}
8644 # Not sure if this need to be done when healing
8646 # add relations for this VCA (wait for other peers related with this VCA)
8647 await self._add_vca_relations(
8648 logging_text=logging_text,
8651 vca_index=vca_index,
8655 # if SSH access is required, then get execution environment SSH public
8656 # if native charm we have waited already to VM be UP
8657 if vca_type
in ("k8s_proxy_charm", "lxc_proxy_charm", "helm-v3"):
8660 # self.logger.debug("get ssh key block")
8662 config_descriptor
, ("config-access", "ssh-access", "required")
8664 # self.logger.debug("ssh key needed")
8665 # Needed to inject a ssh key
8668 ("config-access", "ssh-access", "default-user"),
8670 step
= "Install configuration Software, getting public ssh key"
8671 pub_key
= await self
.vca_map
[vca_type
].get_ee_ssh_public__key(
8672 ee_id
=ee_id
, db_dict
=db_dict
, vca_id
=vca_id
8675 step
= "Insert public key into VM user={} ssh_key={}".format(
8679 # self.logger.debug("no need to get ssh key")
8680 step
= "Waiting to VM being up and getting IP address"
8681 self
.logger
.debug(logging_text
+ step
)
8683 # n2vc_redesign STEP 5.1
8684 # wait for RO (ip-address) Insert pub_key into VM
8685 # IMPORTANT: We need do wait for RO to complete healing operation.
8686 await self
._wait
_heal
_ro
(nsr_id
, self
.timeout
.ns_heal
)
8689 rw_mgmt_ip
= await self
.wait_kdu_up(
8690 logging_text
, nsr_id
, vnfr_id
, kdu_name
8693 rw_mgmt_ip
= await self
.wait_vm_up_insert_key_ro(
8703 rw_mgmt_ip
= None # This is for a NS configuration
8705 self
.logger
.debug(logging_text
+ " VM_ip_address={}".format(rw_mgmt_ip
))
8707 # store rw_mgmt_ip in deploy params for later replacement
8708 deploy_params
["rw_mgmt_ip"] = rw_mgmt_ip
8711 # get run-day1 operation parameter
8712 runDay1
= deploy_params
.get("run-day1", False)
8714 "Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id
, vdu_id
, runDay1
)
8717 # n2vc_redesign STEP 6 Execute initial config primitive
8718 step
= "execute initial config primitive"
8720 # wait for dependent primitives execution (NS -> VNF -> VDU)
8721 if initial_config_primitive_list
:
8722 await self
._wait
_dependent
_n
2vc
(
8723 nsr_id
, vca_deployed_list
, vca_index
8726 # stage, in function of element type: vdu, kdu, vnf or ns
8727 my_vca
= vca_deployed_list
[vca_index
]
8728 if my_vca
.get("vdu_id") or my_vca
.get("kdu_name"):
8730 stage
[0] = "Stage 3/5: running Day-1 primitives for VDU."
8731 elif my_vca
.get("member-vnf-index"):
8733 stage
[0] = "Stage 4/5: running Day-1 primitives for VNF."
8736 stage
[0] = "Stage 5/5: running Day-1 primitives for NS."
8738 self
._write
_configuration
_status
(
8739 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="EXECUTING PRIMITIVE"
8742 self
._write
_op
_status
(op_id
=nslcmop_id
, stage
=stage
)
# run each Day-1 primitive in order; the first one that needs a terminate
# counterpart flags "needed_terminate" in the nsrs record (done once)
8744 check_if_terminated_needed
= True
8745 for initial_config_primitive
in initial_config_primitive_list
:
8746 # adding information on the vca_deployed if it is a NS execution environment
8747 if not vca_deployed
["member-vnf-index"]:
8748 deploy_params
["ns_config_info"] = json
.dumps(
8749 self
._get
_ns
_config
_info
(nsr_id
)
8751 # TODO check if already done
8752 primitive_params_
= self
._map
_primitive
_params
(
8753 initial_config_primitive
, {}, deploy_params
8756 step
= "execute primitive '{}' params '{}'".format(
8757 initial_config_primitive
["name"], primitive_params_
8759 self
.logger
.debug(logging_text
+ step
)
8760 await self
.vca_map
[vca_type
].exec_primitive(
8762 primitive_name
=initial_config_primitive
["name"],
8763 params_dict
=primitive_params_
,
8768 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
8769 if check_if_terminated_needed
:
8770 if config_descriptor
.get("terminate-config-primitive"):
8774 {db_update_entry
+ "needed_terminate": True},
8776 check_if_terminated_needed
= False
8778 # TODO register in database that primitive is done
8780 # STEP 7 Configure metrics
8781 # Not sure if this need to be done when healing
8783 if vca_type == "helm" or vca_type == "helm-v3":
8784 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
8786 artifact_path=artifact_path,
8787 ee_config_descriptor=ee_config_descriptor,
8790 target_ip=rw_mgmt_ip,
8796 {db_update_entry + "prometheus_jobs": prometheus_jobs},
8799 for job in prometheus_jobs:
8802 {"job_name": job["job_name"]},
8805 fail_on_empty=False,
8809 step
= "instantiated at VCA"
8810 self
.logger
.debug(logging_text
+ step
)
8812 self
._write
_configuration
_status
(
8813 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="READY"
# NOTE(review): the matching "try:" for this handler is among the lines
# missing from this extraction -- on failure the VCA status is set to
# BROKEN and the error re-raised as LcmException
8816 except Exception as e
: # TODO not use Exception but N2VC exception
8817 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
8819 e
, (DbException
, N2VCException
, LcmException
, asyncio
.CancelledError
)
8822 "Exception while {} : {}".format(step
, e
), exc_info
=True
8824 self
._write
_configuration
_status
(
8825 nsr_id
=nsr_id
, vca_index
=vca_index
, status
="BROKEN"
8827 raise LcmException("{} {}".format(step
, e
)) from e
async def _wait_heal_ro(
    self,
    nsr_id,
    timeout=600,
):
    """Poll the NS record until RO finishes its healing operation.

    Reads ``_admin.deployed.RO.operational-status`` from the "nsrs"
    record every 15 seconds and returns as soon as it leaves the
    "healing" state.

    :param nsr_id: id of the NS instance record ("nsrs") to poll.
    :param timeout: maximum seconds to wait before giving up.
        NOTE(review): the parameter lines were missing from this
        extraction; names reconstructed from the visible loop condition
        and the call site (self.timeout.ns_heal is passed) -- confirm
        the default against the intact file.
    :raises NgRoException: when the timeout expires while RO is still
        reporting "healing".
    """
    start_time = time()
    while time() <= start_time + timeout:
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        operational_status_ro = db_nsr["_admin"]["deployed"]["RO"][
            "operational-status"
        ]
        self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
        if operational_status_ro != "healing":
            break
        await asyncio.sleep(15)
    else:  # while exhausted without leaving "healing": timed out
        # Fix: the original message said "Timeout waiting ns to deploy"
        # (copy-paste from the deployment wait loop) although this loop
        # waits for a *heal* operation.
        raise NgRoException("Timeout waiting ns to heal")
# NOTE(review): mangled extraction -- statements are split across lines and
# several original lines are missing (gaps in the embedded numbering, e.g.
# the "try:" and some dict initializations). Code preserved byte-for-byte;
# only comment lines added. Purpose, from the visible calls: vertically
# scale (change flavor of) a VDU in an NS -- append the requested flavor to
# the nsrs record, point the target vdur at it, delegate the scale to RO,
# and on failure revert the vdur to its old flavor id.
8847 async def vertical_scale(self
, nsr_id
, nslcmop_id
):
8849 Vertical Scale the VDUs in a NS
8851 :param: nsr_id: NS Instance ID
8852 :param: nslcmop_id: nslcmop ID of migrate
8855 # Try to lock HA task here
8856 task_is_locked_by_me
= self
.lcm_tasks
.lock_HA("ns", "nslcmops", nslcmop_id
)
8857 if not task_is_locked_by_me
:
8859 logging_text
= "Task ns={} vertical scale ".format(nsr_id
)
8860 self
.logger
.debug(logging_text
+ "Enter")
8861 # get all needed from database
8863 db_nslcmop_update
= {}
8864 nslcmop_operation_state
= None
8867 old_vdu_index
= None
8868 old_flavor_id
= None
8872 # in case of error, indicates what part of scale was failed to put nsr at error status
8873 start_deploy
= time()
8876 # wait for any previous tasks in process
8877 step
= "Waiting for previous operations to terminate"
8878 await self
.lcm_tasks
.waitfor_related_HA("ns", "nslcmops", nslcmop_id
)
8880 self
._write
_ns
_status
(
8883 current_operation
="VerticalScale",
8884 current_operation_id
=nslcmop_id
,
8886 step
= "Getting nslcmop from database"
8888 step
+ " after having waited for previous tasks to be completed"
# fetch operation and NS records; derive the new flavor entry from the
# changeVnfFlavorData additionalParams of the operation
8890 db_nslcmop
= self
.db
.get_one("nslcmops", {"_id": nslcmop_id
})
8891 operationParams
= db_nslcmop
.get("operationParams")
8892 # Update the VNFRS and NSRS with the requested flavour detail, So that ro tasks can function properly
8893 db_nsr
= self
.db
.get_one("nsrs", {"_id": nsr_id
})
8894 db_flavor
= db_nsr
.get("flavor")
8895 db_flavor_index
= str(len(db_flavor
))
8896 change_vnf_flavor_data
= operationParams
["changeVnfFlavorData"]
8897 flavor_dict
= change_vnf_flavor_data
["additionalParams"]
8898 count_index
= flavor_dict
["vduCountIndex"]
8899 vdu_id_ref
= flavor_dict
["vduid"]
8900 flavor_dict_update
= {
8901 "id": db_flavor_index
,
8902 "memory-mb": flavor_dict
["virtualMemory"],
8903 "name": f
"{vdu_id_ref}-{count_index}-flv",
8904 "storage-gb": flavor_dict
["sizeOfStorage"],
8905 "vcpu-count": flavor_dict
["numVirtualCpu"],
8907 db_flavor
.append(flavor_dict_update
)
8909 db_update
["flavor"] = db_flavor
8915 q_filter
=ns_q_filter
,
8916 update_dict
=db_update
,
# find the target vdur (matching count-index and vdu-id-ref) and remember
# its current ns-flavor-id so it can be reverted on failure
8919 db_vnfr
= self
.db
.get_one(
8920 "vnfrs", {"_id": change_vnf_flavor_data
["vnfInstanceId"]}
8922 for vdu_index
, vdur
in enumerate(db_vnfr
.get("vdur", ())):
8924 vdur
.get("count-index") == count_index
8925 and vdur
.get("vdu-id-ref") == vdu_id_ref
8927 old_flavor_id
= vdur
.get("ns-flavor-id", 0)
8928 old_vdu_index
= vdu_index
8930 "_id": change_vnf_flavor_data
["vnfInstanceId"],
8931 "vdur.count-index": count_index
,
8932 "vdur.vdu-id-ref": vdu_id_ref
,
8934 q_filter
.update(filter_text
)
8937 "vdur.{}.ns-flavor-id".format(vdu_index
)
8942 update_dict
=db_update
,
# delegate the actual vertical scale to RO and wait for the RO action
8946 target
.update(operationParams
)
8947 desc
= await self
.RO
.vertical_scale(nsr_id
, target
)
8948 self
.logger
.debug("RO return > {}".format(desc
))
8949 action_id
= desc
["action_id"]
8950 await self
._wait
_ng
_ro
(
8955 self
.timeout
.verticalscale
,
8956 operation
="verticalscale",
# NOTE(review): the matching "try:" for these handlers is among the lines
# missing from this extraction
8960 ROclient
.ROClientException
,
8964 self
.logger
.error("Exit Exception {}".format(e
))
8966 except asyncio
.CancelledError
:
8967 self
.logger
.error("Cancelled Exception while '{}'".format(step
))
8968 exc
= "Operation was cancelled"
8969 except Exception as e
:
8970 exc
= traceback
.format_exc()
8971 self
.logger
.critical(
8972 "Exit Exception {} {}".format(type(e
).__name
__, e
), exc_info
=True
8975 self
._write
_ns
_status
(
8978 current_operation
="IDLE",
8979 current_operation_id
=None,
8982 db_nslcmop_update
["detailed-status"] = "FAILED {}: {}".format(step
, exc
)
8983 nslcmop_operation_state
= "FAILED"
8985 "vdur.{}.ns-flavor-id".format(old_vdu_index
)
8988 nslcmop_operation_state
= "COMPLETED"
8989 db_nslcmop_update
["detailed-status"] = "Done"
8990 db_nsr_update
["detailed-status"] = "Done"
8992 self
._write
_op
_status
(
8996 operation_state
=nslcmop_operation_state
,
8997 other_update
=db_nslcmop_update
,
# on failure, restore the old flavor id on the vdur (revert)
8999 if old_vdu_index
and old_db_update
!= {}:
9000 self
.logger
.critical(
9001 "Reverting Old Flavor -- : {}".format(old_db_update
)
9006 update_dict
=old_db_update
,
# notify the final operation state on the kafka bus (best effort)
9009 if nslcmop_operation_state
:
9013 "nslcmop_id": nslcmop_id
,
9014 "operationState": nslcmop_operation_state
,
9016 await self
.msg
.aiowrite("ns", "verticalscaled", msg
)
9017 except Exception as e
:
9019 logging_text
+ "kafka_write notification Exception {}".format(e
)
9021 self
.logger
.debug(logging_text
+ "Exit")
9022 self
.lcm_tasks
.remove("ns", nsr_id
, nslcmop_id
, "ns_verticalscale")