Set autoescape to True in Jinja2 environment
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 select_autoescape,
34 )
35
36 from osm_lcm import ROclient
37 from osm_lcm.data_utils.nsr import (
38 get_deployed_kdu,
39 get_deployed_vca,
40 get_deployed_vca_list,
41 get_nsd,
42 )
43 from osm_lcm.data_utils.vca import (
44 DeployedComponent,
45 DeployedK8sResource,
46 DeployedVCA,
47 EELevel,
48 Relation,
49 EERelation,
50 safe_get_ee_relation,
51 )
52 from osm_lcm.ng_ro import NgRoClient, NgRoException
53 from osm_lcm.lcm_utils import (
54 LcmException,
55 LcmExceptionNoMgmtIP,
56 LcmBase,
57 deep_get,
58 get_iterable,
59 populate_dict,
60 check_juju_bundle_existence,
61 get_charm_artifact_path,
62 )
63 from osm_lcm.data_utils.nsd import (
64 get_ns_configuration_relation_list,
65 get_vnf_profile,
66 get_vnf_profiles,
67 )
68 from osm_lcm.data_utils.vnfd import (
69 get_kdu,
70 get_kdu_services,
71 get_relation_list,
72 get_vdu_list,
73 get_vdu_profile,
74 get_ee_sorted_initial_config_primitive_list,
75 get_ee_sorted_terminate_config_primitive_list,
76 get_kdu_list,
77 get_virtual_link_profiles,
78 get_vdu,
79 get_configuration,
80 get_vdu_index,
81 get_scaling_aspect,
82 get_number_of_instances,
83 get_juju_ee_ref,
84 get_kdu_resource_profile,
85 find_software_version,
86 )
87 from osm_lcm.data_utils.list_utils import find_in_list
88 from osm_lcm.data_utils.vnfr import (
89 get_osm_params,
90 get_vdur_index,
91 get_kdur,
92 get_volumes_from_instantiation_params,
93 )
94 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
95 from osm_lcm.data_utils.database.vim_account import VimAccountDB
96 from n2vc.definitions import RelationEndpoint
97 from n2vc.k8s_helm_conn import K8sHelmConnector
98 from n2vc.k8s_helm3_conn import K8sHelm3Connector
99 from n2vc.k8s_juju_conn import K8sJujuConnector
100
101 from osm_common.dbbase import DbException
102 from osm_common.fsbase import FsException
103
104 from osm_lcm.data_utils.database.database import Database
105 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
106
107 from n2vc.n2vc_juju_conn import N2VCJujuConnector
108 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
109
110 from osm_lcm.lcm_helm_conn import LCMHelmConn
111 from osm_lcm.osm_config import OsmConfigBuilder
112 from osm_lcm.prometheus import parse_job
113
114 from copy import copy, deepcopy
115 from time import time
116 from uuid import uuid4
117
118 from random import randint
119
120 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
121
122
class NsLcm(LcmBase):
    # Time a charm may stay at blocked/error status (counted from the first
    # time it was seen there) before it is marked as failed.
    timeout_vca_on_error = 5 * 60
    timeout_ns_deploy = 2 * 3600  # default global timeout for NS deployment
    timeout_ns_terminate = 1800  # default global timeout for NS termination
    timeout_ns_heal = 1800  # default global timeout for NS healing
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for NS update
    # Timeout waiting for *some* progress while a primitive is executing.
    timeout_progress_primitive = 10 * 60
    timeout_migrate = 1800  # default global timeout for migrating VNFs
    timeout_operate = 1800  # default global timeout for operate actions
    timeout_verticalscale = 1800  # default global timeout for vertical scaling
    # Sentinel return codes used by the sub-operation lookup helpers.
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
143
144 def __init__(self, msg, lcm_tasks, config, loop):
145 """
146 Init, Connect to database, filesystem storage, and messaging
147 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
148 :return: None
149 """
150 super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))
151
152 self.db = Database().instance.db
153 self.fs = Filesystem().instance.fs
154 self.loop = loop
155 self.lcm_tasks = lcm_tasks
156 self.timeout = config["timeout"]
157 self.ro_config = config["ro_config"]
158 self.ng_ro = config["ro_config"].get("ng")
159 self.vca_config = config["VCA"].copy()
160
161 # create N2VC connector
162 self.n2vc = N2VCJujuConnector(
163 log=self.logger,
164 loop=self.loop,
165 on_update_db=self._on_update_n2vc_db,
166 fs=self.fs,
167 db=self.db,
168 )
169
170 self.conn_helm_ee = LCMHelmConn(
171 log=self.logger,
172 loop=self.loop,
173 vca_config=self.vca_config,
174 on_update_db=self._on_update_n2vc_db,
175 )
176
177 self.k8sclusterhelm2 = K8sHelmConnector(
178 kubectl_command=self.vca_config.get("kubectlpath"),
179 helm_command=self.vca_config.get("helmpath"),
180 log=self.logger,
181 on_update_db=None,
182 fs=self.fs,
183 db=self.db,
184 )
185
186 self.k8sclusterhelm3 = K8sHelm3Connector(
187 kubectl_command=self.vca_config.get("kubectlpath"),
188 helm_command=self.vca_config.get("helm3path"),
189 fs=self.fs,
190 log=self.logger,
191 db=self.db,
192 on_update_db=None,
193 )
194
195 self.k8sclusterjuju = K8sJujuConnector(
196 kubectl_command=self.vca_config.get("kubectlpath"),
197 juju_command=self.vca_config.get("jujupath"),
198 log=self.logger,
199 loop=self.loop,
200 on_update_db=self._on_update_k8s_db,
201 fs=self.fs,
202 db=self.db,
203 )
204
205 self.k8scluster_map = {
206 "helm-chart": self.k8sclusterhelm2,
207 "helm-chart-v3": self.k8sclusterhelm3,
208 "chart": self.k8sclusterhelm3,
209 "juju-bundle": self.k8sclusterjuju,
210 "juju": self.k8sclusterjuju,
211 }
212
213 self.vca_map = {
214 "lxc_proxy_charm": self.n2vc,
215 "native_charm": self.n2vc,
216 "k8s_proxy_charm": self.n2vc,
217 "helm": self.conn_helm_ee,
218 "helm-v3": self.conn_helm_ee,
219 }
220
221 # create RO client
222 self.RO = NgRoClient(self.loop, **self.ro_config)
223
224 self.op_status_map = {
225 "instantiation": self.RO.status,
226 "termination": self.RO.status,
227 "migrate": self.RO.status,
228 "healing": self.RO.recreate_status,
229 "verticalscale": self.RO.status,
230 "start_stop_rebuild": self.RO.status,
231 }
232
233 @staticmethod
234 def increment_ip_mac(ip_mac, vm_index=1):
235 if not isinstance(ip_mac, str):
236 return ip_mac
237 try:
238 # try with ipv4 look for last dot
239 i = ip_mac.rfind(".")
240 if i > 0:
241 i += 1
242 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
243 # try with ipv6 or mac look for last colon. Operate in hex
244 i = ip_mac.rfind(":")
245 if i > 0:
246 i += 1
247 # format in hex, len can be 2 for mac or 4 for ipv6
248 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
249 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
250 )
251 except Exception:
252 pass
253 return None
254
255 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
256
257 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
258
259 try:
260 # TODO filter RO descriptor fields...
261
262 # write to database
263 db_dict = dict()
264 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
265 db_dict["deploymentStatus"] = ro_descriptor
266 self.update_db_2("nsrs", nsrs_id, db_dict)
267
268 except Exception as e:
269 self.logger.warn(
270 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
271 )
272
273 async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
274
275 # remove last dot from path (if exists)
276 if path.endswith("."):
277 path = path[:-1]
278
279 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
280 # .format(table, filter, path, updated_data))
281 try:
282
283 nsr_id = filter.get("_id")
284
285 # read ns record from database
286 nsr = self.db.get_one(table="nsrs", q_filter=filter)
287 current_ns_status = nsr.get("nsState")
288
289 # get vca status for NS
290 status_dict = await self.n2vc.get_status(
291 namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
292 )
293
294 # vcaStatus
295 db_dict = dict()
296 db_dict["vcaStatus"] = status_dict
297
298 # update configurationStatus for this VCA
299 try:
300 vca_index = int(path[path.rfind(".") + 1 :])
301
302 vca_list = deep_get(
303 target_dict=nsr, key_list=("_admin", "deployed", "VCA")
304 )
305 vca_status = vca_list[vca_index].get("status")
306
307 configuration_status_list = nsr.get("configurationStatus")
308 config_status = configuration_status_list[vca_index].get("status")
309
310 if config_status == "BROKEN" and vca_status != "failed":
311 db_dict["configurationStatus"][vca_index] = "READY"
312 elif config_status != "BROKEN" and vca_status == "failed":
313 db_dict["configurationStatus"][vca_index] = "BROKEN"
314 except Exception as e:
315 # not update configurationStatus
316 self.logger.debug("Error updating vca_index (ignore): {}".format(e))
317
318 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
319 # if nsState = 'DEGRADED' check if all is OK
320 is_degraded = False
321 if current_ns_status in ("READY", "DEGRADED"):
322 error_description = ""
323 # check machines
324 if status_dict.get("machines"):
325 for machine_id in status_dict.get("machines"):
326 machine = status_dict.get("machines").get(machine_id)
327 # check machine agent-status
328 if machine.get("agent-status"):
329 s = machine.get("agent-status").get("status")
330 if s != "started":
331 is_degraded = True
332 error_description += (
333 "machine {} agent-status={} ; ".format(
334 machine_id, s
335 )
336 )
337 # check machine instance status
338 if machine.get("instance-status"):
339 s = machine.get("instance-status").get("status")
340 if s != "running":
341 is_degraded = True
342 error_description += (
343 "machine {} instance-status={} ; ".format(
344 machine_id, s
345 )
346 )
347 # check applications
348 if status_dict.get("applications"):
349 for app_id in status_dict.get("applications"):
350 app = status_dict.get("applications").get(app_id)
351 # check application status
352 if app.get("status"):
353 s = app.get("status").get("status")
354 if s != "active":
355 is_degraded = True
356 error_description += (
357 "application {} status={} ; ".format(app_id, s)
358 )
359
360 if error_description:
361 db_dict["errorDescription"] = error_description
362 if current_ns_status == "READY" and is_degraded:
363 db_dict["nsState"] = "DEGRADED"
364 if current_ns_status == "DEGRADED" and not is_degraded:
365 db_dict["nsState"] = "READY"
366
367 # write to database
368 self.update_db_2("nsrs", nsr_id, db_dict)
369
370 except (asyncio.CancelledError, asyncio.TimeoutError):
371 raise
372 except Exception as e:
373 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
374
375 async def _on_update_k8s_db(
376 self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
377 ):
378 """
379 Updating vca status in NSR record
380 :param cluster_uuid: UUID of a k8s cluster
381 :param kdu_instance: The unique name of the KDU instance
382 :param filter: To get nsr_id
383 :cluster_type: The cluster type (juju, k8s)
384 :return: none
385 """
386
387 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
388 # .format(cluster_uuid, kdu_instance, filter))
389
390 nsr_id = filter.get("_id")
391 try:
392 vca_status = await self.k8scluster_map[cluster_type].status_kdu(
393 cluster_uuid=cluster_uuid,
394 kdu_instance=kdu_instance,
395 yaml_format=False,
396 complete_status=True,
397 vca_id=vca_id,
398 )
399
400 # vcaStatus
401 db_dict = dict()
402 db_dict["vcaStatus"] = {nsr_id: vca_status}
403
404 self.logger.debug(
405 f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
406 )
407
408 # write to database
409 self.update_db_2("nsrs", nsr_id, db_dict)
410 except (asyncio.CancelledError, asyncio.TimeoutError):
411 raise
412 except Exception as e:
413 self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
414
415 @staticmethod
416 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
417 try:
418 env = Environment(
419 undefined=StrictUndefined,
420 autoescape=select_autoescape(default_for_string=True, default=True),
421 )
422 template = env.from_string(cloud_init_text)
423 return template.render(additional_params or {})
424 except UndefinedError as e:
425 raise LcmException(
426 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
427 "file, must be provided in the instantiation parameters inside the "
428 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
429 )
430 except (TemplateError, TemplateNotFound) as e:
431 raise LcmException(
432 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
433 vnfd_id, vdu_id, e
434 )
435 )
436
437 def _get_vdu_cloud_init_content(self, vdu, vnfd):
438 cloud_init_content = cloud_init_file = None
439 try:
440 if vdu.get("cloud-init-file"):
441 base_folder = vnfd["_admin"]["storage"]
442 if base_folder["pkg-dir"]:
443 cloud_init_file = "{}/{}/cloud_init/{}".format(
444 base_folder["folder"],
445 base_folder["pkg-dir"],
446 vdu["cloud-init-file"],
447 )
448 else:
449 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
450 base_folder["folder"],
451 vdu["cloud-init-file"],
452 )
453 with self.fs.file_open(cloud_init_file, "r") as ci_file:
454 cloud_init_content = ci_file.read()
455 elif vdu.get("cloud-init"):
456 cloud_init_content = vdu["cloud-init"]
457
458 return cloud_init_content
459 except FsException as e:
460 raise LcmException(
461 "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
462 vnfd["id"], vdu["id"], cloud_init_file, e
463 )
464 )
465
466 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
467 vdur = next(
468 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
469 )
470 additional_params = vdur.get("additionalParams")
471 return parse_yaml_strings(additional_params)
472
473 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
474 """
475 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
476 :param vnfd: input vnfd
477 :param new_id: overrides vnf id if provided
478 :param additionalParams: Instantiation params for VNFs provided
479 :param nsrId: Id of the NSR
480 :return: copy of vnfd
481 """
482 vnfd_RO = deepcopy(vnfd)
483 # remove unused by RO configuration, monitoring, scaling and internal keys
484 vnfd_RO.pop("_id", None)
485 vnfd_RO.pop("_admin", None)
486 vnfd_RO.pop("monitoring-param", None)
487 vnfd_RO.pop("scaling-group-descriptor", None)
488 vnfd_RO.pop("kdu", None)
489 vnfd_RO.pop("k8s-cluster", None)
490 if new_id:
491 vnfd_RO["id"] = new_id
492
493 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
494 for vdu in get_iterable(vnfd_RO, "vdu"):
495 vdu.pop("cloud-init-file", None)
496 vdu.pop("cloud-init", None)
497 return vnfd_RO
498
499 @staticmethod
500 def ip_profile_2_RO(ip_profile):
501 RO_ip_profile = deepcopy(ip_profile)
502 if "dns-server" in RO_ip_profile:
503 if isinstance(RO_ip_profile["dns-server"], list):
504 RO_ip_profile["dns-address"] = []
505 for ds in RO_ip_profile.pop("dns-server"):
506 RO_ip_profile["dns-address"].append(ds["address"])
507 else:
508 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
509 if RO_ip_profile.get("ip-version") == "ipv4":
510 RO_ip_profile["ip-version"] = "IPv4"
511 if RO_ip_profile.get("ip-version") == "ipv6":
512 RO_ip_profile["ip-version"] = "IPv6"
513 if "dhcp-params" in RO_ip_profile:
514 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
515 return RO_ip_profile
516
517 def _get_ro_vim_id_for_vim_account(self, vim_account):
518 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
519 if db_vim["_admin"]["operationalState"] != "ENABLED":
520 raise LcmException(
521 "VIM={} is not available. operationalState={}".format(
522 vim_account, db_vim["_admin"]["operationalState"]
523 )
524 )
525 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
526 return RO_vim_id
527
528 def get_ro_wim_id_for_wim_account(self, wim_account):
529 if isinstance(wim_account, str):
530 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
531 if db_wim["_admin"]["operationalState"] != "ENABLED":
532 raise LcmException(
533 "WIM={} is not available. operationalState={}".format(
534 wim_account, db_wim["_admin"]["operationalState"]
535 )
536 )
537 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
538 return RO_wim_id
539 else:
540 return wim_account
541
542 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
543
544 db_vdu_push_list = []
545 template_vdur = []
546 db_update = {"_admin.modified": time()}
547 if vdu_create:
548 for vdu_id, vdu_count in vdu_create.items():
549 vdur = next(
550 (
551 vdur
552 for vdur in reversed(db_vnfr["vdur"])
553 if vdur["vdu-id-ref"] == vdu_id
554 ),
555 None,
556 )
557 if not vdur:
558 # Read the template saved in the db:
559 self.logger.debug(
560 "No vdur in the database. Using the vdur-template to scale"
561 )
562 vdur_template = db_vnfr.get("vdur-template")
563 if not vdur_template:
564 raise LcmException(
565 "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
566 vdu_id
567 )
568 )
569 vdur = vdur_template[0]
570 # Delete a template from the database after using it
571 self.db.set_one(
572 "vnfrs",
573 {"_id": db_vnfr["_id"]},
574 None,
575 pull={"vdur-template": {"_id": vdur["_id"]}},
576 )
577 for count in range(vdu_count):
578 vdur_copy = deepcopy(vdur)
579 vdur_copy["status"] = "BUILD"
580 vdur_copy["status-detailed"] = None
581 vdur_copy["ip-address"] = None
582 vdur_copy["_id"] = str(uuid4())
583 vdur_copy["count-index"] += count + 1
584 vdur_copy["id"] = "{}-{}".format(
585 vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
586 )
587 vdur_copy.pop("vim_info", None)
588 for iface in vdur_copy["interfaces"]:
589 if iface.get("fixed-ip"):
590 iface["ip-address"] = self.increment_ip_mac(
591 iface["ip-address"], count + 1
592 )
593 else:
594 iface.pop("ip-address", None)
595 if iface.get("fixed-mac"):
596 iface["mac-address"] = self.increment_ip_mac(
597 iface["mac-address"], count + 1
598 )
599 else:
600 iface.pop("mac-address", None)
601 if db_vnfr["vdur"]:
602 iface.pop(
603 "mgmt_vnf", None
604 ) # only first vdu can be managment of vnf
605 db_vdu_push_list.append(vdur_copy)
606 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
607 if vdu_delete:
608 if len(db_vnfr["vdur"]) == 1:
609 # The scale will move to 0 instances
610 self.logger.debug(
611 "Scaling to 0 !, creating the template with the last vdur"
612 )
613 template_vdur = [db_vnfr["vdur"][0]]
614 for vdu_id, vdu_count in vdu_delete.items():
615 if mark_delete:
616 indexes_to_delete = [
617 iv[0]
618 for iv in enumerate(db_vnfr["vdur"])
619 if iv[1]["vdu-id-ref"] == vdu_id
620 ]
621 db_update.update(
622 {
623 "vdur.{}.status".format(i): "DELETING"
624 for i in indexes_to_delete[-vdu_count:]
625 }
626 )
627 else:
628 # it must be deleted one by one because common.db does not allow otherwise
629 vdus_to_delete = [
630 v
631 for v in reversed(db_vnfr["vdur"])
632 if v["vdu-id-ref"] == vdu_id
633 ]
634 for vdu in vdus_to_delete[:vdu_count]:
635 self.db.set_one(
636 "vnfrs",
637 {"_id": db_vnfr["_id"]},
638 None,
639 pull={"vdur": {"_id": vdu["_id"]}},
640 )
641 db_push = {}
642 if db_vdu_push_list:
643 db_push["vdur"] = db_vdu_push_list
644 if template_vdur:
645 db_push["vdur-template"] = template_vdur
646 if not db_push:
647 db_push = None
648 db_vnfr["vdur-template"] = template_vdur
649 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
650 # modify passed dictionary db_vnfr
651 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
652 db_vnfr["vdur"] = db_vnfr_["vdur"]
653
654 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
655 """
656 Updates database nsr with the RO info for the created vld
657 :param ns_update_nsr: dictionary to be filled with the updated info
658 :param db_nsr: content of db_nsr. This is also modified
659 :param nsr_desc_RO: nsr descriptor from RO
660 :return: Nothing, LcmException is raised on errors
661 """
662
663 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
664 for net_RO in get_iterable(nsr_desc_RO, "nets"):
665 if vld["id"] != net_RO.get("ns_net_osm_id"):
666 continue
667 vld["vim-id"] = net_RO.get("vim_net_id")
668 vld["name"] = net_RO.get("vim_name")
669 vld["status"] = net_RO.get("status")
670 vld["status-detailed"] = net_RO.get("error_msg")
671 ns_update_nsr["vld.{}".format(vld_index)] = vld
672 break
673 else:
674 raise LcmException(
675 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
676 )
677
678 def set_vnfr_at_error(self, db_vnfrs, error_text):
679 try:
680 for db_vnfr in db_vnfrs.values():
681 vnfr_update = {"status": "ERROR"}
682 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
683 if "status" not in vdur:
684 vdur["status"] = "ERROR"
685 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
686 if error_text:
687 vdur["status-detailed"] = str(error_text)
688 vnfr_update[
689 "vdur.{}.status-detailed".format(vdu_index)
690 ] = "ERROR"
691 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
692 except DbException as e:
693 self.logger.error("Cannot update vnf. {}".format(e))
694
695 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
696 """
697 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
698 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
699 :param nsr_desc_RO: nsr descriptor from RO
700 :return: Nothing, LcmException is raised on errors
701 """
702 for vnf_index, db_vnfr in db_vnfrs.items():
703 for vnf_RO in nsr_desc_RO["vnfs"]:
704 if vnf_RO["member_vnf_index"] != vnf_index:
705 continue
706 vnfr_update = {}
707 if vnf_RO.get("ip_address"):
708 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
709 "ip_address"
710 ].split(";")[0]
711 elif not db_vnfr.get("ip-address"):
712 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
713 raise LcmExceptionNoMgmtIP(
714 "ns member_vnf_index '{}' has no IP address".format(
715 vnf_index
716 )
717 )
718
719 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
720 vdur_RO_count_index = 0
721 if vdur.get("pdu-type"):
722 continue
723 for vdur_RO in get_iterable(vnf_RO, "vms"):
724 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
725 continue
726 if vdur["count-index"] != vdur_RO_count_index:
727 vdur_RO_count_index += 1
728 continue
729 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
730 if vdur_RO.get("ip_address"):
731 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
732 else:
733 vdur["ip-address"] = None
734 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
735 vdur["name"] = vdur_RO.get("vim_name")
736 vdur["status"] = vdur_RO.get("status")
737 vdur["status-detailed"] = vdur_RO.get("error_msg")
738 for ifacer in get_iterable(vdur, "interfaces"):
739 for interface_RO in get_iterable(vdur_RO, "interfaces"):
740 if ifacer["name"] == interface_RO.get("internal_name"):
741 ifacer["ip-address"] = interface_RO.get(
742 "ip_address"
743 )
744 ifacer["mac-address"] = interface_RO.get(
745 "mac_address"
746 )
747 break
748 else:
749 raise LcmException(
750 "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
751 "from VIM info".format(
752 vnf_index, vdur["vdu-id-ref"], ifacer["name"]
753 )
754 )
755 vnfr_update["vdur.{}".format(vdu_index)] = vdur
756 break
757 else:
758 raise LcmException(
759 "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
760 "VIM info".format(
761 vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
762 )
763 )
764
765 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
766 for net_RO in get_iterable(nsr_desc_RO, "nets"):
767 if vld["id"] != net_RO.get("vnf_net_osm_id"):
768 continue
769 vld["vim-id"] = net_RO.get("vim_net_id")
770 vld["name"] = net_RO.get("vim_name")
771 vld["status"] = net_RO.get("status")
772 vld["status-detailed"] = net_RO.get("error_msg")
773 vnfr_update["vld.{}".format(vld_index)] = vld
774 break
775 else:
776 raise LcmException(
777 "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
778 vnf_index, vld["id"]
779 )
780 )
781
782 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
783 break
784
785 else:
786 raise LcmException(
787 "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
788 vnf_index
789 )
790 )
791
792 def _get_ns_config_info(self, nsr_id):
793 """
794 Generates a mapping between vnf,vdu elements and the N2VC id
795 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
796 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
797 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
798 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
799 """
800 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
801 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
802 mapping = {}
803 ns_config_info = {"osm-config-mapping": mapping}
804 for vca in vca_deployed_list:
805 if not vca["member-vnf-index"]:
806 continue
807 if not vca["vdu_id"]:
808 mapping[vca["member-vnf-index"]] = vca["application"]
809 else:
810 mapping[
811 "{}.{}.{}".format(
812 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
813 )
814 ] = vca["application"]
815 return ns_config_info
816
817 async def _instantiate_ng_ro(
818 self,
819 logging_text,
820 nsr_id,
821 nsd,
822 db_nsr,
823 db_nslcmop,
824 db_vnfrs,
825 db_vnfds,
826 n2vc_key_list,
827 stage,
828 start_deploy,
829 timeout_ns_deploy,
830 ):
831
832 db_vims = {}
833
834 def get_vim_account(vim_account_id):
835 nonlocal db_vims
836 if vim_account_id in db_vims:
837 return db_vims[vim_account_id]
838 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
839 db_vims[vim_account_id] = db_vim
840 return db_vim
841
842 # modify target_vld info with instantiation parameters
843 def parse_vld_instantiation_params(
844 target_vim, target_vld, vld_params, target_sdn
845 ):
846 if vld_params.get("ip-profile"):
847 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
848 "ip-profile"
849 ]
850 if vld_params.get("provider-network"):
851 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
852 "provider-network"
853 ]
854 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
855 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
856 "provider-network"
857 ]["sdn-ports"]
858 if vld_params.get("wimAccountId"):
859 target_wim = "wim:{}".format(vld_params["wimAccountId"])
860 target_vld["vim_info"][target_wim] = {}
861 for param in ("vim-network-name", "vim-network-id"):
862 if vld_params.get(param):
863 if isinstance(vld_params[param], dict):
864 for vim, vim_net in vld_params[param].items():
865 other_target_vim = "vim:" + vim
866 populate_dict(
867 target_vld["vim_info"],
868 (other_target_vim, param.replace("-", "_")),
869 vim_net,
870 )
871 else: # isinstance str
872 target_vld["vim_info"][target_vim][
873 param.replace("-", "_")
874 ] = vld_params[param]
875 if vld_params.get("common_id"):
876 target_vld["common_id"] = vld_params.get("common_id")
877
878 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
879 def update_ns_vld_target(target, ns_params):
880 for vnf_params in ns_params.get("vnf", ()):
881 if vnf_params.get("vimAccountId"):
882 target_vnf = next(
883 (
884 vnfr
885 for vnfr in db_vnfrs.values()
886 if vnf_params["member-vnf-index"]
887 == vnfr["member-vnf-index-ref"]
888 ),
889 None,
890 )
891 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
892 for a_index, a_vld in enumerate(target["ns"]["vld"]):
893 target_vld = find_in_list(
894 get_iterable(vdur, "interfaces"),
895 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
896 )
897
898 vld_params = find_in_list(
899 get_iterable(ns_params, "vld"),
900 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
901 )
902 if target_vld:
903
904 if vnf_params.get("vimAccountId") not in a_vld.get(
905 "vim_info", {}
906 ):
907 target_vim_network_list = [
908 v for _, v in a_vld.get("vim_info").items()
909 ]
910 target_vim_network_name = next(
911 (
912 item.get("vim_network_name", "")
913 for item in target_vim_network_list
914 ),
915 "",
916 )
917
918 target["ns"]["vld"][a_index].get("vim_info").update(
919 {
920 "vim:{}".format(vnf_params["vimAccountId"]): {
921 "vim_network_name": target_vim_network_name,
922 }
923 }
924 )
925
926 if vld_params:
927 for param in ("vim-network-name", "vim-network-id"):
928 if vld_params.get(param) and isinstance(
929 vld_params[param], dict
930 ):
931 for vim, vim_net in vld_params[
932 param
933 ].items():
934 other_target_vim = "vim:" + vim
935 populate_dict(
936 target["ns"]["vld"][a_index].get(
937 "vim_info"
938 ),
939 (
940 other_target_vim,
941 param.replace("-", "_"),
942 ),
943 vim_net,
944 )
945
946 nslcmop_id = db_nslcmop["_id"]
947 target = {
948 "name": db_nsr["name"],
949 "ns": {"vld": []},
950 "vnf": [],
951 "image": deepcopy(db_nsr["image"]),
952 "flavor": deepcopy(db_nsr["flavor"]),
953 "action_id": nslcmop_id,
954 "cloud_init_content": {},
955 }
956 for image in target["image"]:
957 image["vim_info"] = {}
958 for flavor in target["flavor"]:
959 flavor["vim_info"] = {}
960 if db_nsr.get("affinity-or-anti-affinity-group"):
961 target["affinity-or-anti-affinity-group"] = deepcopy(
962 db_nsr["affinity-or-anti-affinity-group"]
963 )
964 for affinity_or_anti_affinity_group in target[
965 "affinity-or-anti-affinity-group"
966 ]:
967 affinity_or_anti_affinity_group["vim_info"] = {}
968
969 if db_nslcmop.get("lcmOperationType") != "instantiate":
970 # get parameters of instantiation:
971 db_nslcmop_instantiate = self.db.get_list(
972 "nslcmops",
973 {
974 "nsInstanceId": db_nslcmop["nsInstanceId"],
975 "lcmOperationType": "instantiate",
976 },
977 )[-1]
978 ns_params = db_nslcmop_instantiate.get("operationParams")
979 else:
980 ns_params = db_nslcmop.get("operationParams")
981 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
982 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
983
984 cp2target = {}
985 for vld_index, vld in enumerate(db_nsr.get("vld")):
986 target_vim = "vim:{}".format(ns_params["vimAccountId"])
987 target_vld = {
988 "id": vld["id"],
989 "name": vld["name"],
990 "mgmt-network": vld.get("mgmt-network", False),
991 "type": vld.get("type"),
992 "vim_info": {
993 target_vim: {
994 "vim_network_name": vld.get("vim-network-name"),
995 "vim_account_id": ns_params["vimAccountId"],
996 }
997 },
998 }
999 # check if this network needs SDN assist
1000 if vld.get("pci-interfaces"):
1001 db_vim = get_vim_account(ns_params["vimAccountId"])
1002 sdnc_id = db_vim["config"].get("sdn-controller")
1003 if sdnc_id:
1004 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1005 target_sdn = "sdn:{}".format(sdnc_id)
1006 target_vld["vim_info"][target_sdn] = {
1007 "sdn": True,
1008 "target_vim": target_vim,
1009 "vlds": [sdn_vld],
1010 "type": vld.get("type"),
1011 }
1012
1013 nsd_vnf_profiles = get_vnf_profiles(nsd)
1014 for nsd_vnf_profile in nsd_vnf_profiles:
1015 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1016 if cp["virtual-link-profile-id"] == vld["id"]:
1017 cp2target[
1018 "member_vnf:{}.{}".format(
1019 cp["constituent-cpd-id"][0][
1020 "constituent-base-element-id"
1021 ],
1022 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1023 )
1024 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1025
1026 # check at nsd descriptor, if there is an ip-profile
1027 vld_params = {}
1028 nsd_vlp = find_in_list(
1029 get_virtual_link_profiles(nsd),
1030 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1031 == vld["id"],
1032 )
1033 if (
1034 nsd_vlp
1035 and nsd_vlp.get("virtual-link-protocol-data")
1036 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1037 ):
1038 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1039 "l3-protocol-data"
1040 ]
1041 ip_profile_dest_data = {}
1042 if "ip-version" in ip_profile_source_data:
1043 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1044 "ip-version"
1045 ]
1046 if "cidr" in ip_profile_source_data:
1047 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1048 "cidr"
1049 ]
1050 if "gateway-ip" in ip_profile_source_data:
1051 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1052 "gateway-ip"
1053 ]
1054 if "dhcp-enabled" in ip_profile_source_data:
1055 ip_profile_dest_data["dhcp-params"] = {
1056 "enabled": ip_profile_source_data["dhcp-enabled"]
1057 }
1058 vld_params["ip-profile"] = ip_profile_dest_data
1059
1060 # update vld_params with instantiation params
1061 vld_instantiation_params = find_in_list(
1062 get_iterable(ns_params, "vld"),
1063 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1064 )
1065 if vld_instantiation_params:
1066 vld_params.update(vld_instantiation_params)
1067 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1068 target["ns"]["vld"].append(target_vld)
1069 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1070 update_ns_vld_target(target, ns_params)
1071
1072 for vnfr in db_vnfrs.values():
1073 vnfd = find_in_list(
1074 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1075 )
1076 vnf_params = find_in_list(
1077 get_iterable(ns_params, "vnf"),
1078 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1079 )
1080 target_vnf = deepcopy(vnfr)
1081 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1082 for vld in target_vnf.get("vld", ()):
1083 # check if connected to a ns.vld, to fill target'
1084 vnf_cp = find_in_list(
1085 vnfd.get("int-virtual-link-desc", ()),
1086 lambda cpd: cpd.get("id") == vld["id"],
1087 )
1088 if vnf_cp:
1089 ns_cp = "member_vnf:{}.{}".format(
1090 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1091 )
1092 if cp2target.get(ns_cp):
1093 vld["target"] = cp2target[ns_cp]
1094
1095 vld["vim_info"] = {
1096 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1097 }
1098 # check if this network needs SDN assist
1099 target_sdn = None
1100 if vld.get("pci-interfaces"):
1101 db_vim = get_vim_account(vnfr["vim-account-id"])
1102 sdnc_id = db_vim["config"].get("sdn-controller")
1103 if sdnc_id:
1104 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1105 target_sdn = "sdn:{}".format(sdnc_id)
1106 vld["vim_info"][target_sdn] = {
1107 "sdn": True,
1108 "target_vim": target_vim,
1109 "vlds": [sdn_vld],
1110 "type": vld.get("type"),
1111 }
1112
1113 # check at vnfd descriptor, if there is an ip-profile
1114 vld_params = {}
1115 vnfd_vlp = find_in_list(
1116 get_virtual_link_profiles(vnfd),
1117 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1118 )
1119 if (
1120 vnfd_vlp
1121 and vnfd_vlp.get("virtual-link-protocol-data")
1122 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1123 ):
1124 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1125 "l3-protocol-data"
1126 ]
1127 ip_profile_dest_data = {}
1128 if "ip-version" in ip_profile_source_data:
1129 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1130 "ip-version"
1131 ]
1132 if "cidr" in ip_profile_source_data:
1133 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1134 "cidr"
1135 ]
1136 if "gateway-ip" in ip_profile_source_data:
1137 ip_profile_dest_data[
1138 "gateway-address"
1139 ] = ip_profile_source_data["gateway-ip"]
1140 if "dhcp-enabled" in ip_profile_source_data:
1141 ip_profile_dest_data["dhcp-params"] = {
1142 "enabled": ip_profile_source_data["dhcp-enabled"]
1143 }
1144
1145 vld_params["ip-profile"] = ip_profile_dest_data
1146 # update vld_params with instantiation params
1147 if vnf_params:
1148 vld_instantiation_params = find_in_list(
1149 get_iterable(vnf_params, "internal-vld"),
1150 lambda i_vld: i_vld["name"] == vld["id"],
1151 )
1152 if vld_instantiation_params:
1153 vld_params.update(vld_instantiation_params)
1154 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1155
1156 vdur_list = []
1157 for vdur in target_vnf.get("vdur", ()):
1158 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1159 continue # This vdu must not be created
1160 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1161
1162 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1163
1164 if ssh_keys_all:
1165 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1166 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1167 if (
1168 vdu_configuration
1169 and vdu_configuration.get("config-access")
1170 and vdu_configuration.get("config-access").get("ssh-access")
1171 ):
1172 vdur["ssh-keys"] = ssh_keys_all
1173 vdur["ssh-access-required"] = vdu_configuration[
1174 "config-access"
1175 ]["ssh-access"]["required"]
1176 elif (
1177 vnf_configuration
1178 and vnf_configuration.get("config-access")
1179 and vnf_configuration.get("config-access").get("ssh-access")
1180 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1181 ):
1182 vdur["ssh-keys"] = ssh_keys_all
1183 vdur["ssh-access-required"] = vnf_configuration[
1184 "config-access"
1185 ]["ssh-access"]["required"]
1186 elif ssh_keys_instantiation and find_in_list(
1187 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1188 ):
1189 vdur["ssh-keys"] = ssh_keys_instantiation
1190
1191 self.logger.debug("NS > vdur > {}".format(vdur))
1192
1193 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1194 # cloud-init
1195 if vdud.get("cloud-init-file"):
1196 vdur["cloud-init"] = "{}:file:{}".format(
1197 vnfd["_id"], vdud.get("cloud-init-file")
1198 )
1199 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1200 if vdur["cloud-init"] not in target["cloud_init_content"]:
1201 base_folder = vnfd["_admin"]["storage"]
1202 if base_folder["pkg-dir"]:
1203 cloud_init_file = "{}/{}/cloud_init/{}".format(
1204 base_folder["folder"],
1205 base_folder["pkg-dir"],
1206 vdud.get("cloud-init-file"),
1207 )
1208 else:
1209 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1210 base_folder["folder"],
1211 vdud.get("cloud-init-file"),
1212 )
1213 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1214 target["cloud_init_content"][
1215 vdur["cloud-init"]
1216 ] = ci_file.read()
1217 elif vdud.get("cloud-init"):
1218 vdur["cloud-init"] = "{}:vdu:{}".format(
1219 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1220 )
1221 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1222 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1223 "cloud-init"
1224 ]
1225 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1226 deploy_params_vdu = self._format_additional_params(
1227 vdur.get("additionalParams") or {}
1228 )
1229 deploy_params_vdu["OSM"] = get_osm_params(
1230 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1231 )
1232 vdur["additionalParams"] = deploy_params_vdu
1233
1234 # flavor
1235 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1236 if target_vim not in ns_flavor["vim_info"]:
1237 ns_flavor["vim_info"][target_vim] = {}
1238
1239 # deal with images
1240 # in case alternative images are provided we must check if they should be applied
1241 # for the vim_type, modify the vim_type taking into account
1242 ns_image_id = int(vdur["ns-image-id"])
1243 if vdur.get("alt-image-ids"):
1244 db_vim = get_vim_account(vnfr["vim-account-id"])
1245 vim_type = db_vim["vim_type"]
1246 for alt_image_id in vdur.get("alt-image-ids"):
1247 ns_alt_image = target["image"][int(alt_image_id)]
1248 if vim_type == ns_alt_image.get("vim-type"):
1249 # must use alternative image
1250 self.logger.debug(
1251 "use alternative image id: {}".format(alt_image_id)
1252 )
1253 ns_image_id = alt_image_id
1254 vdur["ns-image-id"] = ns_image_id
1255 break
1256 ns_image = target["image"][int(ns_image_id)]
1257 if target_vim not in ns_image["vim_info"]:
1258 ns_image["vim_info"][target_vim] = {}
1259
1260 # Affinity groups
1261 if vdur.get("affinity-or-anti-affinity-group-id"):
1262 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1263 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1264 if target_vim not in ns_ags["vim_info"]:
1265 ns_ags["vim_info"][target_vim] = {}
1266
1267 vdur["vim_info"] = {target_vim: {}}
1268 # instantiation parameters
1269 if vnf_params:
1270 vdu_instantiation_params = find_in_list(
1271 get_iterable(vnf_params, "vdu"),
1272 lambda i_vdu: i_vdu["id"] == vdud["id"],
1273 )
1274 if vdu_instantiation_params:
1275 # Parse the vdu_volumes from the instantiation params
1276 vdu_volumes = get_volumes_from_instantiation_params(
1277 vdu_instantiation_params, vdud
1278 )
1279 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1280 vdur_list.append(vdur)
1281 target_vnf["vdur"] = vdur_list
1282 target["vnf"].append(target_vnf)
1283
1284 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1285 desc = await self.RO.deploy(nsr_id, target)
1286 self.logger.debug("RO return > {}".format(desc))
1287 action_id = desc["action_id"]
1288 await self._wait_ng_ro(
1289 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
1290 operation="instantiation"
1291 )
1292
1293 # Updating NSR
1294 db_nsr_update = {
1295 "_admin.deployed.RO.operational-status": "running",
1296 "detailed-status": " ".join(stage),
1297 }
1298 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1299 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1300 self._write_op_status(nslcmop_id, stage)
1301 self.logger.debug(
1302 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1303 )
1304 return
1305
1306 async def _wait_ng_ro(
1307 self,
1308 nsr_id,
1309 action_id,
1310 nslcmop_id=None,
1311 start_time=None,
1312 timeout=600,
1313 stage=None,
1314 operation=None,
1315 ):
1316 detailed_status_old = None
1317 db_nsr_update = {}
1318 start_time = start_time or time()
1319 while time() <= start_time + timeout:
1320 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1321 self.logger.debug("Wait NG RO > {}".format(desc_status))
1322 if desc_status["status"] == "FAILED":
1323 raise NgRoException(desc_status["details"])
1324 elif desc_status["status"] == "BUILD":
1325 if stage:
1326 stage[2] = "VIM: ({})".format(desc_status["details"])
1327 elif desc_status["status"] == "DONE":
1328 if stage:
1329 stage[2] = "Deployed at VIM"
1330 break
1331 else:
1332 assert False, "ROclient.check_ns_status returns unknown {}".format(
1333 desc_status["status"]
1334 )
1335 if stage and nslcmop_id and stage[2] != detailed_status_old:
1336 detailed_status_old = stage[2]
1337 db_nsr_update["detailed-status"] = " ".join(stage)
1338 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1339 self._write_op_status(nslcmop_id, stage)
1340 await asyncio.sleep(15, loop=self.loop)
1341 else: # timeout_ns_deploy
1342 raise NgRoException("Timeout waiting ns to deploy")
1343
1344 async def _terminate_ng_ro(
1345 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1346 ):
1347 db_nsr_update = {}
1348 failed_detail = []
1349 action_id = None
1350 start_deploy = time()
1351 try:
1352 target = {
1353 "ns": {"vld": []},
1354 "vnf": [],
1355 "image": [],
1356 "flavor": [],
1357 "action_id": nslcmop_id,
1358 }
1359 desc = await self.RO.deploy(nsr_id, target)
1360 action_id = desc["action_id"]
1361 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1362 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1363 self.logger.debug(
1364 logging_text
1365 + "ns terminate action at RO. action_id={}".format(action_id)
1366 )
1367
1368 # wait until done
1369 delete_timeout = 20 * 60 # 20 minutes
1370 await self._wait_ng_ro(
1371 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1372 operation="termination"
1373 )
1374
1375 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1376 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1377 # delete all nsr
1378 await self.RO.delete(nsr_id)
1379 except Exception as e:
1380 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1381 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1382 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1383 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1384 self.logger.debug(
1385 logging_text + "RO_action_id={} already deleted".format(action_id)
1386 )
1387 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1388 failed_detail.append("delete conflict: {}".format(e))
1389 self.logger.debug(
1390 logging_text
1391 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1392 )
1393 else:
1394 failed_detail.append("delete error: {}".format(e))
1395 self.logger.error(
1396 logging_text
1397 + "RO_action_id={} delete error: {}".format(action_id, e)
1398 )
1399
1400 if failed_detail:
1401 stage[2] = "Error deleting from VIM"
1402 else:
1403 stage[2] = "Deleted from VIM"
1404 db_nsr_update["detailed-status"] = " ".join(stage)
1405 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1406 self._write_op_status(nslcmop_id, stage)
1407
1408 if failed_detail:
1409 raise LcmException("; ".join(failed_detail))
1410 return
1411
1412 async def instantiate_RO(
1413 self,
1414 logging_text,
1415 nsr_id,
1416 nsd,
1417 db_nsr,
1418 db_nslcmop,
1419 db_vnfrs,
1420 db_vnfds,
1421 n2vc_key_list,
1422 stage,
1423 ):
1424 """
1425 Instantiate at RO
1426 :param logging_text: preffix text to use at logging
1427 :param nsr_id: nsr identity
1428 :param nsd: database content of ns descriptor
1429 :param db_nsr: database content of ns record
1430 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1431 :param db_vnfrs:
1432 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1433 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1434 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1435 :return: None or exception
1436 """
1437 try:
1438 start_deploy = time()
1439 ns_params = db_nslcmop.get("operationParams")
1440 if ns_params and ns_params.get("timeout_ns_deploy"):
1441 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1442 else:
1443 timeout_ns_deploy = self.timeout.get(
1444 "ns_deploy", self.timeout_ns_deploy
1445 )
1446
1447 # Check for and optionally request placement optimization. Database will be updated if placement activated
1448 stage[2] = "Waiting for Placement."
1449 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1450 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1451 for vnfr in db_vnfrs.values():
1452 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1453 break
1454 else:
1455 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1456
1457 return await self._instantiate_ng_ro(
1458 logging_text,
1459 nsr_id,
1460 nsd,
1461 db_nsr,
1462 db_nslcmop,
1463 db_vnfrs,
1464 db_vnfds,
1465 n2vc_key_list,
1466 stage,
1467 start_deploy,
1468 timeout_ns_deploy,
1469 )
1470 except Exception as e:
1471 stage[2] = "ERROR deploying at VIM"
1472 self.set_vnfr_at_error(db_vnfrs, str(e))
1473 self.logger.error(
1474 "Error deploying at VIM {}".format(e),
1475 exc_info=not isinstance(
1476 e,
1477 (
1478 ROclient.ROClientException,
1479 LcmException,
1480 DbException,
1481 NgRoException,
1482 ),
1483 ),
1484 )
1485 raise
1486
1487 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1488 """
1489 Wait for kdu to be up, get ip address
1490 :param logging_text: prefix use for logging
1491 :param nsr_id:
1492 :param vnfr_id:
1493 :param kdu_name:
1494 :return: IP address, K8s services
1495 """
1496
1497 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1498 nb_tries = 0
1499
1500 while nb_tries < 360:
1501 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1502 kdur = next(
1503 (
1504 x
1505 for x in get_iterable(db_vnfr, "kdur")
1506 if x.get("kdu-name") == kdu_name
1507 ),
1508 None,
1509 )
1510 if not kdur:
1511 raise LcmException(
1512 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1513 )
1514 if kdur.get("status"):
1515 if kdur["status"] in ("READY", "ENABLED"):
1516 return kdur.get("ip-address"), kdur.get("services")
1517 else:
1518 raise LcmException(
1519 "target KDU={} is in error state".format(kdu_name)
1520 )
1521
1522 await asyncio.sleep(10, loop=self.loop)
1523 nb_tries += 1
1524 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1525
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip address at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id: target vdu id; None to target the VNF management VM
        :param vdu_index: count-index of the target vdu (used together with vdu_id)
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        :raises LcmException: when the VNF/VM is in error state, the vdur cannot
            be found, or the maximum number of retries is exceeded
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retries of classic-RO key injection (capped at 20)
        target_vdu_id = None
        ro_retries = 0  # polling iterations: 360 x 10s ~= 1 hour

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            # NOTE(review): asyncio.sleep dropped the `loop` parameter in Python 3.10
            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # locate the vdur that owns the VNF management ip address
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # locate the vdur by vdu id and count-index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # consider the VM up when it is a PDU (always up) or either the
                # classic or NG-RO status reports ACTIVE
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # a PDU is not a VIM-managed VM: key injection is not possible
                    # NOTE(review): "ssh-ky" typo kept; it is a runtime log message
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: key injection is requested as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # classic RO path
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                            else:
                                raise ROclient.ROClientException(
                                    "error injecting key: {}".format(
                                        result.get("description")
                                    )
                                )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may fail transiently: retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # no key to inject: the ip address is all the caller needs
                break

        return ip_address
1702
1703 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1704 """
1705 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1706 """
1707 my_vca = vca_deployed_list[vca_index]
1708 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1709 # vdu or kdu: no dependencies
1710 return
1711 timeout = 300
1712 while timeout >= 0:
1713 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1714 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1715 configuration_status_list = db_nsr["configurationStatus"]
1716 for index, vca_deployed in enumerate(configuration_status_list):
1717 if index == vca_index:
1718 # myself
1719 continue
1720 if not my_vca.get("member-vnf-index") or (
1721 vca_deployed.get("member-vnf-index")
1722 == my_vca.get("member-vnf-index")
1723 ):
1724 internal_status = configuration_status_list[index].get("status")
1725 if internal_status == "READY":
1726 continue
1727 elif internal_status == "BROKEN":
1728 raise LcmException(
1729 "Configuration aborted because dependent charm/s has failed"
1730 )
1731 else:
1732 break
1733 else:
1734 # no dependencies, return
1735 return
1736 await asyncio.sleep(10)
1737 timeout -= 1
1738
1739 raise LcmException("Configuration aborted because dependent charm/s timeout")
1740
1741 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1742 vca_id = None
1743 if db_vnfr:
1744 vca_id = deep_get(db_vnfr, ("vca-id",))
1745 elif db_nsr:
1746 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1747 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1748 return vca_id
1749
1750 async def instantiate_N2VC(
1751 self,
1752 logging_text,
1753 vca_index,
1754 nsi_id,
1755 db_nsr,
1756 db_vnfr,
1757 vdu_id,
1758 kdu_name,
1759 vdu_index,
1760 config_descriptor,
1761 deploy_params,
1762 base_folder,
1763 nslcmop_id,
1764 stage,
1765 vca_type,
1766 vca_name,
1767 ee_config_descriptor,
1768 ):
1769 nsr_id = db_nsr["_id"]
1770 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1771 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1772 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1773 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1774 db_dict = {
1775 "collection": "nsrs",
1776 "filter": {"_id": nsr_id},
1777 "path": db_update_entry,
1778 }
1779 step = ""
1780 try:
1781
1782 element_type = "NS"
1783 element_under_configuration = nsr_id
1784
1785 vnfr_id = None
1786 if db_vnfr:
1787 vnfr_id = db_vnfr["_id"]
1788 osm_config["osm"]["vnf_id"] = vnfr_id
1789
1790 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1791
1792 if vca_type == "native_charm":
1793 index_number = 0
1794 else:
1795 index_number = vdu_index or 0
1796
1797 if vnfr_id:
1798 element_type = "VNF"
1799 element_under_configuration = vnfr_id
1800 namespace += ".{}-{}".format(vnfr_id, index_number)
1801 if vdu_id:
1802 namespace += ".{}-{}".format(vdu_id, index_number)
1803 element_type = "VDU"
1804 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1805 osm_config["osm"]["vdu_id"] = vdu_id
1806 elif kdu_name:
1807 namespace += ".{}".format(kdu_name)
1808 element_type = "KDU"
1809 element_under_configuration = kdu_name
1810 osm_config["osm"]["kdu_name"] = kdu_name
1811
1812 # Get artifact path
1813 if base_folder["pkg-dir"]:
1814 artifact_path = "{}/{}/{}/{}".format(
1815 base_folder["folder"],
1816 base_folder["pkg-dir"],
1817 "charms"
1818 if vca_type
1819 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1820 else "helm-charts",
1821 vca_name,
1822 )
1823 else:
1824 artifact_path = "{}/Scripts/{}/{}/".format(
1825 base_folder["folder"],
1826 "charms"
1827 if vca_type
1828 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1829 else "helm-charts",
1830 vca_name,
1831 )
1832
1833 self.logger.debug("Artifact path > {}".format(artifact_path))
1834
1835 # get initial_config_primitive_list that applies to this element
1836 initial_config_primitive_list = config_descriptor.get(
1837 "initial-config-primitive"
1838 )
1839
1840 self.logger.debug(
1841 "Initial config primitive list > {}".format(
1842 initial_config_primitive_list
1843 )
1844 )
1845
1846 # add config if not present for NS charm
1847 ee_descriptor_id = ee_config_descriptor.get("id")
1848 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1849 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1850 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1851 )
1852
1853 self.logger.debug(
1854 "Initial config primitive list #2 > {}".format(
1855 initial_config_primitive_list
1856 )
1857 )
1858 # n2vc_redesign STEP 3.1
1859 # find old ee_id if exists
1860 ee_id = vca_deployed.get("ee_id")
1861
1862 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1863 # create or register execution environment in VCA
1864 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1865
1866 self._write_configuration_status(
1867 nsr_id=nsr_id,
1868 vca_index=vca_index,
1869 status="CREATING",
1870 element_under_configuration=element_under_configuration,
1871 element_type=element_type,
1872 )
1873
1874 step = "create execution environment"
1875 self.logger.debug(logging_text + step)
1876
1877 ee_id = None
1878 credentials = None
1879 if vca_type == "k8s_proxy_charm":
1880 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1881 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1882 namespace=namespace,
1883 artifact_path=artifact_path,
1884 db_dict=db_dict,
1885 vca_id=vca_id,
1886 )
1887 elif vca_type == "helm" or vca_type == "helm-v3":
1888 ee_id, credentials = await self.vca_map[
1889 vca_type
1890 ].create_execution_environment(
1891 namespace=namespace,
1892 reuse_ee_id=ee_id,
1893 db_dict=db_dict,
1894 config=osm_config,
1895 artifact_path=artifact_path,
1896 vca_type=vca_type,
1897 )
1898 else:
1899 ee_id, credentials = await self.vca_map[
1900 vca_type
1901 ].create_execution_environment(
1902 namespace=namespace,
1903 reuse_ee_id=ee_id,
1904 db_dict=db_dict,
1905 vca_id=vca_id,
1906 )
1907
1908 elif vca_type == "native_charm":
1909 step = "Waiting to VM being up and getting IP address"
1910 self.logger.debug(logging_text + step)
1911 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1912 logging_text,
1913 nsr_id,
1914 vnfr_id,
1915 vdu_id,
1916 vdu_index,
1917 user=None,
1918 pub_key=None,
1919 )
1920 credentials = {"hostname": rw_mgmt_ip}
1921 # get username
1922 username = deep_get(
1923 config_descriptor, ("config-access", "ssh-access", "default-user")
1924 )
1925 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1926 # merged. Meanwhile let's get username from initial-config-primitive
1927 if not username and initial_config_primitive_list:
1928 for config_primitive in initial_config_primitive_list:
1929 for param in config_primitive.get("parameter", ()):
1930 if param["name"] == "ssh-username":
1931 username = param["value"]
1932 break
1933 if not username:
1934 raise LcmException(
1935 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1936 "'config-access.ssh-access.default-user'"
1937 )
1938 credentials["username"] = username
1939 # n2vc_redesign STEP 3.2
1940
1941 self._write_configuration_status(
1942 nsr_id=nsr_id,
1943 vca_index=vca_index,
1944 status="REGISTERING",
1945 element_under_configuration=element_under_configuration,
1946 element_type=element_type,
1947 )
1948
1949 step = "register execution environment {}".format(credentials)
1950 self.logger.debug(logging_text + step)
1951 ee_id = await self.vca_map[vca_type].register_execution_environment(
1952 credentials=credentials,
1953 namespace=namespace,
1954 db_dict=db_dict,
1955 vca_id=vca_id,
1956 )
1957
1958 # for compatibility with MON/POL modules, the need model and application name at database
1959 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1960 ee_id_parts = ee_id.split(".")
1961 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1962 if len(ee_id_parts) >= 2:
1963 model_name = ee_id_parts[0]
1964 application_name = ee_id_parts[1]
1965 db_nsr_update[db_update_entry + "model"] = model_name
1966 db_nsr_update[db_update_entry + "application"] = application_name
1967
1968 # n2vc_redesign STEP 3.3
1969 step = "Install configuration Software"
1970
1971 self._write_configuration_status(
1972 nsr_id=nsr_id,
1973 vca_index=vca_index,
1974 status="INSTALLING SW",
1975 element_under_configuration=element_under_configuration,
1976 element_type=element_type,
1977 other_update=db_nsr_update,
1978 )
1979
1980 # TODO check if already done
1981 self.logger.debug(logging_text + step)
1982 config = None
1983 if vca_type == "native_charm":
1984 config_primitive = next(
1985 (p for p in initial_config_primitive_list if p["name"] == "config"),
1986 None,
1987 )
1988 if config_primitive:
1989 config = self._map_primitive_params(
1990 config_primitive, {}, deploy_params
1991 )
1992 num_units = 1
1993 if vca_type == "lxc_proxy_charm":
1994 if element_type == "NS":
1995 num_units = db_nsr.get("config-units") or 1
1996 elif element_type == "VNF":
1997 num_units = db_vnfr.get("config-units") or 1
1998 elif element_type == "VDU":
1999 for v in db_vnfr["vdur"]:
2000 if vdu_id == v["vdu-id-ref"]:
2001 num_units = v.get("config-units") or 1
2002 break
2003 if vca_type != "k8s_proxy_charm":
2004 await self.vca_map[vca_type].install_configuration_sw(
2005 ee_id=ee_id,
2006 artifact_path=artifact_path,
2007 db_dict=db_dict,
2008 config=config,
2009 num_units=num_units,
2010 vca_id=vca_id,
2011 vca_type=vca_type,
2012 )
2013
2014 # write in db flag of configuration_sw already installed
2015 self.update_db_2(
2016 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2017 )
2018
2019 # add relations for this VCA (wait for other peers related with this VCA)
2020 await self._add_vca_relations(
2021 logging_text=logging_text,
2022 nsr_id=nsr_id,
2023 vca_type=vca_type,
2024 vca_index=vca_index,
2025 )
2026
2027 # if SSH access is required, then get execution environment SSH public
2028 # if native charm we have waited already to VM be UP
2029 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2030 pub_key = None
2031 user = None
2032 # self.logger.debug("get ssh key block")
2033 if deep_get(
2034 config_descriptor, ("config-access", "ssh-access", "required")
2035 ):
2036 # self.logger.debug("ssh key needed")
2037 # Needed to inject a ssh key
2038 user = deep_get(
2039 config_descriptor,
2040 ("config-access", "ssh-access", "default-user"),
2041 )
2042 step = "Install configuration Software, getting public ssh key"
2043 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2044 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2045 )
2046
2047 step = "Insert public key into VM user={} ssh_key={}".format(
2048 user, pub_key
2049 )
2050 else:
2051 # self.logger.debug("no need to get ssh key")
2052 step = "Waiting to VM being up and getting IP address"
2053 self.logger.debug(logging_text + step)
2054
2055 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2056 rw_mgmt_ip = None
2057
2058 # n2vc_redesign STEP 5.1
2059 # wait for RO (ip-address) Insert pub_key into VM
2060 if vnfr_id:
2061 if kdu_name:
2062 rw_mgmt_ip, services = await self.wait_kdu_up(
2063 logging_text, nsr_id, vnfr_id, kdu_name
2064 )
2065 vnfd = self.db.get_one(
2066 "vnfds_revisions",
2067 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2068 )
2069 kdu = get_kdu(vnfd, kdu_name)
2070 kdu_services = [
2071 service["name"] for service in get_kdu_services(kdu)
2072 ]
2073 exposed_services = []
2074 for service in services:
2075 if any(s in service["name"] for s in kdu_services):
2076 exposed_services.append(service)
2077 await self.vca_map[vca_type].exec_primitive(
2078 ee_id=ee_id,
2079 primitive_name="config",
2080 params_dict={
2081 "osm-config": json.dumps(
2082 OsmConfigBuilder(
2083 k8s={"services": exposed_services}
2084 ).build()
2085 )
2086 },
2087 vca_id=vca_id,
2088 )
2089
2090 # This verification is needed in order to avoid trying to add a public key
2091 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2092 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2093 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2094 # or it is a KNF)
2095 elif db_vnfr.get('vdur'):
2096 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2097 logging_text,
2098 nsr_id,
2099 vnfr_id,
2100 vdu_id,
2101 vdu_index,
2102 user=user,
2103 pub_key=pub_key,
2104 )
2105
2106 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2107
2108 # store rw_mgmt_ip in deploy params for later replacement
2109 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2110
2111 # n2vc_redesign STEP 6 Execute initial config primitive
2112 step = "execute initial config primitive"
2113
2114 # wait for dependent primitives execution (NS -> VNF -> VDU)
2115 if initial_config_primitive_list:
2116 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2117
2118 # stage, in function of element type: vdu, kdu, vnf or ns
2119 my_vca = vca_deployed_list[vca_index]
2120 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2121 # VDU or KDU
2122 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2123 elif my_vca.get("member-vnf-index"):
2124 # VNF
2125 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2126 else:
2127 # NS
2128 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2129
2130 self._write_configuration_status(
2131 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2132 )
2133
2134 self._write_op_status(op_id=nslcmop_id, stage=stage)
2135
2136 check_if_terminated_needed = True
2137 for initial_config_primitive in initial_config_primitive_list:
2138 # adding information on the vca_deployed if it is a NS execution environment
2139 if not vca_deployed["member-vnf-index"]:
2140 deploy_params["ns_config_info"] = json.dumps(
2141 self._get_ns_config_info(nsr_id)
2142 )
2143 # TODO check if already done
2144 primitive_params_ = self._map_primitive_params(
2145 initial_config_primitive, {}, deploy_params
2146 )
2147
2148 step = "execute primitive '{}' params '{}'".format(
2149 initial_config_primitive["name"], primitive_params_
2150 )
2151 self.logger.debug(logging_text + step)
2152 await self.vca_map[vca_type].exec_primitive(
2153 ee_id=ee_id,
2154 primitive_name=initial_config_primitive["name"],
2155 params_dict=primitive_params_,
2156 db_dict=db_dict,
2157 vca_id=vca_id,
2158 vca_type=vca_type,
2159 )
2160 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2161 if check_if_terminated_needed:
2162 if config_descriptor.get("terminate-config-primitive"):
2163 self.update_db_2(
2164 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2165 )
2166 check_if_terminated_needed = False
2167
2168 # TODO register in database that primitive is done
2169
2170 # STEP 7 Configure metrics
2171 if vca_type == "helm" or vca_type == "helm-v3":
2172 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2173 ee_id=ee_id,
2174 artifact_path=artifact_path,
2175 ee_config_descriptor=ee_config_descriptor,
2176 vnfr_id=vnfr_id,
2177 nsr_id=nsr_id,
2178 target_ip=rw_mgmt_ip,
2179 )
2180 if prometheus_jobs:
2181 self.update_db_2(
2182 "nsrs",
2183 nsr_id,
2184 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2185 )
2186
2187 for job in prometheus_jobs:
2188 self.db.set_one(
2189 "prometheus_jobs",
2190 {"job_name": job["job_name"]},
2191 job,
2192 upsert=True,
2193 fail_on_empty=False,
2194 )
2195
2196 step = "instantiated at VCA"
2197 self.logger.debug(logging_text + step)
2198
2199 self._write_configuration_status(
2200 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2201 )
2202
2203 except Exception as e: # TODO not use Exception but N2VC exception
2204 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2205 if not isinstance(
2206 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2207 ):
2208 self.logger.error(
2209 "Exception while {} : {}".format(step, e), exc_info=True
2210 )
2211 self._write_configuration_status(
2212 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2213 )
2214 raise LcmException("{} {}".format(step, e)) from e
2215
2216 def _write_ns_status(
2217 self,
2218 nsr_id: str,
2219 ns_state: str,
2220 current_operation: str,
2221 current_operation_id: str,
2222 error_description: str = None,
2223 error_detail: str = None,
2224 other_update: dict = None,
2225 ):
2226 """
2227 Update db_nsr fields.
2228 :param nsr_id:
2229 :param ns_state:
2230 :param current_operation:
2231 :param current_operation_id:
2232 :param error_description:
2233 :param error_detail:
2234 :param other_update: Other required changes at database if provided, will be cleared
2235 :return:
2236 """
2237 try:
2238 db_dict = other_update or {}
2239 db_dict[
2240 "_admin.nslcmop"
2241 ] = current_operation_id # for backward compatibility
2242 db_dict["_admin.current-operation"] = current_operation_id
2243 db_dict["_admin.operation-type"] = (
2244 current_operation if current_operation != "IDLE" else None
2245 )
2246 db_dict["currentOperation"] = current_operation
2247 db_dict["currentOperationID"] = current_operation_id
2248 db_dict["errorDescription"] = error_description
2249 db_dict["errorDetail"] = error_detail
2250
2251 if ns_state:
2252 db_dict["nsState"] = ns_state
2253 self.update_db_2("nsrs", nsr_id, db_dict)
2254 except DbException as e:
2255 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2256
2257 def _write_op_status(
2258 self,
2259 op_id: str,
2260 stage: list = None,
2261 error_message: str = None,
2262 queuePosition: int = 0,
2263 operation_state: str = None,
2264 other_update: dict = None,
2265 ):
2266 try:
2267 db_dict = other_update or {}
2268 db_dict["queuePosition"] = queuePosition
2269 if isinstance(stage, list):
2270 db_dict["stage"] = stage[0]
2271 db_dict["detailed-status"] = " ".join(stage)
2272 elif stage is not None:
2273 db_dict["stage"] = str(stage)
2274
2275 if error_message is not None:
2276 db_dict["errorMessage"] = error_message
2277 if operation_state is not None:
2278 db_dict["operationState"] = operation_state
2279 db_dict["statusEnteredTime"] = time()
2280 self.update_db_2("nslcmops", op_id, db_dict)
2281 except DbException as e:
2282 self.logger.warn(
2283 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2284 )
2285
2286 def _write_all_config_status(self, db_nsr: dict, status: str):
2287 try:
2288 nsr_id = db_nsr["_id"]
2289 # configurationStatus
2290 config_status = db_nsr.get("configurationStatus")
2291 if config_status:
2292 db_nsr_update = {
2293 "configurationStatus.{}.status".format(index): status
2294 for index, v in enumerate(config_status)
2295 if v
2296 }
2297 # update status
2298 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2299
2300 except DbException as e:
2301 self.logger.warn(
2302 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2303 )
2304
2305 def _write_configuration_status(
2306 self,
2307 nsr_id: str,
2308 vca_index: int,
2309 status: str = None,
2310 element_under_configuration: str = None,
2311 element_type: str = None,
2312 other_update: dict = None,
2313 ):
2314
2315 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2316 # .format(vca_index, status))
2317
2318 try:
2319 db_path = "configurationStatus.{}.".format(vca_index)
2320 db_dict = other_update or {}
2321 if status:
2322 db_dict[db_path + "status"] = status
2323 if element_under_configuration:
2324 db_dict[
2325 db_path + "elementUnderConfiguration"
2326 ] = element_under_configuration
2327 if element_type:
2328 db_dict[db_path + "elementType"] = element_type
2329 self.update_db_2("nsrs", nsr_id, db_dict)
2330 except DbException as e:
2331 self.logger.warn(
2332 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2333 status, nsr_id, vca_index, e
2334 )
2335 )
2336
2337 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2338 """
2339 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2340 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2341 Database is used because the result can be obtained from a different LCM worker in case of HA.
2342 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2343 :param db_nslcmop: database content of nslcmop
2344 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2345 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2346 computed 'vim-account-id'
2347 """
2348 modified = False
2349 nslcmop_id = db_nslcmop["_id"]
2350 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2351 if placement_engine == "PLA":
2352 self.logger.debug(
2353 logging_text + "Invoke and wait for placement optimization"
2354 )
2355 await self.msg.aiowrite(
2356 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2357 )
2358 db_poll_interval = 5
2359 wait = db_poll_interval * 10
2360 pla_result = None
2361 while not pla_result and wait >= 0:
2362 await asyncio.sleep(db_poll_interval)
2363 wait -= db_poll_interval
2364 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2365 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2366
2367 if not pla_result:
2368 raise LcmException(
2369 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2370 )
2371
2372 for pla_vnf in pla_result["vnf"]:
2373 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2374 if not pla_vnf.get("vimAccountId") or not vnfr:
2375 continue
2376 modified = True
2377 self.db.set_one(
2378 "vnfrs",
2379 {"_id": vnfr["_id"]},
2380 {"vim-account-id": pla_vnf["vimAccountId"]},
2381 )
2382 # Modifies db_vnfrs
2383 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2384 return modified
2385
2386 def update_nsrs_with_pla_result(self, params):
2387 try:
2388 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2389 self.update_db_2(
2390 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2391 )
2392 except Exception as e:
2393 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2394
    async def instantiate(self, nsr_id, nslcmop_id):
        """
        Instantiate a NS: deploys its KDUs, its VMs at RO/VIM and its execution
        environments (charms) at VCA, then waits for all the launched tasks.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None. Results are written at database (nsrs, nslcmops) and a
            kafka notification is sent at the end.
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored as a JSON string; decode it once here
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation-level timeout overrides the configured/default one
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        # kdur additionalParams is stored as a JSON string; decode it
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                # NOTE(review): db_vnfds holds full vnfd dicts, so this membership
                # test on the id string is always True and the vnfd is re-read for
                # every vnfr sharing it -- confirm whether deduplication was intended
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # VIM deployment runs as a background task; it is awaited at finally
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm (if any)
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # KDU-level charms (if any)
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of stuff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    # NOTE(review): if an exception was raised before
                    # timeout_ns_deploy was assigned above, this reference would
                    # raise NameError (caught by the generic handler below) --
                    # confirm whether a default should be set before the try block
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2880
2881 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2882 if vnfd_id not in cached_vnfds:
2883 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2884 return cached_vnfds[vnfd_id]
2885
2886 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2887 if vnf_profile_id not in cached_vnfrs:
2888 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2889 "vnfrs",
2890 {
2891 "member-vnf-index-ref": vnf_profile_id,
2892 "nsr-id-ref": nsr_id,
2893 },
2894 )
2895 return cached_vnfrs[vnf_profile_id]
2896
2897 def _is_deployed_vca_in_relation(
2898 self, vca: DeployedVCA, relation: Relation
2899 ) -> bool:
2900 found = False
2901 for endpoint in (relation.provider, relation.requirer):
2902 if endpoint["kdu-resource-profile-id"]:
2903 continue
2904 found = (
2905 vca.vnf_profile_id == endpoint.vnf_profile_id
2906 and vca.vdu_profile_id == endpoint.vdu_profile_id
2907 and vca.execution_environment_ref == endpoint.execution_environment_ref
2908 )
2909 if found:
2910 break
2911 return found
2912
2913 def _update_ee_relation_data_with_implicit_data(
2914 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2915 ):
2916 ee_relation_data = safe_get_ee_relation(
2917 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2918 )
2919 ee_relation_level = EELevel.get_level(ee_relation_data)
2920 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2921 "execution-environment-ref"
2922 ]:
2923 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2924 vnfd_id = vnf_profile["vnfd-id"]
2925 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2926 entity_id = (
2927 vnfd_id
2928 if ee_relation_level == EELevel.VNF
2929 else ee_relation_data["vdu-profile-id"]
2930 )
2931 ee = get_juju_ee_ref(db_vnfd, entity_id)
2932 if not ee:
2933 raise Exception(
2934 f"not execution environments found for ee_relation {ee_relation_data}"
2935 )
2936 ee_relation_data["execution-environment-ref"] = ee["id"]
2937 return ee_relation_data
2938
2939 def _get_ns_relations(
2940 self,
2941 nsr_id: str,
2942 nsd: Dict[str, Any],
2943 vca: DeployedVCA,
2944 cached_vnfds: Dict[str, Any],
2945 ) -> List[Relation]:
2946 relations = []
2947 db_ns_relations = get_ns_configuration_relation_list(nsd)
2948 for r in db_ns_relations:
2949 provider_dict = None
2950 requirer_dict = None
2951 if all(key in r for key in ("provider", "requirer")):
2952 provider_dict = r["provider"]
2953 requirer_dict = r["requirer"]
2954 elif "entities" in r:
2955 provider_id = r["entities"][0]["id"]
2956 provider_dict = {
2957 "nsr-id": nsr_id,
2958 "endpoint": r["entities"][0]["endpoint"],
2959 }
2960 if provider_id != nsd["id"]:
2961 provider_dict["vnf-profile-id"] = provider_id
2962 requirer_id = r["entities"][1]["id"]
2963 requirer_dict = {
2964 "nsr-id": nsr_id,
2965 "endpoint": r["entities"][1]["endpoint"],
2966 }
2967 if requirer_id != nsd["id"]:
2968 requirer_dict["vnf-profile-id"] = requirer_id
2969 else:
2970 raise Exception(
2971 "provider/requirer or entities must be included in the relation."
2972 )
2973 relation_provider = self._update_ee_relation_data_with_implicit_data(
2974 nsr_id, nsd, provider_dict, cached_vnfds
2975 )
2976 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2977 nsr_id, nsd, requirer_dict, cached_vnfds
2978 )
2979 provider = EERelation(relation_provider)
2980 requirer = EERelation(relation_requirer)
2981 relation = Relation(r["name"], provider, requirer)
2982 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2983 if vca_in_relation:
2984 relations.append(relation)
2985 return relations
2986
2987 def _get_vnf_relations(
2988 self,
2989 nsr_id: str,
2990 nsd: Dict[str, Any],
2991 vca: DeployedVCA,
2992 cached_vnfds: Dict[str, Any],
2993 ) -> List[Relation]:
2994 relations = []
2995 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2996 vnf_profile_id = vnf_profile["id"]
2997 vnfd_id = vnf_profile["vnfd-id"]
2998 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2999 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
3000 for r in db_vnf_relations:
3001 provider_dict = None
3002 requirer_dict = None
3003 if all(key in r for key in ("provider", "requirer")):
3004 provider_dict = r["provider"]
3005 requirer_dict = r["requirer"]
3006 elif "entities" in r:
3007 provider_id = r["entities"][0]["id"]
3008 provider_dict = {
3009 "nsr-id": nsr_id,
3010 "vnf-profile-id": vnf_profile_id,
3011 "endpoint": r["entities"][0]["endpoint"],
3012 }
3013 if provider_id != vnfd_id:
3014 provider_dict["vdu-profile-id"] = provider_id
3015 requirer_id = r["entities"][1]["id"]
3016 requirer_dict = {
3017 "nsr-id": nsr_id,
3018 "vnf-profile-id": vnf_profile_id,
3019 "endpoint": r["entities"][1]["endpoint"],
3020 }
3021 if requirer_id != vnfd_id:
3022 requirer_dict["vdu-profile-id"] = requirer_id
3023 else:
3024 raise Exception(
3025 "provider/requirer or entities must be included in the relation."
3026 )
3027 relation_provider = self._update_ee_relation_data_with_implicit_data(
3028 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3029 )
3030 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3031 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3032 )
3033 provider = EERelation(relation_provider)
3034 requirer = EERelation(relation_requirer)
3035 relation = Relation(r["name"], provider, requirer)
3036 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3037 if vca_in_relation:
3038 relations.append(relation)
3039 return relations
3040
3041 def _get_kdu_resource_data(
3042 self,
3043 ee_relation: EERelation,
3044 db_nsr: Dict[str, Any],
3045 cached_vnfds: Dict[str, Any],
3046 ) -> DeployedK8sResource:
3047 nsd = get_nsd(db_nsr)
3048 vnf_profiles = get_vnf_profiles(nsd)
3049 vnfd_id = find_in_list(
3050 vnf_profiles,
3051 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3052 )["vnfd-id"]
3053 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3054 kdu_resource_profile = get_kdu_resource_profile(
3055 db_vnfd, ee_relation.kdu_resource_profile_id
3056 )
3057 kdu_name = kdu_resource_profile["kdu-name"]
3058 deployed_kdu, _ = get_deployed_kdu(
3059 db_nsr.get("_admin", ()).get("deployed", ()),
3060 kdu_name,
3061 ee_relation.vnf_profile_id,
3062 )
3063 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3064 return deployed_kdu
3065
3066 def _get_deployed_component(
3067 self,
3068 ee_relation: EERelation,
3069 db_nsr: Dict[str, Any],
3070 cached_vnfds: Dict[str, Any],
3071 ) -> DeployedComponent:
3072 nsr_id = db_nsr["_id"]
3073 deployed_component = None
3074 ee_level = EELevel.get_level(ee_relation)
3075 if ee_level == EELevel.NS:
3076 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3077 if vca:
3078 deployed_component = DeployedVCA(nsr_id, vca)
3079 elif ee_level == EELevel.VNF:
3080 vca = get_deployed_vca(
3081 db_nsr,
3082 {
3083 "vdu_id": None,
3084 "member-vnf-index": ee_relation.vnf_profile_id,
3085 "ee_descriptor_id": ee_relation.execution_environment_ref,
3086 },
3087 )
3088 if vca:
3089 deployed_component = DeployedVCA(nsr_id, vca)
3090 elif ee_level == EELevel.VDU:
3091 vca = get_deployed_vca(
3092 db_nsr,
3093 {
3094 "vdu_id": ee_relation.vdu_profile_id,
3095 "member-vnf-index": ee_relation.vnf_profile_id,
3096 "ee_descriptor_id": ee_relation.execution_environment_ref,
3097 },
3098 )
3099 if vca:
3100 deployed_component = DeployedVCA(nsr_id, vca)
3101 elif ee_level == EELevel.KDU:
3102 kdu_resource_data = self._get_kdu_resource_data(
3103 ee_relation, db_nsr, cached_vnfds
3104 )
3105 if kdu_resource_data:
3106 deployed_component = DeployedK8sResource(kdu_resource_data)
3107 return deployed_component
3108
    async def _add_relation(
        self,
        relation: Relation,
        vca_type: str,
        db_nsr: Dict[str, Any],
        cached_vnfds: Dict[str, Any],
        cached_vnfrs: Dict[str, Any],
    ) -> bool:
        """Try to establish one relation between two deployed components.

        :param relation: relation (provider/requirer endpoints) to add
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param db_nsr: nsr record read from the database
        :param cached_vnfds: vnfd cache, filled as a side effect
        :param cached_vnfrs: vnfr cache, filled as a side effect
        :return: True if the relation was added; False if any peer is not
            deployed or not yet configured (the caller retries later)
        """
        deployed_provider = self._get_deployed_component(
            relation.provider, db_nsr, cached_vnfds
        )
        deployed_requirer = self._get_deployed_component(
            relation.requirer, db_nsr, cached_vnfds
        )
        # Both ends must exist and have their configuration software
        # installed before the relation can be created.
        if (
            deployed_provider
            and deployed_requirer
            and deployed_provider.config_sw_installed
            and deployed_requirer.config_sw_installed
        ):
            # The vnfr is only looked up for endpoints that carry a
            # vnf_profile_id; NS-scoped endpoints have none.
            provider_db_vnfr = (
                self._get_vnfr(
                    relation.provider.nsr_id,
                    relation.provider.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.provider.vnf_profile_id
                else None
            )
            requirer_db_vnfr = (
                self._get_vnfr(
                    relation.requirer.nsr_id,
                    relation.requirer.vnf_profile_id,
                    cached_vnfrs,
                )
                if relation.requirer.vnf_profile_id
                else None
            )
            provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
            requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
            provider_relation_endpoint = RelationEndpoint(
                deployed_provider.ee_id,
                provider_vca_id,
                relation.provider.endpoint,
            )
            requirer_relation_endpoint = RelationEndpoint(
                deployed_requirer.ee_id,
                requirer_vca_id,
                relation.requirer.endpoint,
            )
            await self.vca_map[vca_type].add_relation(
                provider=provider_relation_endpoint,
                requirer=requirer_relation_endpoint,
            )
            # remove entry from relations list
            return True
        return False
3166
3167 async def _add_vca_relations(
3168 self,
3169 logging_text,
3170 nsr_id,
3171 vca_type: str,
3172 vca_index: int,
3173 timeout: int = 3600,
3174 ) -> bool:
3175
3176 # steps:
3177 # 1. find all relations for this VCA
3178 # 2. wait for other peers related
3179 # 3. add relations
3180
3181 try:
3182 # STEP 1: find all relations for this VCA
3183
3184 # read nsr record
3185 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3186 nsd = get_nsd(db_nsr)
3187
3188 # this VCA data
3189 deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
3190 my_vca = DeployedVCA(nsr_id, deployed_vca_dict)
3191
3192 cached_vnfds = {}
3193 cached_vnfrs = {}
3194 relations = []
3195 relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
3196 relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))
3197
3198 # if no relations, terminate
3199 if not relations:
3200 self.logger.debug(logging_text + " No relations")
3201 return True
3202
3203 self.logger.debug(logging_text + " adding relations {}".format(relations))
3204
3205 # add all relations
3206 start = time()
3207 while True:
3208 # check timeout
3209 now = time()
3210 if now - start >= timeout:
3211 self.logger.error(logging_text + " : timeout adding relations")
3212 return False
3213
3214 # reload nsr from database (we need to update record: _admin.deployed.VCA)
3215 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3216
3217 # for each relation, find the VCA's related
3218 for relation in relations.copy():
3219 added = await self._add_relation(
3220 relation,
3221 vca_type,
3222 db_nsr,
3223 cached_vnfds,
3224 cached_vnfrs,
3225 )
3226 if added:
3227 relations.remove(relation)
3228
3229 if not relations:
3230 self.logger.debug("Relations added")
3231 break
3232 await asyncio.sleep(5.0)
3233
3234 return True
3235
3236 except Exception as e:
3237 self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
3238 return False
3239
3240 async def _install_kdu(
3241 self,
3242 nsr_id: str,
3243 nsr_db_path: str,
3244 vnfr_data: dict,
3245 kdu_index: int,
3246 kdud: dict,
3247 vnfd: dict,
3248 k8s_instance_info: dict,
3249 k8params: dict = None,
3250 timeout: int = 600,
3251 vca_id: str = None,
3252 ):
3253
3254 try:
3255 k8sclustertype = k8s_instance_info["k8scluster-type"]
3256 # Instantiate kdu
3257 db_dict_install = {
3258 "collection": "nsrs",
3259 "filter": {"_id": nsr_id},
3260 "path": nsr_db_path,
3261 }
3262
3263 if k8s_instance_info.get("kdu-deployment-name"):
3264 kdu_instance = k8s_instance_info.get("kdu-deployment-name")
3265 else:
3266 kdu_instance = self.k8scluster_map[
3267 k8sclustertype
3268 ].generate_kdu_instance_name(
3269 db_dict=db_dict_install,
3270 kdu_model=k8s_instance_info["kdu-model"],
3271 kdu_name=k8s_instance_info["kdu-name"],
3272 )
3273
3274 # Update the nsrs table with the kdu-instance value
3275 self.update_db_2(
3276 item="nsrs",
3277 _id=nsr_id,
3278 _desc={nsr_db_path + ".kdu-instance": kdu_instance},
3279 )
3280
3281 # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
3282 # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
3283 # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
3284 # namespace, this first verification could be removed, and the next step would be done for any kind
3285 # of KNF.
3286 # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
3287 # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
3288 if k8sclustertype in ("juju", "juju-bundle"):
3289 # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
3290 # that the user passed a namespace which he wants its KDU to be deployed in)
3291 if (
3292 self.db.count(
3293 table="nsrs",
3294 q_filter={
3295 "_id": nsr_id,
3296 "_admin.projects_write": k8s_instance_info["namespace"],
3297 "_admin.projects_read": k8s_instance_info["namespace"],
3298 },
3299 )
3300 > 0
3301 ):
3302 self.logger.debug(
3303 f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
3304 )
3305 self.update_db_2(
3306 item="nsrs",
3307 _id=nsr_id,
3308 _desc={f"{nsr_db_path}.namespace": kdu_instance},
3309 )
3310 k8s_instance_info["namespace"] = kdu_instance
3311
3312 await self.k8scluster_map[k8sclustertype].install(
3313 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3314 kdu_model=k8s_instance_info["kdu-model"],
3315 atomic=True,
3316 params=k8params,
3317 db_dict=db_dict_install,
3318 timeout=timeout,
3319 kdu_name=k8s_instance_info["kdu-name"],
3320 namespace=k8s_instance_info["namespace"],
3321 kdu_instance=kdu_instance,
3322 vca_id=vca_id,
3323 )
3324
3325 # Obtain services to obtain management service ip
3326 services = await self.k8scluster_map[k8sclustertype].get_services(
3327 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3328 kdu_instance=kdu_instance,
3329 namespace=k8s_instance_info["namespace"],
3330 )
3331
3332 # Obtain management service info (if exists)
3333 vnfr_update_dict = {}
3334 kdu_config = get_configuration(vnfd, kdud["name"])
3335 if kdu_config:
3336 target_ee_list = kdu_config.get("execution-environment-list", [])
3337 else:
3338 target_ee_list = []
3339
3340 if services:
3341 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
3342 mgmt_services = [
3343 service
3344 for service in kdud.get("service", [])
3345 if service.get("mgmt-service")
3346 ]
3347 for mgmt_service in mgmt_services:
3348 for service in services:
3349 if service["name"].startswith(mgmt_service["name"]):
3350 # Mgmt service found, Obtain service ip
3351 ip = service.get("external_ip", service.get("cluster_ip"))
3352 if isinstance(ip, list) and len(ip) == 1:
3353 ip = ip[0]
3354
3355 vnfr_update_dict[
3356 "kdur.{}.ip-address".format(kdu_index)
3357 ] = ip
3358
3359 # Check if must update also mgmt ip at the vnf
3360 service_external_cp = mgmt_service.get(
3361 "external-connection-point-ref"
3362 )
3363 if service_external_cp:
3364 if (
3365 deep_get(vnfd, ("mgmt-interface", "cp"))
3366 == service_external_cp
3367 ):
3368 vnfr_update_dict["ip-address"] = ip
3369
3370 if find_in_list(
3371 target_ee_list,
3372 lambda ee: ee.get(
3373 "external-connection-point-ref", ""
3374 )
3375 == service_external_cp,
3376 ):
3377 vnfr_update_dict[
3378 "kdur.{}.ip-address".format(kdu_index)
3379 ] = ip
3380 break
3381 else:
3382 self.logger.warn(
3383 "Mgmt service name: {} not found".format(
3384 mgmt_service["name"]
3385 )
3386 )
3387
3388 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
3389 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
3390
3391 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
3392 if (
3393 kdu_config
3394 and kdu_config.get("initial-config-primitive")
3395 and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
3396 ):
3397 initial_config_primitive_list = kdu_config.get(
3398 "initial-config-primitive"
3399 )
3400 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
3401
3402 for initial_config_primitive in initial_config_primitive_list:
3403 primitive_params_ = self._map_primitive_params(
3404 initial_config_primitive, {}, {}
3405 )
3406
3407 await asyncio.wait_for(
3408 self.k8scluster_map[k8sclustertype].exec_primitive(
3409 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
3410 kdu_instance=kdu_instance,
3411 primitive_name=initial_config_primitive["name"],
3412 params=primitive_params_,
3413 db_dict=db_dict_install,
3414 vca_id=vca_id,
3415 ),
3416 timeout=timeout,
3417 )
3418
3419 except Exception as e:
3420 # Prepare update db with error and raise exception
3421 try:
3422 self.update_db_2(
3423 "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
3424 )
3425 self.update_db_2(
3426 "vnfrs",
3427 vnfr_data.get("_id"),
3428 {"kdur.{}.status".format(kdu_index): "ERROR"},
3429 )
3430 except Exception:
3431 # ignore to keep original exception
3432 pass
3433 # reraise original error
3434 raise
3435
3436 return kdu_instance
3437
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        # Launch kdus if present in the descriptor
        """Launch an install task for every KDU declared in the vnfr records.

        For each kdur entry the target K8s cluster is resolved (waiting for
        any in-progress cluster task), the Helm repos are synchronized once
        per cluster, the KDU entry is pre-created at _admin.deployed.K8s of
        the nsr record, and an _install_kdu task is registered in
        *task_instantiation_info*.

        :raises LcmException: on unknown kdu type, missing cluster, or any
            other failure (wrapped with the step being executed)
        """

        # cache of cluster-id -> cluster uuid, per cluster type
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal uuid of a K8s cluster,
            initializing helm-v3 on it for backward compatibility if needed."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            # persist whatever KDU entries were prepared, even on failure
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3709
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Register one instantiate_N2VC task per execution environment of
        *descriptor_config*, reusing or creating the matching VCA entry at
        db_nsr._admin.deployed.VCA."""
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            if ee_item.get("juju"):
                # juju EE: proxy charm by default when a charm name is given;
                # "cloud: k8s" or "proxy: false" override the type
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                # helm EE: v3 unless the descriptor pins helm-version v2
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # look for an existing VCA entry matching this EE; the for-else
            # creates a new entry when none matches
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3862
3863 @staticmethod
3864 def _create_nslcmop(nsr_id, operation, params):
3865 """
3866 Creates a ns-lcm-opp content to be stored at database.
3867 :param nsr_id: internal id of the instance
3868 :param operation: instantiate, terminate, scale, action, ...
3869 :param params: user parameters for the operation
3870 :return: dictionary following SOL005 format
3871 """
3872 # Raise exception if invalid arguments
3873 if not (nsr_id and operation and params):
3874 raise LcmException(
3875 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3876 )
3877 now = time()
3878 _id = str(uuid4())
3879 nslcmop = {
3880 "id": _id,
3881 "_id": _id,
3882 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3883 "operationState": "PROCESSING",
3884 "statusEnteredTime": now,
3885 "nsInstanceId": nsr_id,
3886 "lcmOperationType": operation,
3887 "startTime": now,
3888 "isAutomaticInvocation": False,
3889 "operationParams": params,
3890 "isCancelPending": False,
3891 "links": {
3892 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3893 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3894 },
3895 }
3896 return nslcmop
3897
3898 def _format_additional_params(self, params):
3899 params = params or {}
3900 for key, value in params.items():
3901 if str(value).startswith("!!yaml "):
3902 params[key] = yaml.safe_load(value[7:])
3903 return params
3904
3905 def _get_terminate_primitive_params(self, seq, vnf_index):
3906 primitive = seq.get("name")
3907 primitive_params = {}
3908 params = {
3909 "member_vnf_index": vnf_index,
3910 "primitive": primitive,
3911 "primitive_params": primitive_params,
3912 }
3913 desc_params = {}
3914 return self._map_primitive_params(seq, params, desc_params)
3915
3916 # sub-operations
3917
3918 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3919 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3920 if op.get("operationState") == "COMPLETED":
3921 # b. Skip sub-operation
3922 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3923 return self.SUBOPERATION_STATUS_SKIP
3924 else:
3925 # c. retry executing sub-operation
3926 # The sub-operation exists, and operationState != 'COMPLETED'
3927 # Update operationState = 'PROCESSING' to indicate a retry.
3928 operationState = "PROCESSING"
3929 detailed_status = "In progress"
3930 self._update_suboperation_status(
3931 db_nslcmop, op_index, operationState, detailed_status
3932 )
3933 # Return the sub-operation index
3934 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3935 # with arguments extracted from the sub-operation
3936 return op_index
3937
3938 # Find a sub-operation where all keys in a matching dictionary must match
3939 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3940 def _find_suboperation(self, db_nslcmop, match):
3941 if db_nslcmop and match:
3942 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3943 for i, op in enumerate(op_list):
3944 if all(op.get(k) == match[k] for k in match):
3945 return i
3946 return self.SUBOPERATION_STATUS_NOT_FOUND
3947
3948 # Update status for a sub-operation given its index
3949 def _update_suboperation_status(
3950 self, db_nslcmop, op_index, operationState, detailed_status
3951 ):
3952 # Update DB for HA tasks
3953 q_filter = {"_id": db_nslcmop["_id"]}
3954 update_dict = {
3955 "_admin.operations.{}.operationState".format(op_index): operationState,
3956 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3957 }
3958 self.db.set_one(
3959 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3960 )
3961
3962 # Add sub-operation, return the index of the added sub-operation
3963 # Optionally, set operationState, detailed-status, and operationType
3964 # Status and type are currently set for 'scale' sub-operations:
3965 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3966 # 'detailed-status' : status message
3967 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3968 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3969 def _add_suboperation(
3970 self,
3971 db_nslcmop,
3972 vnf_index,
3973 vdu_id,
3974 vdu_count_index,
3975 vdu_name,
3976 primitive,
3977 mapped_primitive_params,
3978 operationState=None,
3979 detailed_status=None,
3980 operationType=None,
3981 RO_nsr_id=None,
3982 RO_scaling_info=None,
3983 ):
3984 if not db_nslcmop:
3985 return self.SUBOPERATION_STATUS_NOT_FOUND
3986 # Get the "_admin.operations" list, if it exists
3987 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3988 op_list = db_nslcmop_admin.get("operations")
3989 # Create or append to the "_admin.operations" list
3990 new_op = {
3991 "member_vnf_index": vnf_index,
3992 "vdu_id": vdu_id,
3993 "vdu_count_index": vdu_count_index,
3994 "primitive": primitive,
3995 "primitive_params": mapped_primitive_params,
3996 }
3997 if operationState:
3998 new_op["operationState"] = operationState
3999 if detailed_status:
4000 new_op["detailed-status"] = detailed_status
4001 if operationType:
4002 new_op["lcmOperationType"] = operationType
4003 if RO_nsr_id:
4004 new_op["RO_nsr_id"] = RO_nsr_id
4005 if RO_scaling_info:
4006 new_op["RO_scaling_info"] = RO_scaling_info
4007 if not op_list:
4008 # No existing operations, create key 'operations' with current operation as first list element
4009 db_nslcmop_admin.update({"operations": [new_op]})
4010 op_list = db_nslcmop_admin.get("operations")
4011 else:
4012 # Existing operations, append operation to list
4013 op_list.append(new_op)
4014
4015 db_nslcmop_update = {"_admin.operations": op_list}
4016 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4017 op_index = len(op_list) - 1
4018 return op_index
4019
4020 # Helper methods for scale() sub-operations
4021
4022 # pre-scale/post-scale:
4023 # Check for 3 different cases:
4024 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4025 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4026 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4027 def _check_or_add_scale_suboperation(
4028 self,
4029 db_nslcmop,
4030 vnf_index,
4031 vnf_config_primitive,
4032 primitive_params,
4033 operationType,
4034 RO_nsr_id=None,
4035 RO_scaling_info=None,
4036 ):
4037 # Find this sub-operation
4038 if RO_nsr_id and RO_scaling_info:
4039 operationType = "SCALE-RO"
4040 match = {
4041 "member_vnf_index": vnf_index,
4042 "RO_nsr_id": RO_nsr_id,
4043 "RO_scaling_info": RO_scaling_info,
4044 }
4045 else:
4046 match = {
4047 "member_vnf_index": vnf_index,
4048 "primitive": vnf_config_primitive,
4049 "primitive_params": primitive_params,
4050 "lcmOperationType": operationType,
4051 }
4052 op_index = self._find_suboperation(db_nslcmop, match)
4053 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4054 # a. New sub-operation
4055 # The sub-operation does not exist, add it.
4056 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4057 # The following parameters are set to None for all kind of scaling:
4058 vdu_id = None
4059 vdu_count_index = None
4060 vdu_name = None
4061 if RO_nsr_id and RO_scaling_info:
4062 vnf_config_primitive = None
4063 primitive_params = None
4064 else:
4065 RO_nsr_id = None
4066 RO_scaling_info = None
4067 # Initial status for sub-operation
4068 operationState = "PROCESSING"
4069 detailed_status = "In progress"
4070 # Add sub-operation for pre/post-scaling (zero or more operations)
4071 self._add_suboperation(
4072 db_nslcmop,
4073 vnf_index,
4074 vdu_id,
4075 vdu_count_index,
4076 vdu_name,
4077 vnf_config_primitive,
4078 primitive_params,
4079 operationState,
4080 detailed_status,
4081 operationType,
4082 RO_nsr_id,
4083 RO_scaling_info,
4084 )
4085 return self.SUBOPERATION_STATUS_NEW
4086 else:
4087 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4088 # or op_index (operationState != 'COMPLETED')
4089 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4090
4091 # Function to return execution_environment id
4092
4093 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4094 # TODO vdu_index_count
4095 for vca in vca_deployed_list:
4096 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4097 return vca["ee_id"]
4098
async def destroy_N2VC(
    self,
    logging_text,
    db_nslcmop,
    vca_deployed,
    config_descriptor,
    vca_index,
    destroy_ee=True,
    exec_primitives=True,
    scaling_in=False,
    vca_id: str = None,
):
    """
    Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
    :param logging_text: prefix prepended to every log line of this method
    :param db_nslcmop: nslcmops database record; sub-operations are registered on it
    :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
    :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
    :param vca_index: index in the database _admin.deployed.VCA
    :param destroy_ee: False to not destroy the EE here, because all of them will be destroyed at once
    :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
        not executed properly
    :param scaling_in: True destroys the application, False destroys the model
    :param vca_id: VCA (controller) identifier, passed through to the VCA connector
    :return: None or exception
    """

    self.logger.debug(
        logging_text
        + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
            vca_index, vca_deployed, config_descriptor, destroy_ee
        )
    )

    # Default kept for backward compatibility with records that predate the "type" field
    vca_type = vca_deployed.get("type", "lxc_proxy_charm")

    # execute terminate_primitives
    if exec_primitives:
        # terminate primitives sorted by sequence, filtered by the EE descriptor of this VCA
        terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
            config_descriptor.get("terminate-config-primitive"),
            vca_deployed.get("ee_descriptor_id"),
        )
        vdu_id = vca_deployed.get("vdu_id")
        vdu_count_index = vca_deployed.get("vdu_count_index")
        vdu_name = vca_deployed.get("vdu_name")
        vnf_index = vca_deployed.get("member-vnf-index")
        # "needed_terminate" is set at configuration time; when False the primitives already ran
        if terminate_primitives and vca_deployed.get("needed_terminate"):
            for seq in terminate_primitives:
                # For each sequence in list, get primitive and call _ns_execute_primitive()
                step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                    vnf_index, seq.get("name")
                )
                self.logger.debug(logging_text + step)
                # Create the primitive for each sequence, i.e. "primitive": "touch"
                primitive = seq.get("name")
                mapped_primitive_params = self._get_terminate_primitive_params(
                    seq, vnf_index
                )

                # Add sub-operation (for HA tracking of each terminate primitive)
                self._add_suboperation(
                    db_nslcmop,
                    vnf_index,
                    vdu_id,
                    vdu_count_index,
                    vdu_name,
                    primitive,
                    mapped_primitive_params,
                )
                # Sub-operations: Call _ns_execute_primitive() instead of action()
                try:
                    result, result_detail = await self._ns_execute_primitive(
                        vca_deployed["ee_id"],
                        primitive,
                        mapped_primitive_params,
                        vca_type=vca_type,
                        vca_id=vca_id,
                    )
                except LcmException:
                    # this happens when VCA is not deployed. In this case it is not needed to terminate
                    continue
                result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                if result not in result_ok:
                    raise LcmException(
                        "terminate_primitive {} for vnf_member_index={} fails with "
                        "error {}".format(seq.get("name"), vnf_index, result_detail)
                    )
            # set that this VCA do not need terminated
            db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                vca_index
            )
            self.update_db_2(
                "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
            )

    # Delete Prometheus Jobs if any
    # This uses NSR_ID, so it will destroy any jobs under this index
    self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

    if destroy_ee:
        await self.vca_map[vca_type].delete_execution_environment(
            vca_deployed["ee_id"],
            scaling_in=scaling_in,
            vca_type=vca_type,
            vca_id=vca_id,
        )
4204
async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
    """Delete the whole N2VC namespace of this NS (all execution environments at once).

    Config status is flipped to TERMINATING before the deletion and to DELETED
    afterwards; a namespace that is already gone is silently skipped.
    """
    self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
    # the namespace of an NS is "." followed by its nsr id
    ns_namespace = "." + db_nsr["_id"]
    try:
        await self.n2vc.delete_namespace(
            namespace=ns_namespace,
            total_timeout=self.timeout_charm_delete,
            vca_id=vca_id,
        )
    except N2VCNotFound:
        # already deleted: nothing left to do
        pass
    self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4217
async def _terminate_RO(
    self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
):
    """
    Terminates a deployment from RO
    :param logging_text: prefix prepended to every log line of this method
    :param nsr_deployed: db_nsr._admin.deployed
    :param nsr_id:
    :param nslcmop_id:
    :param stage: list of string with the content to write on db_nslcmop.detailed-status.
    this method will update only the index 2, but it will write on database the concatenated content of the list
    :return: None. Raises LcmException if any deletion step failed.
    """
    db_nsr_update = {}
    failed_detail = []  # accumulates error strings; raised joined at the end
    ro_nsr_id = ro_delete_action = None
    if nsr_deployed and nsr_deployed.get("RO"):
        ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
        # a previously-started delete may have left its action id recorded
        ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
    try:
        if ro_nsr_id:
            stage[2] = "Deleting ns from VIM."
            db_nsr_update["detailed-status"] = " ".join(stage)
            self._write_op_status(nslcmop_id, stage)
            self.logger.debug(logging_text + stage[2])
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            # RO delete is asynchronous: it returns an action id to poll
            desc = await self.RO.delete("ns", ro_nsr_id)
            ro_delete_action = desc["action_id"]
            db_nsr_update[
                "_admin.deployed.RO.nsr_delete_action_id"
            ] = ro_delete_action
            db_nsr_update["_admin.deployed.RO.nsr_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
        if ro_delete_action:
            # wait until NS is deleted from VIM
            stage[2] = "Waiting ns deleted from VIM."
            detailed_status_old = None
            self.logger.debug(
                logging_text
                + stage[2]
                + " RO_id={} ro_delete_action={}".format(
                    ro_nsr_id, ro_delete_action
                )
            )
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)

            delete_timeout = 20 * 60  # 20 minutes
            # poll the RO action every 5 seconds until ACTIVE/ERROR or timeout
            while delete_timeout > 0:
                desc = await self.RO.show(
                    "ns",
                    item_id_name=ro_nsr_id,
                    extra_item="action",
                    extra_item_id=ro_delete_action,
                )

                # deploymentStatus
                self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                ns_status, ns_status_info = self.RO.check_action_status(desc)
                if ns_status == "ERROR":
                    raise ROclient.ROClientException(ns_status_info)
                elif ns_status == "BUILD":
                    stage[2] = "Deleting from VIM {}".format(ns_status_info)
                elif ns_status == "ACTIVE":
                    # ACTIVE on a delete action means the deletion completed
                    db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                    db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                    break
                else:
                    assert (
                        False
                    ), "ROclient.check_action_status returns unknown {}".format(
                        ns_status
                    )
                # write progress to DB only when the status text actually changed
                if stage[2] != detailed_status_old:
                    detailed_status_old = stage[2]
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self._write_op_status(nslcmop_id, stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                await asyncio.sleep(5, loop=self.loop)
                delete_timeout -= 5
            else:  # delete_timeout <= 0:
                raise ROclient.ROClientException(
                    "Timeout waiting ns deleted from VIM"
                )

    except Exception as e:
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        if (
            isinstance(e, ROclient.ROClientException) and e.http_code == 404
        ):  # not found
            # 404 means the NS was already deleted at RO: treat as success
            db_nsr_update["_admin.deployed.RO.nsr_id"] = None
            db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
            self.logger.debug(
                logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
            )
        elif (
            isinstance(e, ROclient.ROClientException) and e.http_code == 409
        ):  # conflict
            failed_detail.append("delete conflict: {}".format(e))
            self.logger.debug(
                logging_text
                + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
            )
        else:
            failed_detail.append("delete error: {}".format(e))
            self.logger.error(
                logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
            )

    # Delete nsd (only when the ns itself was deleted without errors)
    if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
        ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
        try:
            stage[2] = "Deleting nsd from RO."
            db_nsr_update["detailed-status"] = " ".join(stage)
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self._write_op_status(nslcmop_id, stage)
            await self.RO.delete("nsd", ro_nsd_id)
            self.logger.debug(
                logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
            )
            db_nsr_update["_admin.deployed.RO.nsd_id"] = None
        except Exception as e:
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                self.logger.debug(
                    logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append(
                    "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                )
                self.logger.debug(logging_text + failed_detail[-1])
            else:
                failed_detail.append(
                    "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                )
                self.logger.error(logging_text + failed_detail[-1])

    # Delete each vnfd registered at RO (same 404/409 handling as above)
    if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
        for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
            if not vnf_deployed or not vnf_deployed["id"]:
                continue
            try:
                ro_vnfd_id = vnf_deployed["id"]
                stage[
                    2
                ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                    vnf_deployed["member-vnf-index"], ro_vnfd_id
                )
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("vnfd", ro_vnfd_id)
                self.logger.debug(
                    logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                )
                db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update[
                        "_admin.deployed.RO.vnfd.{}.id".format(index)
                    ] = None
                    self.logger.debug(
                        logging_text
                        + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

    if failed_detail:
        stage[2] = "Error deleting from VIM"
    else:
        stage[2] = "Deleted from VIM"
    db_nsr_update["detailed-status"] = " ".join(stage)
    self.update_db_2("nsrs", nsr_id, db_nsr_update)
    self._write_op_status(nslcmop_id, stage)

    if failed_detail:
        raise LcmException("; ".join(failed_detail))
4417
async def terminate(self, nsr_id, nslcmop_id):
    """Terminate a Network Service instance (entry point of the NS terminate operation).

    Runs in three stages (tracked in *stage* and mirrored to the database):
    1) preparation, 2) execution of terminate primitives per VCA,
    3) bulk deletion of EEs, KDUs and the RO/VIM deployment.
    Final status is written to nsrs/nslcmops and notified through kafka.

    :param nsr_id: id of the nsrs record
    :param nslcmop_id: id of the nslcmops record driving this task
    """
    # Try to lock HA task here
    task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
    if not task_is_locked_by_me:
        # another LCM instance of the HA cluster owns this operation
        return

    logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")
    timeout_ns_terminate = self.timeout_ns_terminate
    db_nsr = None
    db_nslcmop = None
    operation_params = None
    exc = None
    error_list = []  # annotates all failed error messages
    db_nslcmop_update = {}
    autoremove = False  # autoremove after terminated
    tasks_dict_info = {}  # task -> human-readable description, for _wait_for_tasks
    db_nsr_update = {}
    stage = [
        "Stage 1/3: Preparing task.",
        "Waiting for previous operations to terminate.",
        "",
    ]
    # ^ contains [stage, step, VIM-status]
    try:
        # wait for any previous tasks in process
        await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

        stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        operation_params = db_nslcmop.get("operationParams") or {}
        if operation_params.get("timeout_ns_terminate"):
            timeout_ns_terminate = operation_params["timeout_ns_terminate"]
        stage[1] = "Getting nsr={} from db.".format(nsr_id)
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

        db_nsr_update["operational-status"] = "terminating"
        db_nsr_update["config-status"] = "terminating"
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state="TERMINATING",
            current_operation="TERMINATING",
            current_operation_id=nslcmop_id,
            other_update=db_nsr_update,
        )
        self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
        # deepcopy: the deployed info is read while the DB copy keeps being updated
        nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
        if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
            # nothing was deployed; final status is written in the finally block
            return

        stage[1] = "Getting vnf descriptors from db."
        db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
        db_vnfrs_dict = {
            db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
        }
        db_vnfds_from_id = {}
        db_vnfds_from_member_index = {}
        # Loop over VNFRs; fetch each distinct VNFD only once
        for vnfr in db_vnfrs_list:
            vnfd_id = vnfr["vnfd-id"]
            if vnfd_id not in db_vnfds_from_id:
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                db_vnfds_from_id[vnfd_id] = vnfd
            db_vnfds_from_member_index[
                vnfr["member-vnf-index-ref"]
            ] = db_vnfds_from_id[vnfd_id]

        # Destroy individual execution environments when there are terminating primitives.
        # Rest of EE will be deleted at once
        # TODO - check before calling _destroy_N2VC
        # if not operation_params.get("skip_terminate_primitives"):#
        # or not vca.get("needed_terminate"):
        stage[0] = "Stage 2/3 execute terminating primitives."
        self.logger.debug(logging_text + stage[0])
        stage[1] = "Looking execution environment that needs terminate."
        self.logger.debug(logging_text + stage[1])

        for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
            config_descriptor = None
            vca_member_vnf_index = vca.get("member-vnf-index")
            vca_id = self.get_vca_id(
                db_vnfrs_dict.get(vca_member_vnf_index)
                if vca_member_vnf_index
                else None,
                db_nsr,
            )
            if not vca or not vca.get("ee_id"):
                continue
            # select the config descriptor matching the VCA level: ns / vdu / kdu / vnf
            if not vca.get("member-vnf-index"):
                # ns
                config_descriptor = db_nsr.get("ns-configuration")
            elif vca.get("vdu_id"):
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
            elif vca.get("kdu_name"):
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
            else:
                db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
            vca_type = vca.get("type")
            exec_terminate_primitives = not operation_params.get(
                "skip_terminate_primitives"
            ) and vca.get("needed_terminate")
            # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
            # pending native charms
            destroy_ee = (
                True if vca_type in ("helm", "helm-v3", "native_charm") else False
            )
            # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
            #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
            task = asyncio.ensure_future(
                self.destroy_N2VC(
                    logging_text,
                    db_nslcmop,
                    vca,
                    config_descriptor,
                    vca_index,
                    destroy_ee,
                    exec_terminate_primitives,
                    vca_id=vca_id,
                )
            )
            tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

        # wait for pending tasks of terminate primitives
        if tasks_dict_info:
            self.logger.debug(
                logging_text
                + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
            )
            error_list = await self._wait_for_tasks(
                logging_text,
                tasks_dict_info,
                min(self.timeout_charm_delete, timeout_ns_terminate),
                stage,
                nslcmop_id,
            )
            tasks_dict_info.clear()
            if error_list:
                # abort Stage 3; the finally block writes the failed status
                return  # raise LcmException("; ".join(error_list))

        # remove All execution environments at once
        stage[0] = "Stage 3/3 delete all."

        if nsr_deployed.get("VCA"):
            stage[1] = "Deleting all execution environments."
            self.logger.debug(logging_text + stage[1])
            vca_id = self.get_vca_id({}, db_nsr)
            task_delete_ee = asyncio.ensure_future(
                asyncio.wait_for(
                    self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                    timeout=self.timeout_charm_delete,
                )
            )
            # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
            tasks_dict_info[task_delete_ee] = "Terminating all VCA"

        # Delete from k8scluster
        stage[1] = "Deleting KDUs."
        self.logger.debug(logging_text + stage[1])
        # print(nsr_deployed)
        for kdu in get_iterable(nsr_deployed, "K8s"):
            if not kdu or not kdu.get("kdu-instance"):
                continue
            kdu_instance = kdu.get("kdu-instance")
            if kdu.get("k8scluster-type") in self.k8scluster_map:
                # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_kdu_instance = asyncio.ensure_future(
                    self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                        cluster_uuid=kdu.get("k8scluster-uuid"),
                        kdu_instance=kdu_instance,
                        vca_id=vca_id,
                        namespace=kdu.get("namespace"),
                    )
                )
            else:
                self.logger.error(
                    logging_text
                    + "Unknown k8s deployment type {}".format(
                        kdu.get("k8scluster-type")
                    )
                )
                continue
            tasks_dict_info[
                task_delete_kdu_instance
            ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

        # remove from RO
        stage[1] = "Deleting ns from VIM."
        if self.ng_ro:
            task_delete_ro = asyncio.ensure_future(
                self._terminate_ng_ro(
                    logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                )
            )
        else:
            task_delete_ro = asyncio.ensure_future(
                self._terminate_RO(
                    logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                )
            )
        tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

        # rest of staff will be done at finally

    except (
        ROclient.ROClientException,
        DbException,
        LcmException,
        N2VCException,
    ) as e:
        self.logger.error(logging_text + "Exit Exception {}".format(e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(
            logging_text + "Cancelled Exception while '{}'".format(stage[1])
        )
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(
            logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
            exc_info=True,
        )
    finally:
        if exc:
            error_list.append(str(exc))
        try:
            # wait for pending tasks
            if tasks_dict_info:
                stage[1] = "Waiting for terminate pending tasks."
                self.logger.debug(logging_text + stage[1])
                error_list += await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    timeout_ns_terminate,
                    stage,
                    nslcmop_id,
                )
                stage[1] = stage[2] = ""
        except asyncio.CancelledError:
            error_list.append("Cancelled")
            # TODO cancell all tasks
        except Exception as exc:
            # NOTE(review): rebinds the outer 'exc'; harmless here since it is
            # only read above, but worth confirming on any future change
            error_list.append(str(exc))
        # update status at database
        if error_list:
            error_detail = "; ".join(error_list)
            # self.logger.error(logging_text + error_detail)
            error_description_nslcmop = "{} Detail: {}".format(
                stage[0], error_detail
            )
            error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                nslcmop_id, stage[0]
            )

            db_nsr_update["operational-status"] = "failed"
            db_nsr_update["detailed-status"] = (
                error_description_nsr + " Detail: " + error_detail
            )
            db_nslcmop_update["detailed-status"] = error_detail
            nslcmop_operation_state = "FAILED"
            ns_state = "BROKEN"
        else:
            error_detail = None
            error_description_nsr = error_description_nslcmop = None
            ns_state = "NOT_INSTANTIATED"
            db_nsr_update["operational-status"] = "terminated"
            db_nsr_update["detailed-status"] = "Done"
            db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
            db_nslcmop_update["detailed-status"] = "Done"
            nslcmop_operation_state = "COMPLETED"

        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=ns_state,
                current_operation="IDLE",
                current_operation_id=None,
                error_description=error_description_nsr,
                error_detail=error_detail,
                other_update=db_nsr_update,
            )
        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )
        if ns_state == "NOT_INSTANTIATED":
            try:
                # propagate the final state to all the VNFRs of this NS
                self.db.set_list(
                    "vnfrs",
                    {"nsr-id-ref": nsr_id},
                    {"_admin.nsState": "NOT_INSTANTIATED"},
                )
            except DbException as e:
                self.logger.warn(
                    logging_text
                    + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                        nsr_id, e
                    )
                )
        if operation_params:
            autoremove = operation_params.get("autoremove", False)
        if nslcmop_operation_state:
            try:
                # notify NBI and other subscribers; autoremove lets NBI delete the record
                await self.msg.aiowrite(
                    "ns",
                    "terminated",
                    {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                        "autoremove": autoremove,
                    },
                    loop=self.loop,
                )
            except Exception as e:
                self.logger.error(
                    logging_text + "kafka_write notification Exception {}".format(e)
                )

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4746
async def _wait_for_tasks(
    self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
):
    """Wait for a set of asyncio tasks, reporting progress and collecting errors.

    :param logging_text: prefix prepended to every log line
    :param created_tasks_info: dict mapping task -> human-readable description
    :param timeout: overall budget in seconds for the whole set of tasks
    :param stage: 3-element status list; index 1 is updated with "done/total" progress
    :param nslcmop_id: operation id used to persist progress
    :param nsr_id: when given, errors are also written to the nsrs record
    :return: list of error detail strings (empty when everything succeeded)
    """
    time_start = time()
    error_detail_list = []
    error_list = []
    pending_tasks = list(created_tasks_info.keys())
    num_tasks = len(pending_tasks)
    num_done = 0
    stage[1] = "{}/{}.".format(num_done, num_tasks)
    self._write_op_status(nslcmop_id, stage)
    while pending_tasks:
        new_error = None
        # remaining budget shrinks with elapsed wall-clock time
        _timeout = timeout + time_start - time()
        # returns as soon as any task completes, so progress can be reported incrementally
        done, pending_tasks = await asyncio.wait(
            pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
        )
        num_done += len(done)
        if not done:  # Timeout
            # mark every still-pending task as timed out and stop waiting
            for task in pending_tasks:
                new_error = created_tasks_info[task] + ": Timeout"
                error_detail_list.append(new_error)
                error_list.append(new_error)
            break
        for task in done:
            if task.cancelled():
                exc = "Cancelled"
            else:
                exc = task.exception()
            if exc:
                if isinstance(exc, asyncio.TimeoutError):
                    exc = "Timeout"
                new_error = created_tasks_info[task] + ": {}".format(exc)
                error_list.append(created_tasks_info[task])
                error_detail_list.append(new_error)
                # known/domain exceptions are logged briefly; anything else gets a full traceback
                if isinstance(
                    exc,
                    (
                        str,
                        DbException,
                        N2VCException,
                        ROclient.ROClientException,
                        LcmException,
                        K8sException,
                        NgRoException,
                    ),
                ):
                    self.logger.error(logging_text + new_error)
                else:
                    exc_traceback = "".join(
                        traceback.format_exception(None, exc, exc.__traceback__)
                    )
                    self.logger.error(
                        logging_text
                        + created_tasks_info[task]
                        + " "
                        + exc_traceback
                    )
            else:
                self.logger.debug(
                    logging_text + created_tasks_info[task] + ": Done"
                )
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        if new_error:
            stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
            if nsr_id:  # update also nsr
                self.update_db_2(
                    "nsrs",
                    nsr_id,
                    {
                        "errorDescription": "Error at: " + ", ".join(error_list),
                        "errorDetail": ". ".join(error_detail_list),
                    },
                )
        self._write_op_status(nslcmop_id, stage)
    return error_detail_list
4823
4824 @staticmethod
4825 def _map_primitive_params(primitive_desc, params, instantiation_params):
4826 """
4827 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4828 The default-value is used. If it is between < > it look for a value at instantiation_params
4829 :param primitive_desc: portion of VNFD/NSD that describes primitive
4830 :param params: Params provided by user
4831 :param instantiation_params: Instantiation params provided by user
4832 :return: a dictionary with the calculated params
4833 """
4834 calculated_params = {}
4835 for parameter in primitive_desc.get("parameter", ()):
4836 param_name = parameter["name"]
4837 if param_name in params:
4838 calculated_params[param_name] = params[param_name]
4839 elif "default-value" in parameter or "value" in parameter:
4840 if "value" in parameter:
4841 calculated_params[param_name] = parameter["value"]
4842 else:
4843 calculated_params[param_name] = parameter["default-value"]
4844 if (
4845 isinstance(calculated_params[param_name], str)
4846 and calculated_params[param_name].startswith("<")
4847 and calculated_params[param_name].endswith(">")
4848 ):
4849 if calculated_params[param_name][1:-1] in instantiation_params:
4850 calculated_params[param_name] = instantiation_params[
4851 calculated_params[param_name][1:-1]
4852 ]
4853 else:
4854 raise LcmException(
4855 "Parameter {} needed to execute primitive {} not provided".format(
4856 calculated_params[param_name], primitive_desc["name"]
4857 )
4858 )
4859 else:
4860 raise LcmException(
4861 "Parameter {} needed to execute primitive {} not provided".format(
4862 param_name, primitive_desc["name"]
4863 )
4864 )
4865
4866 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4867 calculated_params[param_name] = yaml.safe_dump(
4868 calculated_params[param_name], default_flow_style=True, width=256
4869 )
4870 elif isinstance(calculated_params[param_name], str) and calculated_params[
4871 param_name
4872 ].startswith("!!yaml "):
4873 calculated_params[param_name] = calculated_params[param_name][7:]
4874 if parameter.get("data-type") == "INTEGER":
4875 try:
4876 calculated_params[param_name] = int(calculated_params[param_name])
4877 except ValueError: # error converting string to int
4878 raise LcmException(
4879 "Parameter {} of primitive {} must be integer".format(
4880 param_name, primitive_desc["name"]
4881 )
4882 )
4883 elif parameter.get("data-type") == "BOOLEAN":
4884 calculated_params[param_name] = not (
4885 (str(calculated_params[param_name])).lower() == "false"
4886 )
4887
4888 # add always ns_config_info if primitive name is config
4889 if primitive_desc["name"] == "config":
4890 if "ns_config_info" in instantiation_params:
4891 calculated_params["ns_config_info"] = instantiation_params[
4892 "ns_config_info"
4893 ]
4894 return calculated_params
4895
4896 def _look_for_deployed_vca(
4897 self,
4898 deployed_vca,
4899 member_vnf_index,
4900 vdu_id,
4901 vdu_count_index,
4902 kdu_name=None,
4903 ee_descriptor_id=None,
4904 ):
4905 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4906 for vca in deployed_vca:
4907 if not vca:
4908 continue
4909 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4910 continue
4911 if (
4912 vdu_count_index is not None
4913 and vdu_count_index != vca["vdu_count_index"]
4914 ):
4915 continue
4916 if kdu_name and kdu_name != vca["kdu_name"]:
4917 continue
4918 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4919 continue
4920 break
4921 else:
4922 # vca_deployed not found
4923 raise LcmException(
4924 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4925 " is not deployed".format(
4926 member_vnf_index,
4927 vdu_id,
4928 vdu_count_index,
4929 kdu_name,
4930 ee_descriptor_id,
4931 )
4932 )
4933 # get ee_id
4934 ee_id = vca.get("ee_id")
4935 vca_type = vca.get(
4936 "type", "lxc_proxy_charm"
4937 ) # default value for backward compatibility - proxy charm
4938 if not ee_id:
4939 raise LcmException(
4940 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4941 "execution environment".format(
4942 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4943 )
4944 )
4945 return ee_id, vca_type
4946
4947 async def _ns_execute_primitive(
4948 self,
4949 ee_id,
4950 primitive,
4951 primitive_params,
4952 retries=0,
4953 retries_interval=30,
4954 timeout=None,
4955 vca_type=None,
4956 db_dict=None,
4957 vca_id: str = None,
4958 ) -> (str, str):
4959 try:
4960 if primitive == "config":
4961 primitive_params = {"params": primitive_params}
4962
4963 vca_type = vca_type or "lxc_proxy_charm"
4964
4965 while retries >= 0:
4966 try:
4967 output = await asyncio.wait_for(
4968 self.vca_map[vca_type].exec_primitive(
4969 ee_id=ee_id,
4970 primitive_name=primitive,
4971 params_dict=primitive_params,
4972 progress_timeout=self.timeout_progress_primitive,
4973 total_timeout=self.timeout_primitive,
4974 db_dict=db_dict,
4975 vca_id=vca_id,
4976 vca_type=vca_type,
4977 ),
4978 timeout=timeout or self.timeout_primitive,
4979 )
4980 # execution was OK
4981 break
4982 except asyncio.CancelledError:
4983 raise
4984 except Exception as e: # asyncio.TimeoutError
4985 if isinstance(e, asyncio.TimeoutError):
4986 e = "Timeout"
4987 retries -= 1
4988 if retries >= 0:
4989 self.logger.debug(
4990 "Error executing action {} on {} -> {}".format(
4991 primitive, ee_id, e
4992 )
4993 )
4994 # wait and retry
4995 await asyncio.sleep(retries_interval, loop=self.loop)
4996 else:
4997 return "FAILED", str(e)
4998
4999 return "COMPLETED", output
5000
5001 except (LcmException, asyncio.CancelledError):
5002 raise
5003 except Exception as e:
5004 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5005
5006 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5007 """
5008 Updating the vca_status with latest juju information in nsrs record
5009 :param: nsr_id: Id of the nsr
5010 :param: nslcmop_id: Id of the nslcmop
5011 :return: None
5012 """
5013
5014 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5015 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5016 vca_id = self.get_vca_id({}, db_nsr)
5017 if db_nsr["_admin"]["deployed"]["K8s"]:
5018 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5019 cluster_uuid, kdu_instance, cluster_type = (
5020 k8s["k8scluster-uuid"],
5021 k8s["kdu-instance"],
5022 k8s["k8scluster-type"],
5023 )
5024 await self._on_update_k8s_db(
5025 cluster_uuid=cluster_uuid,
5026 kdu_instance=kdu_instance,
5027 filter={"_id": nsr_id},
5028 vca_id=vca_id,
5029 cluster_type=cluster_type,
5030 )
5031 else:
5032 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5033 table, filter = "nsrs", {"_id": nsr_id}
5034 path = "_admin.deployed.VCA.{}.".format(vca_index)
5035 await self._on_update_n2vc_db(table, filter, path, {})
5036
5037 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5038 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5039
    async def action(self, nsr_id, nslcmop_id):
        """Execute a primitive (action) on a deployed NS, VNF, VDU or KDU.

        Reads the operation parameters from the "nslcmops" record, locates the
        primitive in the matching descriptor configuration, and runs it either
        through the K8s connector (KDU upgrade/rollback/status, or actions
        declared in the KDU configuration) or through the deployed VCA
        execution environment. Operation results are always persisted to the
        database in the ``finally`` block.

        :param nsr_id: id of the NS instance record ("nsrs" collection)
        :param nslcmop_id: id of the operation record ("nslcmops" collection)
        :return: (nslcmop_operation_state, detailed_status) from the finally
            block; returns None only when the HA lock is not acquired
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params may arrive JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                if db_vnfr.get("kdur"):
                    # kdur additionalParams are stored JSON-encoded; decode each
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned in the vnf_index branch
            # above; this call would raise NameError for a pure NS-level
            # action — confirm against callers.
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            if not config_primitive_desc:
                # KDU built-in operations do not need a descriptor entry
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                # The descriptor may map the user-facing primitive name to a
                # different execution-environment primitive
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # Gather the additionalParams applicable at the chosen level
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loop variables rebind the outer
                # "primitive" name; only primitive_name is used afterwards
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # Declared actions on non-helm clusters run via exec_primitive
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from the params wins; otherwise take the stored
                    # model, stripping a ":version" suffix if present
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        # outer timeout slightly larger so the connector's own
                        # timeout fires first
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # Action declared in the KDU configuration
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                # Any non-empty connector output counts as success
                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # Non-KDU path: run the primitive on the deployed VCA
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            if exc:
                # Record the same FAILED text in the operation update, the
                # local detailed_status and the nsr error description
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            # The return in this finally block overrides the bare return of
            # the try block (and suppresses any in-flight exception)
            return nslcmop_operation_state, detailed_status
5403
5404 async def terminate_vdus(
5405 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5406 ):
5407 """This method terminates VDUs
5408
5409 Args:
5410 db_vnfr: VNF instance record
5411 member_vnf_index: VNF index to identify the VDUs to be removed
5412 db_nsr: NS instance record
5413 update_db_nslcmops: Nslcmop update record
5414 """
5415 vca_scaling_info = []
5416 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5417 scaling_info["scaling_direction"] = "IN"
5418 scaling_info["vdu-delete"] = {}
5419 scaling_info["kdu-delete"] = {}
5420 db_vdur = db_vnfr.get("vdur")
5421 vdur_list = copy(db_vdur)
5422 count_index = 0
5423 for index, vdu in enumerate(vdur_list):
5424 vca_scaling_info.append(
5425 {
5426 "osm_vdu_id": vdu["vdu-id-ref"],
5427 "member-vnf-index": member_vnf_index,
5428 "type": "delete",
5429 "vdu_index": count_index,
5430 })
5431 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5432 scaling_info["vdu"].append(
5433 {
5434 "name": vdu.get("name") or vdu.get("vdu-name"),
5435 "vdu_id": vdu["vdu-id-ref"],
5436 "interface": [],
5437 })
5438 for interface in vdu["interfaces"]:
5439 scaling_info["vdu"][index]["interface"].append(
5440 {
5441 "name": interface["name"],
5442 "ip_address": interface["ip-address"],
5443 "mac_address": interface.get("mac-address"),
5444 })
5445 self.logger.info("NS update scaling info{}".format(scaling_info))
5446 stage[2] = "Terminating VDUs"
5447 if scaling_info.get("vdu-delete"):
5448 # scale_process = "RO"
5449 if self.ro_config.get("ng"):
5450 await self._scale_ng_ro(
5451 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5452 )
5453
5454 async def remove_vnf(
5455 self, nsr_id, nslcmop_id, vnf_instance_id
5456 ):
5457 """This method is to Remove VNF instances from NS.
5458
5459 Args:
5460 nsr_id: NS instance id
5461 nslcmop_id: nslcmop id of update
5462 vnf_instance_id: id of the VNF instance to be removed
5463
5464 Returns:
5465 result: (str, str) COMPLETED/FAILED, details
5466 """
5467 try:
5468 db_nsr_update = {}
5469 logging_text = "Task ns={} update ".format(nsr_id)
5470 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5471 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5472 if check_vnfr_count > 1:
5473 stage = ["", "", ""]
5474 step = "Getting nslcmop from database"
5475 self.logger.debug(step + " after having waited for previous tasks to be completed")
5476 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5477 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5478 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5479 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5480 """ db_vnfr = self.db.get_one(
5481 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5482
5483 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5484 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5485
5486 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5487 constituent_vnfr.remove(db_vnfr.get("_id"))
5488 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5489 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5490 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5491 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5492 return "COMPLETED", "Done"
5493 else:
5494 step = "Terminate VNF Failed with"
5495 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5496 vnf_instance_id))
5497 except (LcmException, asyncio.CancelledError):
5498 raise
5499 except Exception as e:
5500 self.logger.debug("Error removing VNF {}".format(e))
5501 return "FAILED", "Error removing VNF {}".format(e)
5502
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the VNF's existing VDUs, rewrites the vnfr from the latest
        VNFD revision (connection points, vdur from the operation's "newVdur"
        parameter), and instantiates the new resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # Rebuild the vnfr connection points from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # The new vdur list is taken from the operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            # Re-read the vnfr so the instantiation below sees the new content
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                # NOTE(review): cloud_init_list is filled below but never read
                # afterwards in this method — confirm whether the parse is
                # only kept for its validation side effect
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                # NOTE(review): count_index is never incremented, so every
                # VDU is registered with vdu_index 0 — confirm intended
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5606
5607 async def _ns_charm_upgrade(
5608 self,
5609 ee_id,
5610 charm_id,
5611 charm_type,
5612 path,
5613 timeout: float = None,
5614 ) -> (str, str):
5615 """This method upgrade charms in VNF instances
5616
5617 Args:
5618 ee_id: Execution environment id
5619 path: Local path to the charm
5620 charm_id: charm-id
5621 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5622 timeout: (Float) Timeout for the ns update operation
5623
5624 Returns:
5625 result: (str, str) COMPLETED/FAILED, details
5626 """
5627 try:
5628 charm_type = charm_type or "lxc_proxy_charm"
5629 output = await self.vca_map[charm_type].upgrade_charm(
5630 ee_id=ee_id,
5631 path=path,
5632 charm_id=charm_id,
5633 charm_type=charm_type,
5634 timeout=timeout or self.timeout_ns_update,
5635 )
5636
5637 if output:
5638 return "COMPLETED", output
5639
5640 except (LcmException, asyncio.CancelledError):
5641 raise
5642
5643 except Exception as e:
5644
5645 self.logger.debug("Error upgrading charm {}".format(path))
5646
5647 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5648
5649 async def update(self, nsr_id, nslcmop_id):
5650 """Update NS according to different update types
5651
5652 This method performs upgrade of VNF instances then updates the revision
5653 number in VNF record
5654
5655 Args:
5656 nsr_id: Network service will be updated
5657 nslcmop_id: ns lcm operation id
5658
5659 Returns:
5660 It may raise DbException, LcmException, N2VCException, K8sException
5661
5662 """
5663 # Try to lock HA task here
5664 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5665 if not task_is_locked_by_me:
5666 return
5667
5668 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5669 self.logger.debug(logging_text + "Enter")
5670
5671 # Set the required variables to be filled up later
5672 db_nsr = None
5673 db_nslcmop_update = {}
5674 vnfr_update = {}
5675 nslcmop_operation_state = None
5676 db_nsr_update = {}
5677 error_description_nslcmop = ""
5678 exc = None
5679 change_type = "updated"
5680 detailed_status = ""
5681
5682 try:
5683 # wait for any previous tasks in process
5684 step = "Waiting for previous operations to terminate"
5685 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5686 self._write_ns_status(
5687 nsr_id=nsr_id,
5688 ns_state=None,
5689 current_operation="UPDATING",
5690 current_operation_id=nslcmop_id,
5691 )
5692
5693 step = "Getting nslcmop from database"
5694 db_nslcmop = self.db.get_one(
5695 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5696 )
5697 update_type = db_nslcmop["operationParams"]["updateType"]
5698
5699 step = "Getting nsr from database"
5700 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5701 old_operational_status = db_nsr["operational-status"]
5702 db_nsr_update["operational-status"] = "updating"
5703 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5704 nsr_deployed = db_nsr["_admin"].get("deployed")
5705
5706 if update_type == "CHANGE_VNFPKG":
5707
5708 # Get the input parameters given through update request
5709 vnf_instance_id = db_nslcmop["operationParams"][
5710 "changeVnfPackageData"
5711 ].get("vnfInstanceId")
5712
5713 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5714 "vnfdId"
5715 )
5716 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5717
5718 step = "Getting vnfr from database"
5719 db_vnfr = self.db.get_one(
5720 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5721 )
5722
5723 step = "Getting vnfds from database"
5724 # Latest VNFD
5725 latest_vnfd = self.db.get_one(
5726 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5727 )
5728 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5729
5730 # Current VNFD
5731 current_vnf_revision = db_vnfr.get("revision", 1)
5732 current_vnfd = self.db.get_one(
5733 "vnfds_revisions",
5734 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5735 fail_on_empty=False,
5736 )
5737 # Charm artifact paths will be filled up later
5738 (
5739 current_charm_artifact_path,
5740 target_charm_artifact_path,
5741 charm_artifact_paths,
5742 ) = ([], [], [])
5743
5744 step = "Checking if revision has changed in VNFD"
5745 if current_vnf_revision != latest_vnfd_revision:
5746
5747 change_type = "policy_updated"
5748
5749 # There is new revision of VNFD, update operation is required
5750 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5751 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5752
5753 step = "Removing the VNFD packages if they exist in the local path"
5754 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5755 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5756
5757 step = "Get the VNFD packages from FSMongo"
5758 self.fs.sync(from_path=latest_vnfd_path)
5759 self.fs.sync(from_path=current_vnfd_path)
5760
5761 step = (
5762 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5763 )
5764 base_folder = latest_vnfd["_admin"]["storage"]
5765
5766 for charm_index, charm_deployed in enumerate(
5767 get_iterable(nsr_deployed, "VCA")
5768 ):
5769 vnf_index = db_vnfr.get("member-vnf-index-ref")
5770
5771 # Getting charm-id and charm-type
5772 if charm_deployed.get("member-vnf-index") == vnf_index:
5773 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5774 charm_type = charm_deployed.get("type")
5775
5776 # Getting ee-id
5777 ee_id = charm_deployed.get("ee_id")
5778
5779 step = "Getting descriptor config"
5780 descriptor_config = get_configuration(
5781 current_vnfd, current_vnfd["id"]
5782 )
5783
5784 if "execution-environment-list" in descriptor_config:
5785 ee_list = descriptor_config.get(
5786 "execution-environment-list", []
5787 )
5788 else:
5789 ee_list = []
5790
5791 # There could be several charm used in the same VNF
5792 for ee_item in ee_list:
5793 if ee_item.get("juju"):
5794
5795 step = "Getting charm name"
5796 charm_name = ee_item["juju"].get("charm")
5797
5798 step = "Setting Charm artifact paths"
5799 current_charm_artifact_path.append(
5800 get_charm_artifact_path(
5801 base_folder,
5802 charm_name,
5803 charm_type,
5804 current_vnf_revision,
5805 )
5806 )
5807 target_charm_artifact_path.append(
5808 get_charm_artifact_path(
5809 base_folder,
5810 charm_name,
5811 charm_type,
5812 latest_vnfd_revision,
5813 )
5814 )
5815
5816 charm_artifact_paths = zip(
5817 current_charm_artifact_path, target_charm_artifact_path
5818 )
5819
5820 step = "Checking if software version has changed in VNFD"
5821 if find_software_version(current_vnfd) != find_software_version(
5822 latest_vnfd
5823 ):
5824
5825 step = "Checking if existing VNF has charm"
5826 for current_charm_path, target_charm_path in list(
5827 charm_artifact_paths
5828 ):
5829 if current_charm_path:
5830 raise LcmException(
5831 "Software version change is not supported as VNF instance {} has charm.".format(
5832 vnf_instance_id
5833 )
5834 )
5835
5836 # There is no change in the charm package, then redeploy the VNF
5837 # based on new descriptor
5838 step = "Redeploying VNF"
5839 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5840 (
5841 result,
5842 detailed_status
5843 ) = await self._ns_redeploy_vnf(
5844 nsr_id,
5845 nslcmop_id,
5846 latest_vnfd,
5847 db_vnfr,
5848 db_nsr
5849 )
5850 if result == "FAILED":
5851 nslcmop_operation_state = result
5852 error_description_nslcmop = detailed_status
5853 db_nslcmop_update["detailed-status"] = detailed_status
5854 self.logger.debug(
5855 logging_text
5856 + " step {} Done with result {} {}".format(
5857 step, nslcmop_operation_state, detailed_status
5858 )
5859 )
5860
5861 else:
5862 step = "Checking if any charm package has changed or not"
5863 for current_charm_path, target_charm_path in list(
5864 charm_artifact_paths
5865 ):
5866 if (
5867 current_charm_path
5868 and target_charm_path
5869 and self.check_charm_hash_changed(
5870 current_charm_path, target_charm_path
5871 )
5872 ):
5873
5874 step = "Checking whether VNF uses juju bundle"
5875 if check_juju_bundle_existence(current_vnfd):
5876
5877 raise LcmException(
5878 "Charm upgrade is not supported for the instance which"
5879 " uses juju-bundle: {}".format(
5880 check_juju_bundle_existence(current_vnfd)
5881 )
5882 )
5883
5884 step = "Upgrading Charm"
5885 (
5886 result,
5887 detailed_status,
5888 ) = await self._ns_charm_upgrade(
5889 ee_id=ee_id,
5890 charm_id=charm_id,
5891 charm_type=charm_type,
5892 path=self.fs.path + target_charm_path,
5893 timeout=timeout_seconds,
5894 )
5895
5896 if result == "FAILED":
5897 nslcmop_operation_state = result
5898 error_description_nslcmop = detailed_status
5899
5900 db_nslcmop_update["detailed-status"] = detailed_status
5901 self.logger.debug(
5902 logging_text
5903 + " step {} Done with result {} {}".format(
5904 step, nslcmop_operation_state, detailed_status
5905 )
5906 )
5907
5908 step = "Updating policies"
5909 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5910 result = "COMPLETED"
5911 detailed_status = "Done"
5912 db_nslcmop_update["detailed-status"] = "Done"
5913
5914 # If nslcmop_operation_state is None, so any operation is not failed.
5915 if not nslcmop_operation_state:
5916 nslcmop_operation_state = "COMPLETED"
5917
5918 # If update CHANGE_VNFPKG nslcmop_operation is successful
5919 # vnf revision need to be updated
5920 vnfr_update["revision"] = latest_vnfd_revision
5921 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5922
5923 self.logger.debug(
5924 logging_text
5925 + " task Done with result {} {}".format(
5926 nslcmop_operation_state, detailed_status
5927 )
5928 )
5929 elif update_type == "REMOVE_VNF":
5930 # This part is included in https://osm.etsi.org/gerrit/11876
5931 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5932 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5933 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5934 step = "Removing VNF"
5935 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5936 if result == "FAILED":
5937 nslcmop_operation_state = result
5938 error_description_nslcmop = detailed_status
5939 db_nslcmop_update["detailed-status"] = detailed_status
5940 change_type = "vnf_terminated"
5941 if not nslcmop_operation_state:
5942 nslcmop_operation_state = "COMPLETED"
5943 self.logger.debug(
5944 logging_text
5945 + " task Done with result {} {}".format(
5946 nslcmop_operation_state, detailed_status
5947 )
5948 )
5949
5950 elif update_type == "OPERATE_VNF":
5951 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5952 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5953 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5954 (result, detailed_status) = await self.rebuild_start_stop(
5955 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5956 )
5957 if result == "FAILED":
5958 nslcmop_operation_state = result
5959 error_description_nslcmop = detailed_status
5960 db_nslcmop_update["detailed-status"] = detailed_status
5961 if not nslcmop_operation_state:
5962 nslcmop_operation_state = "COMPLETED"
5963 self.logger.debug(
5964 logging_text
5965 + " task Done with result {} {}".format(
5966 nslcmop_operation_state, detailed_status
5967 )
5968 )
5969
5970 # If nslcmop_operation_state is None, so any operation is not failed.
5971 # All operations are executed in overall.
5972 if not nslcmop_operation_state:
5973 nslcmop_operation_state = "COMPLETED"
5974 db_nsr_update["operational-status"] = old_operational_status
5975
5976 except (DbException, LcmException, N2VCException, K8sException) as e:
5977 self.logger.error(logging_text + "Exit Exception {}".format(e))
5978 exc = e
5979 except asyncio.CancelledError:
5980 self.logger.error(
5981 logging_text + "Cancelled Exception while '{}'".format(step)
5982 )
5983 exc = "Operation was cancelled"
5984 except asyncio.TimeoutError:
5985 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5986 exc = "Timeout"
5987 except Exception as e:
5988 exc = traceback.format_exc()
5989 self.logger.critical(
5990 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5991 exc_info=True,
5992 )
5993 finally:
5994 if exc:
5995 db_nslcmop_update[
5996 "detailed-status"
5997 ] = (
5998 detailed_status
5999 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
6000 nslcmop_operation_state = "FAILED"
6001 db_nsr_update["operational-status"] = old_operational_status
6002 if db_nsr:
6003 self._write_ns_status(
6004 nsr_id=nsr_id,
6005 ns_state=db_nsr["nsState"],
6006 current_operation="IDLE",
6007 current_operation_id=None,
6008 other_update=db_nsr_update,
6009 )
6010
6011 self._write_op_status(
6012 op_id=nslcmop_id,
6013 stage="",
6014 error_message=error_description_nslcmop,
6015 operation_state=nslcmop_operation_state,
6016 other_update=db_nslcmop_update,
6017 )
6018
6019 if nslcmop_operation_state:
6020 try:
6021 msg = {
6022 "nsr_id": nsr_id,
6023 "nslcmop_id": nslcmop_id,
6024 "operationState": nslcmop_operation_state,
6025 }
6026 if change_type in ("vnf_terminated", "policy_updated"):
6027 msg.update({"vnf_member_index": member_vnf_index})
6028 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6029 except Exception as e:
6030 self.logger.error(
6031 logging_text + "kafka_write notification Exception {}".format(e)
6032 )
6033 self.logger.debug(logging_text + "Exit")
6034 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6035 return nslcmop_operation_state, detailed_status
6036
6037 async def scale(self, nsr_id, nslcmop_id):
6038 # Try to lock HA task here
6039 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6040 if not task_is_locked_by_me:
6041 return
6042
6043 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6044 stage = ["", "", ""]
6045 tasks_dict_info = {}
6046 # ^ stage, step, VIM progress
6047 self.logger.debug(logging_text + "Enter")
6048 # get all needed from database
6049 db_nsr = None
6050 db_nslcmop_update = {}
6051 db_nsr_update = {}
6052 exc = None
6053 # in case of error, indicates what part of scale was failed to put nsr at error status
6054 scale_process = None
6055 old_operational_status = ""
6056 old_config_status = ""
6057 nsi_id = None
6058 try:
6059 # wait for any previous tasks in process
6060 step = "Waiting for previous operations to terminate"
6061 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6062 self._write_ns_status(
6063 nsr_id=nsr_id,
6064 ns_state=None,
6065 current_operation="SCALING",
6066 current_operation_id=nslcmop_id,
6067 )
6068
6069 step = "Getting nslcmop from database"
6070 self.logger.debug(
6071 step + " after having waited for previous tasks to be completed"
6072 )
6073 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6074
6075 step = "Getting nsr from database"
6076 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6077 old_operational_status = db_nsr["operational-status"]
6078 old_config_status = db_nsr["config-status"]
6079
6080 step = "Parsing scaling parameters"
6081 db_nsr_update["operational-status"] = "scaling"
6082 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6083 nsr_deployed = db_nsr["_admin"].get("deployed")
6084
6085 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6086 "scaleByStepData"
6087 ]["member-vnf-index"]
6088 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6089 "scaleByStepData"
6090 ]["scaling-group-descriptor"]
6091 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6092 # for backward compatibility
6093 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6094 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6095 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6096 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6097
6098 step = "Getting vnfr from database"
6099 db_vnfr = self.db.get_one(
6100 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6101 )
6102
6103 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6104
6105 step = "Getting vnfd from database"
6106 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6107
6108 base_folder = db_vnfd["_admin"]["storage"]
6109
6110 step = "Getting scaling-group-descriptor"
6111 scaling_descriptor = find_in_list(
6112 get_scaling_aspect(db_vnfd),
6113 lambda scale_desc: scale_desc["name"] == scaling_group,
6114 )
6115 if not scaling_descriptor:
6116 raise LcmException(
6117 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6118 "at vnfd:scaling-group-descriptor".format(scaling_group)
6119 )
6120
6121 step = "Sending scale order to VIM"
6122 # TODO check if ns is in a proper status
6123 nb_scale_op = 0
6124 if not db_nsr["_admin"].get("scaling-group"):
6125 self.update_db_2(
6126 "nsrs",
6127 nsr_id,
6128 {
6129 "_admin.scaling-group": [
6130 {"name": scaling_group, "nb-scale-op": 0}
6131 ]
6132 },
6133 )
6134 admin_scale_index = 0
6135 else:
6136 for admin_scale_index, admin_scale_info in enumerate(
6137 db_nsr["_admin"]["scaling-group"]
6138 ):
6139 if admin_scale_info["name"] == scaling_group:
6140 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6141 break
6142 else: # not found, set index one plus last element and add new entry with the name
6143 admin_scale_index += 1
6144 db_nsr_update[
6145 "_admin.scaling-group.{}.name".format(admin_scale_index)
6146 ] = scaling_group
6147
6148 vca_scaling_info = []
6149 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6150 if scaling_type == "SCALE_OUT":
6151 if "aspect-delta-details" not in scaling_descriptor:
6152 raise LcmException(
6153 "Aspect delta details not fount in scaling descriptor {}".format(
6154 scaling_descriptor["name"]
6155 )
6156 )
6157 # count if max-instance-count is reached
6158 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6159
6160 scaling_info["scaling_direction"] = "OUT"
6161 scaling_info["vdu-create"] = {}
6162 scaling_info["kdu-create"] = {}
6163 for delta in deltas:
6164 for vdu_delta in delta.get("vdu-delta", {}):
6165 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6166 # vdu_index also provides the number of instance of the targeted vdu
6167 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6168 cloud_init_text = self._get_vdu_cloud_init_content(
6169 vdud, db_vnfd
6170 )
6171 if cloud_init_text:
6172 additional_params = (
6173 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6174 or {}
6175 )
6176 cloud_init_list = []
6177
6178 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6179 max_instance_count = 10
6180 if vdu_profile and "max-number-of-instances" in vdu_profile:
6181 max_instance_count = vdu_profile.get(
6182 "max-number-of-instances", 10
6183 )
6184
6185 default_instance_num = get_number_of_instances(
6186 db_vnfd, vdud["id"]
6187 )
6188 instances_number = vdu_delta.get("number-of-instances", 1)
6189 nb_scale_op += instances_number
6190
6191 new_instance_count = nb_scale_op + default_instance_num
6192 # Control if new count is over max and vdu count is less than max.
6193 # Then assign new instance count
6194 if new_instance_count > max_instance_count > vdu_count:
6195 instances_number = new_instance_count - max_instance_count
6196 else:
6197 instances_number = instances_number
6198
6199 if new_instance_count > max_instance_count:
6200 raise LcmException(
6201 "reached the limit of {} (max-instance-count) "
6202 "scaling-out operations for the "
6203 "scaling-group-descriptor '{}'".format(
6204 nb_scale_op, scaling_group
6205 )
6206 )
6207 for x in range(vdu_delta.get("number-of-instances", 1)):
6208 if cloud_init_text:
6209 # TODO Information of its own ip is not available because db_vnfr is not updated.
6210 additional_params["OSM"] = get_osm_params(
6211 db_vnfr, vdu_delta["id"], vdu_index + x
6212 )
6213 cloud_init_list.append(
6214 self._parse_cloud_init(
6215 cloud_init_text,
6216 additional_params,
6217 db_vnfd["id"],
6218 vdud["id"],
6219 )
6220 )
6221 vca_scaling_info.append(
6222 {
6223 "osm_vdu_id": vdu_delta["id"],
6224 "member-vnf-index": vnf_index,
6225 "type": "create",
6226 "vdu_index": vdu_index + x,
6227 }
6228 )
6229 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6230 for kdu_delta in delta.get("kdu-resource-delta", {}):
6231 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6232 kdu_name = kdu_profile["kdu-name"]
6233 resource_name = kdu_profile.get("resource-name", "")
6234
6235 # Might have different kdus in the same delta
6236 # Should have list for each kdu
6237 if not scaling_info["kdu-create"].get(kdu_name, None):
6238 scaling_info["kdu-create"][kdu_name] = []
6239
6240 kdur = get_kdur(db_vnfr, kdu_name)
6241 if kdur.get("helm-chart"):
6242 k8s_cluster_type = "helm-chart-v3"
6243 self.logger.debug("kdur: {}".format(kdur))
6244 if (
6245 kdur.get("helm-version")
6246 and kdur.get("helm-version") == "v2"
6247 ):
6248 k8s_cluster_type = "helm-chart"
6249 elif kdur.get("juju-bundle"):
6250 k8s_cluster_type = "juju-bundle"
6251 else:
6252 raise LcmException(
6253 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6254 "juju-bundle. Maybe an old NBI version is running".format(
6255 db_vnfr["member-vnf-index-ref"], kdu_name
6256 )
6257 )
6258
6259 max_instance_count = 10
6260 if kdu_profile and "max-number-of-instances" in kdu_profile:
6261 max_instance_count = kdu_profile.get(
6262 "max-number-of-instances", 10
6263 )
6264
6265 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6266 deployed_kdu, _ = get_deployed_kdu(
6267 nsr_deployed, kdu_name, vnf_index
6268 )
6269 if deployed_kdu is None:
6270 raise LcmException(
6271 "KDU '{}' for vnf '{}' not deployed".format(
6272 kdu_name, vnf_index
6273 )
6274 )
6275 kdu_instance = deployed_kdu.get("kdu-instance")
6276 instance_num = await self.k8scluster_map[
6277 k8s_cluster_type
6278 ].get_scale_count(
6279 resource_name,
6280 kdu_instance,
6281 vca_id=vca_id,
6282 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6283 kdu_model=deployed_kdu.get("kdu-model"),
6284 )
6285 kdu_replica_count = instance_num + kdu_delta.get(
6286 "number-of-instances", 1
6287 )
6288
6289 # Control if new count is over max and instance_num is less than max.
6290 # Then assign max instance number to kdu replica count
6291 if kdu_replica_count > max_instance_count > instance_num:
6292 kdu_replica_count = max_instance_count
6293 if kdu_replica_count > max_instance_count:
6294 raise LcmException(
6295 "reached the limit of {} (max-instance-count) "
6296 "scaling-out operations for the "
6297 "scaling-group-descriptor '{}'".format(
6298 instance_num, scaling_group
6299 )
6300 )
6301
6302 for x in range(kdu_delta.get("number-of-instances", 1)):
6303 vca_scaling_info.append(
6304 {
6305 "osm_kdu_id": kdu_name,
6306 "member-vnf-index": vnf_index,
6307 "type": "create",
6308 "kdu_index": instance_num + x - 1,
6309 }
6310 )
6311 scaling_info["kdu-create"][kdu_name].append(
6312 {
6313 "member-vnf-index": vnf_index,
6314 "type": "create",
6315 "k8s-cluster-type": k8s_cluster_type,
6316 "resource-name": resource_name,
6317 "scale": kdu_replica_count,
6318 }
6319 )
6320 elif scaling_type == "SCALE_IN":
6321 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6322
6323 scaling_info["scaling_direction"] = "IN"
6324 scaling_info["vdu-delete"] = {}
6325 scaling_info["kdu-delete"] = {}
6326
6327 for delta in deltas:
6328 for vdu_delta in delta.get("vdu-delta", {}):
6329 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6330 min_instance_count = 0
6331 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6332 if vdu_profile and "min-number-of-instances" in vdu_profile:
6333 min_instance_count = vdu_profile["min-number-of-instances"]
6334
6335 default_instance_num = get_number_of_instances(
6336 db_vnfd, vdu_delta["id"]
6337 )
6338 instance_num = vdu_delta.get("number-of-instances", 1)
6339 nb_scale_op -= instance_num
6340
6341 new_instance_count = nb_scale_op + default_instance_num
6342
6343 if new_instance_count < min_instance_count < vdu_count:
6344 instances_number = min_instance_count - new_instance_count
6345 else:
6346 instances_number = instance_num
6347
6348 if new_instance_count < min_instance_count:
6349 raise LcmException(
6350 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6351 "scaling-group-descriptor '{}'".format(
6352 nb_scale_op, scaling_group
6353 )
6354 )
6355 for x in range(vdu_delta.get("number-of-instances", 1)):
6356 vca_scaling_info.append(
6357 {
6358 "osm_vdu_id": vdu_delta["id"],
6359 "member-vnf-index": vnf_index,
6360 "type": "delete",
6361 "vdu_index": vdu_index - 1 - x,
6362 }
6363 )
6364 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6365 for kdu_delta in delta.get("kdu-resource-delta", {}):
6366 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6367 kdu_name = kdu_profile["kdu-name"]
6368 resource_name = kdu_profile.get("resource-name", "")
6369
6370 if not scaling_info["kdu-delete"].get(kdu_name, None):
6371 scaling_info["kdu-delete"][kdu_name] = []
6372
6373 kdur = get_kdur(db_vnfr, kdu_name)
6374 if kdur.get("helm-chart"):
6375 k8s_cluster_type = "helm-chart-v3"
6376 self.logger.debug("kdur: {}".format(kdur))
6377 if (
6378 kdur.get("helm-version")
6379 and kdur.get("helm-version") == "v2"
6380 ):
6381 k8s_cluster_type = "helm-chart"
6382 elif kdur.get("juju-bundle"):
6383 k8s_cluster_type = "juju-bundle"
6384 else:
6385 raise LcmException(
6386 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6387 "juju-bundle. Maybe an old NBI version is running".format(
6388 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6389 )
6390 )
6391
6392 min_instance_count = 0
6393 if kdu_profile and "min-number-of-instances" in kdu_profile:
6394 min_instance_count = kdu_profile["min-number-of-instances"]
6395
6396 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6397 deployed_kdu, _ = get_deployed_kdu(
6398 nsr_deployed, kdu_name, vnf_index
6399 )
6400 if deployed_kdu is None:
6401 raise LcmException(
6402 "KDU '{}' for vnf '{}' not deployed".format(
6403 kdu_name, vnf_index
6404 )
6405 )
6406 kdu_instance = deployed_kdu.get("kdu-instance")
6407 instance_num = await self.k8scluster_map[
6408 k8s_cluster_type
6409 ].get_scale_count(
6410 resource_name,
6411 kdu_instance,
6412 vca_id=vca_id,
6413 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6414 kdu_model=deployed_kdu.get("kdu-model"),
6415 )
6416 kdu_replica_count = instance_num - kdu_delta.get(
6417 "number-of-instances", 1
6418 )
6419
6420 if kdu_replica_count < min_instance_count < instance_num:
6421 kdu_replica_count = min_instance_count
6422 if kdu_replica_count < min_instance_count:
6423 raise LcmException(
6424 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6425 "scaling-group-descriptor '{}'".format(
6426 instance_num, scaling_group
6427 )
6428 )
6429
6430 for x in range(kdu_delta.get("number-of-instances", 1)):
6431 vca_scaling_info.append(
6432 {
6433 "osm_kdu_id": kdu_name,
6434 "member-vnf-index": vnf_index,
6435 "type": "delete",
6436 "kdu_index": instance_num - x - 1,
6437 }
6438 )
6439 scaling_info["kdu-delete"][kdu_name].append(
6440 {
6441 "member-vnf-index": vnf_index,
6442 "type": "delete",
6443 "k8s-cluster-type": k8s_cluster_type,
6444 "resource-name": resource_name,
6445 "scale": kdu_replica_count,
6446 }
6447 )
6448
6449 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6450 vdu_delete = copy(scaling_info.get("vdu-delete"))
6451 if scaling_info["scaling_direction"] == "IN":
6452 for vdur in reversed(db_vnfr["vdur"]):
6453 if vdu_delete.get(vdur["vdu-id-ref"]):
6454 vdu_delete[vdur["vdu-id-ref"]] -= 1
6455 scaling_info["vdu"].append(
6456 {
6457 "name": vdur.get("name") or vdur.get("vdu-name"),
6458 "vdu_id": vdur["vdu-id-ref"],
6459 "interface": [],
6460 }
6461 )
6462 for interface in vdur["interfaces"]:
6463 scaling_info["vdu"][-1]["interface"].append(
6464 {
6465 "name": interface["name"],
6466 "ip_address": interface["ip-address"],
6467 "mac_address": interface.get("mac-address"),
6468 }
6469 )
6470 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6471
6472 # PRE-SCALE BEGIN
6473 step = "Executing pre-scale vnf-config-primitive"
6474 if scaling_descriptor.get("scaling-config-action"):
6475 for scaling_config_action in scaling_descriptor[
6476 "scaling-config-action"
6477 ]:
6478 if (
6479 scaling_config_action.get("trigger") == "pre-scale-in"
6480 and scaling_type == "SCALE_IN"
6481 ) or (
6482 scaling_config_action.get("trigger") == "pre-scale-out"
6483 and scaling_type == "SCALE_OUT"
6484 ):
6485 vnf_config_primitive = scaling_config_action[
6486 "vnf-config-primitive-name-ref"
6487 ]
6488 step = db_nslcmop_update[
6489 "detailed-status"
6490 ] = "executing pre-scale scaling-config-action '{}'".format(
6491 vnf_config_primitive
6492 )
6493
6494 # look for primitive
6495 for config_primitive in (
6496 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6497 ).get("config-primitive", ()):
6498 if config_primitive["name"] == vnf_config_primitive:
6499 break
6500 else:
6501 raise LcmException(
6502 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6503 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6504 "primitive".format(scaling_group, vnf_config_primitive)
6505 )
6506
6507 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6508 if db_vnfr.get("additionalParamsForVnf"):
6509 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6510
6511 scale_process = "VCA"
6512 db_nsr_update["config-status"] = "configuring pre-scaling"
6513 primitive_params = self._map_primitive_params(
6514 config_primitive, {}, vnfr_params
6515 )
6516
6517 # Pre-scale retry check: Check if this sub-operation has been executed before
6518 op_index = self._check_or_add_scale_suboperation(
6519 db_nslcmop,
6520 vnf_index,
6521 vnf_config_primitive,
6522 primitive_params,
6523 "PRE-SCALE",
6524 )
6525 if op_index == self.SUBOPERATION_STATUS_SKIP:
6526 # Skip sub-operation
6527 result = "COMPLETED"
6528 result_detail = "Done"
6529 self.logger.debug(
6530 logging_text
6531 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6532 vnf_config_primitive, result, result_detail
6533 )
6534 )
6535 else:
6536 if op_index == self.SUBOPERATION_STATUS_NEW:
6537 # New sub-operation: Get index of this sub-operation
6538 op_index = (
6539 len(db_nslcmop.get("_admin", {}).get("operations"))
6540 - 1
6541 )
6542 self.logger.debug(
6543 logging_text
6544 + "vnf_config_primitive={} New sub-operation".format(
6545 vnf_config_primitive
6546 )
6547 )
6548 else:
6549 # retry: Get registered params for this existing sub-operation
6550 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6551 op_index
6552 ]
6553 vnf_index = op.get("member_vnf_index")
6554 vnf_config_primitive = op.get("primitive")
6555 primitive_params = op.get("primitive_params")
6556 self.logger.debug(
6557 logging_text
6558 + "vnf_config_primitive={} Sub-operation retry".format(
6559 vnf_config_primitive
6560 )
6561 )
6562 # Execute the primitive, either with new (first-time) or registered (reintent) args
6563 ee_descriptor_id = config_primitive.get(
6564 "execution-environment-ref"
6565 )
6566 primitive_name = config_primitive.get(
6567 "execution-environment-primitive", vnf_config_primitive
6568 )
6569 ee_id, vca_type = self._look_for_deployed_vca(
6570 nsr_deployed["VCA"],
6571 member_vnf_index=vnf_index,
6572 vdu_id=None,
6573 vdu_count_index=None,
6574 ee_descriptor_id=ee_descriptor_id,
6575 )
6576 result, result_detail = await self._ns_execute_primitive(
6577 ee_id,
6578 primitive_name,
6579 primitive_params,
6580 vca_type=vca_type,
6581 vca_id=vca_id,
6582 )
6583 self.logger.debug(
6584 logging_text
6585 + "vnf_config_primitive={} Done with result {} {}".format(
6586 vnf_config_primitive, result, result_detail
6587 )
6588 )
6589 # Update operationState = COMPLETED | FAILED
6590 self._update_suboperation_status(
6591 db_nslcmop, op_index, result, result_detail
6592 )
6593
6594 if result == "FAILED":
6595 raise LcmException(result_detail)
6596 db_nsr_update["config-status"] = old_config_status
6597 scale_process = None
6598 # PRE-SCALE END
6599
6600 db_nsr_update[
6601 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6602 ] = nb_scale_op
6603 db_nsr_update[
6604 "_admin.scaling-group.{}.time".format(admin_scale_index)
6605 ] = time()
6606
6607 # SCALE-IN VCA - BEGIN
6608 if vca_scaling_info:
6609 step = db_nslcmop_update[
6610 "detailed-status"
6611 ] = "Deleting the execution environments"
6612 scale_process = "VCA"
6613 for vca_info in vca_scaling_info:
6614 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6615 member_vnf_index = str(vca_info["member-vnf-index"])
6616 self.logger.debug(
6617 logging_text + "vdu info: {}".format(vca_info)
6618 )
6619 if vca_info.get("osm_vdu_id"):
6620 vdu_id = vca_info["osm_vdu_id"]
6621 vdu_index = int(vca_info["vdu_index"])
6622 stage[
6623 1
6624 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6625 member_vnf_index, vdu_id, vdu_index
6626 )
6627 stage[2] = step = "Scaling in VCA"
6628 self._write_op_status(op_id=nslcmop_id, stage=stage)
6629 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6630 config_update = db_nsr["configurationStatus"]
6631 for vca_index, vca in enumerate(vca_update):
6632 if (
6633 (vca or vca.get("ee_id"))
6634 and vca["member-vnf-index"] == member_vnf_index
6635 and vca["vdu_count_index"] == vdu_index
6636 ):
6637 if vca.get("vdu_id"):
6638 config_descriptor = get_configuration(
6639 db_vnfd, vca.get("vdu_id")
6640 )
6641 elif vca.get("kdu_name"):
6642 config_descriptor = get_configuration(
6643 db_vnfd, vca.get("kdu_name")
6644 )
6645 else:
6646 config_descriptor = get_configuration(
6647 db_vnfd, db_vnfd["id"]
6648 )
6649 operation_params = (
6650 db_nslcmop.get("operationParams") or {}
6651 )
6652 exec_terminate_primitives = not operation_params.get(
6653 "skip_terminate_primitives"
6654 ) and vca.get("needed_terminate")
6655 task = asyncio.ensure_future(
6656 asyncio.wait_for(
6657 self.destroy_N2VC(
6658 logging_text,
6659 db_nslcmop,
6660 vca,
6661 config_descriptor,
6662 vca_index,
6663 destroy_ee=True,
6664 exec_primitives=exec_terminate_primitives,
6665 scaling_in=True,
6666 vca_id=vca_id,
6667 ),
6668 timeout=self.timeout_charm_delete,
6669 )
6670 )
6671 tasks_dict_info[task] = "Terminating VCA {}".format(
6672 vca.get("ee_id")
6673 )
6674 del vca_update[vca_index]
6675 del config_update[vca_index]
6676 # wait for pending tasks of terminate primitives
6677 if tasks_dict_info:
6678 self.logger.debug(
6679 logging_text
6680 + "Waiting for tasks {}".format(
6681 list(tasks_dict_info.keys())
6682 )
6683 )
6684 error_list = await self._wait_for_tasks(
6685 logging_text,
6686 tasks_dict_info,
6687 min(
6688 self.timeout_charm_delete, self.timeout_ns_terminate
6689 ),
6690 stage,
6691 nslcmop_id,
6692 )
6693 tasks_dict_info.clear()
6694 if error_list:
6695 raise LcmException("; ".join(error_list))
6696
6697 db_vca_and_config_update = {
6698 "_admin.deployed.VCA": vca_update,
6699 "configurationStatus": config_update,
6700 }
6701 self.update_db_2(
6702 "nsrs", db_nsr["_id"], db_vca_and_config_update
6703 )
6704 scale_process = None
6705 # SCALE-IN VCA - END
6706
6707 # SCALE RO - BEGIN
6708 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6709 scale_process = "RO"
6710 if self.ro_config.get("ng"):
6711 await self._scale_ng_ro(
6712 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6713 )
6714 scaling_info.pop("vdu-create", None)
6715 scaling_info.pop("vdu-delete", None)
6716
6717 scale_process = None
6718 # SCALE RO - END
6719
6720 # SCALE KDU - BEGIN
6721 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6722 scale_process = "KDU"
6723 await self._scale_kdu(
6724 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6725 )
6726 scaling_info.pop("kdu-create", None)
6727 scaling_info.pop("kdu-delete", None)
6728
6729 scale_process = None
6730 # SCALE KDU - END
6731
6732 if db_nsr_update:
6733 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6734
6735 # SCALE-UP VCA - BEGIN
6736 if vca_scaling_info:
6737 step = db_nslcmop_update[
6738 "detailed-status"
6739 ] = "Creating new execution environments"
6740 scale_process = "VCA"
6741 for vca_info in vca_scaling_info:
6742 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6743 member_vnf_index = str(vca_info["member-vnf-index"])
6744 self.logger.debug(
6745 logging_text + "vdu info: {}".format(vca_info)
6746 )
6747 vnfd_id = db_vnfr["vnfd-ref"]
6748 if vca_info.get("osm_vdu_id"):
6749 vdu_index = int(vca_info["vdu_index"])
6750 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6751 if db_vnfr.get("additionalParamsForVnf"):
6752 deploy_params.update(
6753 parse_yaml_strings(
6754 db_vnfr["additionalParamsForVnf"].copy()
6755 )
6756 )
6757 descriptor_config = get_configuration(
6758 db_vnfd, db_vnfd["id"]
6759 )
6760 if descriptor_config:
6761 vdu_id = None
6762 vdu_name = None
6763 kdu_name = None
6764 self._deploy_n2vc(
6765 logging_text=logging_text
6766 + "member_vnf_index={} ".format(member_vnf_index),
6767 db_nsr=db_nsr,
6768 db_vnfr=db_vnfr,
6769 nslcmop_id=nslcmop_id,
6770 nsr_id=nsr_id,
6771 nsi_id=nsi_id,
6772 vnfd_id=vnfd_id,
6773 vdu_id=vdu_id,
6774 kdu_name=kdu_name,
6775 member_vnf_index=member_vnf_index,
6776 vdu_index=vdu_index,
6777 vdu_name=vdu_name,
6778 deploy_params=deploy_params,
6779 descriptor_config=descriptor_config,
6780 base_folder=base_folder,
6781 task_instantiation_info=tasks_dict_info,
6782 stage=stage,
6783 )
6784 vdu_id = vca_info["osm_vdu_id"]
6785 vdur = find_in_list(
6786 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6787 )
6788 descriptor_config = get_configuration(db_vnfd, vdu_id)
6789 if vdur.get("additionalParams"):
6790 deploy_params_vdu = parse_yaml_strings(
6791 vdur["additionalParams"]
6792 )
6793 else:
6794 deploy_params_vdu = deploy_params
6795 deploy_params_vdu["OSM"] = get_osm_params(
6796 db_vnfr, vdu_id, vdu_count_index=vdu_index
6797 )
6798 if descriptor_config:
6799 vdu_name = None
6800 kdu_name = None
6801 stage[
6802 1
6803 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6804 member_vnf_index, vdu_id, vdu_index
6805 )
6806 stage[2] = step = "Scaling out VCA"
6807 self._write_op_status(op_id=nslcmop_id, stage=stage)
6808 self._deploy_n2vc(
6809 logging_text=logging_text
6810 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6811 member_vnf_index, vdu_id, vdu_index
6812 ),
6813 db_nsr=db_nsr,
6814 db_vnfr=db_vnfr,
6815 nslcmop_id=nslcmop_id,
6816 nsr_id=nsr_id,
6817 nsi_id=nsi_id,
6818 vnfd_id=vnfd_id,
6819 vdu_id=vdu_id,
6820 kdu_name=kdu_name,
6821 member_vnf_index=member_vnf_index,
6822 vdu_index=vdu_index,
6823 vdu_name=vdu_name,
6824 deploy_params=deploy_params_vdu,
6825 descriptor_config=descriptor_config,
6826 base_folder=base_folder,
6827 task_instantiation_info=tasks_dict_info,
6828 stage=stage,
6829 )
6830 # SCALE-UP VCA - END
6831 scale_process = None
6832
6833 # POST-SCALE BEGIN
6834 # execute primitive service POST-SCALING
6835 step = "Executing post-scale vnf-config-primitive"
6836 if scaling_descriptor.get("scaling-config-action"):
6837 for scaling_config_action in scaling_descriptor[
6838 "scaling-config-action"
6839 ]:
6840 if (
6841 scaling_config_action.get("trigger") == "post-scale-in"
6842 and scaling_type == "SCALE_IN"
6843 ) or (
6844 scaling_config_action.get("trigger") == "post-scale-out"
6845 and scaling_type == "SCALE_OUT"
6846 ):
6847 vnf_config_primitive = scaling_config_action[
6848 "vnf-config-primitive-name-ref"
6849 ]
6850 step = db_nslcmop_update[
6851 "detailed-status"
6852 ] = "executing post-scale scaling-config-action '{}'".format(
6853 vnf_config_primitive
6854 )
6855
6856 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6857 if db_vnfr.get("additionalParamsForVnf"):
6858 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6859
6860 # look for primitive
6861 for config_primitive in (
6862 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6863 ).get("config-primitive", ()):
6864 if config_primitive["name"] == vnf_config_primitive:
6865 break
6866 else:
6867 raise LcmException(
6868 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6869 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6870 "config-primitive".format(
6871 scaling_group, vnf_config_primitive
6872 )
6873 )
6874 scale_process = "VCA"
6875 db_nsr_update["config-status"] = "configuring post-scaling"
6876 primitive_params = self._map_primitive_params(
6877 config_primitive, {}, vnfr_params
6878 )
6879
6880 # Post-scale retry check: Check if this sub-operation has been executed before
6881 op_index = self._check_or_add_scale_suboperation(
6882 db_nslcmop,
6883 vnf_index,
6884 vnf_config_primitive,
6885 primitive_params,
6886 "POST-SCALE",
6887 )
6888 if op_index == self.SUBOPERATION_STATUS_SKIP:
6889 # Skip sub-operation
6890 result = "COMPLETED"
6891 result_detail = "Done"
6892 self.logger.debug(
6893 logging_text
6894 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6895 vnf_config_primitive, result, result_detail
6896 )
6897 )
6898 else:
6899 if op_index == self.SUBOPERATION_STATUS_NEW:
6900 # New sub-operation: Get index of this sub-operation
6901 op_index = (
6902 len(db_nslcmop.get("_admin", {}).get("operations"))
6903 - 1
6904 )
6905 self.logger.debug(
6906 logging_text
6907 + "vnf_config_primitive={} New sub-operation".format(
6908 vnf_config_primitive
6909 )
6910 )
6911 else:
6912 # retry: Get registered params for this existing sub-operation
6913 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6914 op_index
6915 ]
6916 vnf_index = op.get("member_vnf_index")
6917 vnf_config_primitive = op.get("primitive")
6918 primitive_params = op.get("primitive_params")
6919 self.logger.debug(
6920 logging_text
6921 + "vnf_config_primitive={} Sub-operation retry".format(
6922 vnf_config_primitive
6923 )
6924 )
6925 # Execute the primitive, either with new (first-time) or registered (reintent) args
6926 ee_descriptor_id = config_primitive.get(
6927 "execution-environment-ref"
6928 )
6929 primitive_name = config_primitive.get(
6930 "execution-environment-primitive", vnf_config_primitive
6931 )
6932 ee_id, vca_type = self._look_for_deployed_vca(
6933 nsr_deployed["VCA"],
6934 member_vnf_index=vnf_index,
6935 vdu_id=None,
6936 vdu_count_index=None,
6937 ee_descriptor_id=ee_descriptor_id,
6938 )
6939 result, result_detail = await self._ns_execute_primitive(
6940 ee_id,
6941 primitive_name,
6942 primitive_params,
6943 vca_type=vca_type,
6944 vca_id=vca_id,
6945 )
6946 self.logger.debug(
6947 logging_text
6948 + "vnf_config_primitive={} Done with result {} {}".format(
6949 vnf_config_primitive, result, result_detail
6950 )
6951 )
6952 # Update operationState = COMPLETED | FAILED
6953 self._update_suboperation_status(
6954 db_nslcmop, op_index, result, result_detail
6955 )
6956
6957 if result == "FAILED":
6958 raise LcmException(result_detail)
6959 db_nsr_update["config-status"] = old_config_status
6960 scale_process = None
6961 # POST-SCALE END
6962
6963 db_nsr_update[
6964 "detailed-status"
6965 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6966 db_nsr_update["operational-status"] = (
6967 "running"
6968 if old_operational_status == "failed"
6969 else old_operational_status
6970 )
6971 db_nsr_update["config-status"] = old_config_status
6972 return
6973 except (
6974 ROclient.ROClientException,
6975 DbException,
6976 LcmException,
6977 NgRoException,
6978 ) as e:
6979 self.logger.error(logging_text + "Exit Exception {}".format(e))
6980 exc = e
6981 except asyncio.CancelledError:
6982 self.logger.error(
6983 logging_text + "Cancelled Exception while '{}'".format(step)
6984 )
6985 exc = "Operation was cancelled"
6986 except Exception as e:
6987 exc = traceback.format_exc()
6988 self.logger.critical(
6989 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6990 exc_info=True,
6991 )
6992 finally:
6993 self._write_ns_status(
6994 nsr_id=nsr_id,
6995 ns_state=None,
6996 current_operation="IDLE",
6997 current_operation_id=None,
6998 )
6999 if tasks_dict_info:
7000 stage[1] = "Waiting for instantiate pending tasks."
7001 self.logger.debug(logging_text + stage[1])
7002 exc = await self._wait_for_tasks(
7003 logging_text,
7004 tasks_dict_info,
7005 self.timeout_ns_deploy,
7006 stage,
7007 nslcmop_id,
7008 nsr_id=nsr_id,
7009 )
7010 if exc:
7011 db_nslcmop_update[
7012 "detailed-status"
7013 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7014 nslcmop_operation_state = "FAILED"
7015 if db_nsr:
7016 db_nsr_update["operational-status"] = old_operational_status
7017 db_nsr_update["config-status"] = old_config_status
7018 db_nsr_update["detailed-status"] = ""
7019 if scale_process:
7020 if "VCA" in scale_process:
7021 db_nsr_update["config-status"] = "failed"
7022 if "RO" in scale_process:
7023 db_nsr_update["operational-status"] = "failed"
7024 db_nsr_update[
7025 "detailed-status"
7026 ] = "FAILED scaling nslcmop={} {}: {}".format(
7027 nslcmop_id, step, exc
7028 )
7029 else:
7030 error_description_nslcmop = None
7031 nslcmop_operation_state = "COMPLETED"
7032 db_nslcmop_update["detailed-status"] = "Done"
7033
7034 self._write_op_status(
7035 op_id=nslcmop_id,
7036 stage="",
7037 error_message=error_description_nslcmop,
7038 operation_state=nslcmop_operation_state,
7039 other_update=db_nslcmop_update,
7040 )
7041 if db_nsr:
7042 self._write_ns_status(
7043 nsr_id=nsr_id,
7044 ns_state=None,
7045 current_operation="IDLE",
7046 current_operation_id=None,
7047 other_update=db_nsr_update,
7048 )
7049
7050 if nslcmop_operation_state:
7051 try:
7052 msg = {
7053 "nsr_id": nsr_id,
7054 "nslcmop_id": nslcmop_id,
7055 "operationState": nslcmop_operation_state,
7056 }
7057 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7058 except Exception as e:
7059 self.logger.error(
7060 logging_text + "kafka_write notification Exception {}".format(e)
7061 )
7062 self.logger.debug(logging_text + "Exit")
7063 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7064
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """Scale the KDUs of a NS as requested in scaling_info.

        For every kdu scaling item, runs (in order): the terminate config
        primitives (scale-in only), the k8s scale itself, and the initial
        config primitives (scale-out only). Config primitives are only run
        directly through the k8s connector when the KDU has no juju
        execution environment in charge of them.

        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param nsr_deployed: content of nsr _admin.deployed (to locate each deployed KDU)
        :param db_vnfd: vnfd database record of the VNF owning the KDUs
        :param vca_id: VCA identity, forwarded to the k8s connector calls
        :param scaling_info: dict with a "kdu-create" or "kdu-delete" entry mapping
            kdu_name -> list of scaling items (member-vnf-index, scale, type, ...)
        """
        # NOTE(review): if both "kdu-create" and "kdu-delete" are present only the
        # "kdu-create" entry is processed — confirm callers never set both at once
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU and its position inside _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # database location where the k8s connector reports status updates
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # scale-in: run terminate config primitives before scaling down,
                # sorted by their "seq" field, each with a fixed 600s timeout
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # the scale itself (runs for both create and delete items)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # scale-out: run initial config primitives after scaling up,
                # sorted by "seq", same 600s per-primitive timeout
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7170
7171 async def _scale_ng_ro(
7172 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7173 ):
7174 nsr_id = db_nslcmop["nsInstanceId"]
7175 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7176 db_vnfrs = {}
7177
7178 # read from db: vnfd's for every vnf
7179 db_vnfds = []
7180
7181 # for each vnf in ns, read vnfd
7182 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7183 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7184 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7185 # if we haven't this vnfd, read it from db
7186 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7187 # read from db
7188 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7189 db_vnfds.append(vnfd)
7190 n2vc_key = self.n2vc.get_public_key()
7191 n2vc_key_list = [n2vc_key]
7192 self.scale_vnfr(
7193 db_vnfr,
7194 vdu_scaling_info.get("vdu-create"),
7195 vdu_scaling_info.get("vdu-delete"),
7196 mark_delete=True,
7197 )
7198 # db_vnfr has been updated, update db_vnfrs to use it
7199 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7200 await self._instantiate_ng_ro(
7201 logging_text,
7202 nsr_id,
7203 db_nsd,
7204 db_nsr,
7205 db_nslcmop,
7206 db_vnfrs,
7207 db_vnfds,
7208 n2vc_key_list,
7209 stage=stage,
7210 start_deploy=time(),
7211 timeout_ns_deploy=self.timeout_ns_deploy,
7212 )
7213 if vdu_scaling_info.get("vdu-delete"):
7214 self.scale_vnfr(
7215 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7216 )
7217
7218 async def extract_prometheus_scrape_jobs(
7219 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7220 ):
7221 # look if exist a file called 'prometheus*.j2' and
7222 artifact_content = self.fs.dir_ls(artifact_path)
7223 job_file = next(
7224 (
7225 f
7226 for f in artifact_content
7227 if f.startswith("prometheus") and f.endswith(".j2")
7228 ),
7229 None,
7230 )
7231 if not job_file:
7232 return
7233 with self.fs.file_open((artifact_path, job_file), "r") as f:
7234 job_data = f.read()
7235
7236 # TODO get_service
7237 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7238 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7239 host_port = "80"
7240 vnfr_id = vnfr_id.replace("-", "")
7241 variables = {
7242 "JOB_NAME": vnfr_id,
7243 "TARGET_IP": target_ip,
7244 "EXPORTER_POD_IP": host_name,
7245 "EXPORTER_POD_PORT": host_port,
7246 }
7247 job_list = parse_job(job_data, variables)
7248 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7249 for job in job_list:
7250 if (
7251 not isinstance(job.get("job_name"), str)
7252 or vnfr_id not in job["job_name"]
7253 ):
7254 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7255 job["nsr_id"] = nsr_id
7256 job["vnfr_id"] = vnfr_id
7257 return job_list
7258
7259 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7260 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7261 self.logger.info(logging_text + "Enter")
7262 stage = ["Preparing the environment", ""]
7263 # database nsrs record
7264 db_nsr_update = {}
7265 vdu_vim_name = None
7266 vim_vm_id = None
7267 # in case of error, indicates what part of scale was failed to put nsr at error status
7268 start_deploy = time()
7269 try:
7270 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7271 vim_account_id = db_vnfr.get("vim-account-id")
7272 vim_info_key = "vim:" + vim_account_id
7273 vdu_id = additional_param["vdu_id"]
7274 vdurs = [item for item in db_vnfr["vdur"] if item["vdu-id-ref"] == vdu_id]
7275 vdur = find_in_list(
7276 vdurs, lambda vdu: vdu["count-index"] == additional_param["count-index"]
7277 )
7278 if vdur:
7279 vdu_vim_name = vdur["name"]
7280 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7281 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7282 else:
7283 raise LcmException("Target vdu is not found")
7284 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7285 # wait for any previous tasks in process
7286 stage[1] = "Waiting for previous operations to terminate"
7287 self.logger.info(stage[1])
7288 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7289
7290 stage[1] = "Reading from database."
7291 self.logger.info(stage[1])
7292 self._write_ns_status(
7293 nsr_id=nsr_id,
7294 ns_state=None,
7295 current_operation=operation_type.upper(),
7296 current_operation_id=nslcmop_id
7297 )
7298 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7299
7300 # read from db: ns
7301 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7302 db_nsr_update["operational-status"] = operation_type
7303 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7304 # Payload for RO
7305 desc = {
7306 operation_type: {
7307 "vim_vm_id": vim_vm_id,
7308 "vnf_id": vnf_id,
7309 "vdu_index": additional_param["count-index"],
7310 "vdu_id": vdur["id"],
7311 "target_vim": target_vim,
7312 "vim_account_id": vim_account_id
7313 }
7314 }
7315 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7316 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7317 self.logger.info("ro nsr id: {}".format(nsr_id))
7318 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7319 self.logger.info("response from RO: {}".format(result_dict))
7320 action_id = result_dict["action_id"]
7321 await self._wait_ng_ro(
7322 nsr_id, action_id, nslcmop_id, start_deploy,
7323 self.timeout_operate, None, "start_stop_rebuild",
7324 )
7325 return "COMPLETED", "Done"
7326 except (ROclient.ROClientException, DbException, LcmException) as e:
7327 self.logger.error("Exit Exception {}".format(e))
7328 exc = e
7329 except asyncio.CancelledError:
7330 self.logger.error("Cancelled Exception while '{}'".format(stage))
7331 exc = "Operation was cancelled"
7332 except Exception as e:
7333 exc = traceback.format_exc()
7334 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7335 return "FAILED", "Error in operate VNF {}".format(exc)
7336
7337 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7338 """
7339 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7340
7341 :param: vim_account_id: VIM Account ID
7342
7343 :return: (cloud_name, cloud_credential)
7344 """
7345 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7346 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7347
7348 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7349 """
7350 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7351
7352 :param: vim_account_id: VIM Account ID
7353
7354 :return: (cloud_name, cloud_credential)
7355 """
7356 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7357 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7358
7359 async def migrate(self, nsr_id, nslcmop_id):
7360 """
7361 Migrate VNFs and VDUs instances in a NS
7362
7363 :param: nsr_id: NS Instance ID
7364 :param: nslcmop_id: nslcmop ID of migrate
7365
7366 """
7367 # Try to lock HA task here
7368 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7369 if not task_is_locked_by_me:
7370 return
7371 logging_text = "Task ns={} migrate ".format(nsr_id)
7372 self.logger.debug(logging_text + "Enter")
7373 # get all needed from database
7374 db_nslcmop = None
7375 db_nslcmop_update = {}
7376 nslcmop_operation_state = None
7377 db_nsr_update = {}
7378 target = {}
7379 exc = None
7380 # in case of error, indicates what part of scale was failed to put nsr at error status
7381 start_deploy = time()
7382
7383 try:
7384 # wait for any previous tasks in process
7385 step = "Waiting for previous operations to terminate"
7386 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7387
7388 self._write_ns_status(
7389 nsr_id=nsr_id,
7390 ns_state=None,
7391 current_operation="MIGRATING",
7392 current_operation_id=nslcmop_id,
7393 )
7394 step = "Getting nslcmop from database"
7395 self.logger.debug(
7396 step + " after having waited for previous tasks to be completed"
7397 )
7398 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7399 migrate_params = db_nslcmop.get("operationParams")
7400
7401 target = {}
7402 target.update(migrate_params)
7403 desc = await self.RO.migrate(nsr_id, target)
7404 self.logger.debug("RO return > {}".format(desc))
7405 action_id = desc["action_id"]
7406 await self._wait_ng_ro(
7407 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
7408 operation="migrate"
7409 )
7410 except (ROclient.ROClientException, DbException, LcmException) as e:
7411 self.logger.error("Exit Exception {}".format(e))
7412 exc = e
7413 except asyncio.CancelledError:
7414 self.logger.error("Cancelled Exception while '{}'".format(step))
7415 exc = "Operation was cancelled"
7416 except Exception as e:
7417 exc = traceback.format_exc()
7418 self.logger.critical(
7419 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7420 )
7421 finally:
7422 self._write_ns_status(
7423 nsr_id=nsr_id,
7424 ns_state=None,
7425 current_operation="IDLE",
7426 current_operation_id=None,
7427 )
7428 if exc:
7429 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7430 nslcmop_operation_state = "FAILED"
7431 else:
7432 nslcmop_operation_state = "COMPLETED"
7433 db_nslcmop_update["detailed-status"] = "Done"
7434 db_nsr_update["detailed-status"] = "Done"
7435
7436 self._write_op_status(
7437 op_id=nslcmop_id,
7438 stage="",
7439 error_message="",
7440 operation_state=nslcmop_operation_state,
7441 other_update=db_nslcmop_update,
7442 )
7443 if nslcmop_operation_state:
7444 try:
7445 msg = {
7446 "nsr_id": nsr_id,
7447 "nslcmop_id": nslcmop_id,
7448 "operationState": nslcmop_operation_state,
7449 }
7450 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7451 except Exception as e:
7452 self.logger.error(
7453 logging_text + "kafka_write notification Exception {}".format(e)
7454 )
7455 self.logger.debug(logging_text + "Exit")
7456 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7457
7458
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: re-create the target VDUs at RO/VIM and re-deploy their
        execution environments (N2VC) when the healed machine carries charms.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return: None (outcome is written to the nslcmops/nsrs records and kafka)
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # kept to restore the nsr statuses on failure
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # launch the RO healing in parallel; awaited in the finally block
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    # NOTE(review): iterating .get("vdu", None) raises TypeError when
                    # "vdu" is absent from additionalParams — confirm callers always
                    # provide it (a [] default would be safer)
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance stays None when no vdur
                            # matches vdu_name/vdu_index; the .get() below would then
                            # raise AttributeError — confirm the match always exists
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # wait for the RO and N2VC tasks launched above before concluding
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # restore the pre-heal statuses, then override with "failed"
                    # depending on which kind of task (VCA or RO) is still pending
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify the outcome through kafka (best effort)
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7726
7727 async def heal_RO(
7728 self,
7729 logging_text,
7730 nsr_id,
7731 db_nslcmop,
7732 stage,
7733 ):
7734 """
7735 Heal at RO
7736 :param logging_text: preffix text to use at logging
7737 :param nsr_id: nsr identity
7738 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7739 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7740 :return: None or exception
7741 """
7742 def get_vim_account(vim_account_id):
7743 nonlocal db_vims
7744 if vim_account_id in db_vims:
7745 return db_vims[vim_account_id]
7746 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7747 db_vims[vim_account_id] = db_vim
7748 return db_vim
7749
7750 try:
7751 start_heal = time()
7752 ns_params = db_nslcmop.get("operationParams")
7753 if ns_params and ns_params.get("timeout_ns_heal"):
7754 timeout_ns_heal = ns_params["timeout_ns_heal"]
7755 else:
7756 timeout_ns_heal = self.timeout.get(
7757 "ns_heal", self.timeout_ns_heal
7758 )
7759
7760 db_vims = {}
7761
7762 nslcmop_id = db_nslcmop["_id"]
7763 target = {
7764 "action_id": nslcmop_id,
7765 }
7766 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7767 target.update(db_nslcmop.get("operationParams", {}))
7768
7769 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7770 desc = await self.RO.recreate(nsr_id, target)
7771 self.logger.debug("RO return > {}".format(desc))
7772 action_id = desc["action_id"]
7773 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7774 await self._wait_ng_ro(
7775 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7776 operation="healing"
7777 )
7778
7779 # Updating NSR
7780 db_nsr_update = {
7781 "_admin.deployed.RO.operational-status": "running",
7782 "detailed-status": " ".join(stage),
7783 }
7784 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7785 self._write_op_status(nslcmop_id, stage)
7786 self.logger.debug(
7787 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7788 )
7789
7790 except Exception as e:
7791 stage[2] = "ERROR healing at VIM"
7792 #self.set_vnfr_at_error(db_vnfrs, str(e))
7793 self.logger.error(
7794 "Error healing at VIM {}".format(e),
7795 exc_info=not isinstance(
7796 e,
7797 (
7798 ROclient.ROClientException,
7799 LcmException,
7800 DbException,
7801 NgRoException,
7802 ),
7803 ),
7804 )
7805 raise
7806
7807 def _heal_n2vc(
7808 self,
7809 logging_text,
7810 db_nsr,
7811 db_vnfr,
7812 nslcmop_id,
7813 nsr_id,
7814 nsi_id,
7815 vnfd_id,
7816 vdu_id,
7817 kdu_name,
7818 member_vnf_index,
7819 vdu_index,
7820 vdu_name,
7821 deploy_params,
7822 descriptor_config,
7823 base_folder,
7824 task_instantiation_info,
7825 stage,
7826 ):
7827 # launch instantiate_N2VC in a asyncio task and register task object
7828 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7829 # if not found, create one entry and update database
7830 # fill db_nsr._admin.deployed.VCA.<index>
7831
7832 self.logger.debug(
7833 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7834 )
7835 if "execution-environment-list" in descriptor_config:
7836 ee_list = descriptor_config.get("execution-environment-list", [])
7837 elif "juju" in descriptor_config:
7838 ee_list = [descriptor_config] # ns charms
7839 else: # other types as script are not supported
7840 ee_list = []
7841
7842 for ee_item in ee_list:
7843 self.logger.debug(
7844 logging_text
7845 + "_deploy_n2vc ee_item juju={}, helm={}".format(
7846 ee_item.get("juju"), ee_item.get("helm-chart")
7847 )
7848 )
7849 ee_descriptor_id = ee_item.get("id")
7850 if ee_item.get("juju"):
7851 vca_name = ee_item["juju"].get("charm")
7852 vca_type = (
7853 "lxc_proxy_charm"
7854 if ee_item["juju"].get("charm") is not None
7855 else "native_charm"
7856 )
7857 if ee_item["juju"].get("cloud") == "k8s":
7858 vca_type = "k8s_proxy_charm"
7859 elif ee_item["juju"].get("proxy") is False:
7860 vca_type = "native_charm"
7861 elif ee_item.get("helm-chart"):
7862 vca_name = ee_item["helm-chart"]
7863 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
7864 vca_type = "helm"
7865 else:
7866 vca_type = "helm-v3"
7867 else:
7868 self.logger.debug(
7869 logging_text + "skipping non juju neither charm configuration"
7870 )
7871 continue
7872
7873 vca_index = -1
7874 for vca_index, vca_deployed in enumerate(
7875 db_nsr["_admin"]["deployed"]["VCA"]
7876 ):
7877 if not vca_deployed:
7878 continue
7879 if (
7880 vca_deployed.get("member-vnf-index") == member_vnf_index
7881 and vca_deployed.get("vdu_id") == vdu_id
7882 and vca_deployed.get("kdu_name") == kdu_name
7883 and vca_deployed.get("vdu_count_index", 0) == vdu_index
7884 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
7885 ):
7886 break
7887 else:
7888 # not found, create one.
7889 target = (
7890 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
7891 )
7892 if vdu_id:
7893 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
7894 elif kdu_name:
7895 target += "/kdu/{}".format(kdu_name)
7896 vca_deployed = {
7897 "target_element": target,
7898 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
7899 "member-vnf-index": member_vnf_index,
7900 "vdu_id": vdu_id,
7901 "kdu_name": kdu_name,
7902 "vdu_count_index": vdu_index,
7903 "operational-status": "init", # TODO revise
7904 "detailed-status": "", # TODO revise
7905 "step": "initial-deploy", # TODO revise
7906 "vnfd_id": vnfd_id,
7907 "vdu_name": vdu_name,
7908 "type": vca_type,
7909 "ee_descriptor_id": ee_descriptor_id,
7910 }
7911 vca_index += 1
7912
7913 # create VCA and configurationStatus in db
7914 db_dict = {
7915 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
7916 "configurationStatus.{}".format(vca_index): dict(),
7917 }
7918 self.update_db_2("nsrs", nsr_id, db_dict)
7919
7920 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
7921
7922 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
7923 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
7924 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
7925
7926 # Launch task
7927 task_n2vc = asyncio.ensure_future(
7928 self.heal_N2VC(
7929 logging_text=logging_text,
7930 vca_index=vca_index,
7931 nsi_id=nsi_id,
7932 db_nsr=db_nsr,
7933 db_vnfr=db_vnfr,
7934 vdu_id=vdu_id,
7935 kdu_name=kdu_name,
7936 vdu_index=vdu_index,
7937 deploy_params=deploy_params,
7938 config_descriptor=descriptor_config,
7939 base_folder=base_folder,
7940 nslcmop_id=nslcmop_id,
7941 stage=stage,
7942 vca_type=vca_type,
7943 vca_name=vca_name,
7944 ee_config_descriptor=ee_item,
7945 )
7946 )
7947 self.lcm_tasks.register(
7948 "ns",
7949 nsr_id,
7950 nslcmop_id,
7951 "instantiate_N2VC-{}".format(vca_index),
7952 task_n2vc,
7953 )
7954 task_instantiation_info[
7955 task_n2vc
7956 ] = self.task_name_deploy_vca + " {}.{}".format(
7957 member_vnf_index or "", vdu_id or ""
7958 )
7959
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Re-establish and re-configure one VCA execution environment while
        healing an NS (healing counterpart of instantiate_N2VC).

        For native charms: waits for the healed VM to be up, registers a new
        execution environment against it and re-installs the configuration
        software. For proxy/helm VCAs: waits for RO to finish healing,
        re-injects the SSH public key into the VM when required, and — only
        when the "run-day1" deploy parameter is truthy — re-executes the
        Day-1 initial config primitives.

        :param logging_text: prefix for every log line of this task
        :param vca_index: position of this VCA inside _admin.deployed.VCA
        :param nsi_id: network slice instance id (falsy when not in a slice)
        :param db_nsr: nsr record already read from the database
        :param db_vnfr: vnfr record, or None for an NS-level charm
        :param vdu_id: VDU id when the charm targets a VDU, else None
        :param kdu_name: KDU name when the charm targets a KDU, else None
        :param vdu_index: VDU count index (falsy -> 0)
        :param config_descriptor: descriptor section holding the VCA config
            (initial-config-primitive, config-access, ...)
        :param deploy_params: params used to render primitive arguments;
            "rw_mgmt_ip" is written into it and "run-day1" is read from it
        :param base_folder: dict with package "folder"/"pkg-dir" used to
            locate the charm or helm-chart artifact
        :param nslcmop_id: current operation id, for op status reporting
        :param stage: mutable list of stage strings, updated in place
        :param vca_type: one of native_charm, lxc_proxy_charm,
            k8s_proxy_charm, helm, helm-v3
        :param vca_name: charm/helm-chart name inside the package
        :param ee_config_descriptor: execution-environment-list item in use
        :raises LcmException: wrapping any failure, prefixed with the last
            `step` string reached before the error
        """
        nsr_id = db_nsr["_id"]
        # dot-path prefix of this VCA entry inside the nsr document
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        # "osm" context handed to the execution environment (ns/vnf/vdu ids)
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # location descriptor the VCA connector uses to report status into db
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # `step` tracks progress; on failure it prefixes the raised message
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: <nsi>.<ns>[.<vnf>-<i>[.<vdu>-<i> | .<kdu>]]
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            # native charms run inside the (single) unit; other types are
            # indexed per VDU instance
            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/target and namespace down to VNF/VDU/KDU
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                # package layout without pkg-dir: artifacts live under Scripts/
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            # filter/sort the primitives that belong to this execution env
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                # no key injection yet (user/pub_key None): just wait for the IP
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # a primitive literally named "config" provides the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

                # write in db flag of configuration_sw already installed
                self.update_db_2(
                    "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
                )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                        # only checked once: the flag does not change per primitive
                        check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # expected exception types carry their own context; only log
            # a traceback for unexpected ones
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8363
8364 async def _wait_heal_ro(
8365 self,
8366 nsr_id,
8367 timeout=600,
8368 ):
8369 start_time = time()
8370 while time() <= start_time + timeout:
8371 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8372 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8373 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8374 if operational_status_ro != "healing":
8375 break
8376 await asyncio.sleep(15, loop=self.loop)
8377 else: # timeout_ns_deploy
8378 raise NgRoException("Timeout waiting ns to deploy")
8379
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertically scale the VDUs in a NS by delegating the work to NG-RO.

        Takes the HA lock for the operation, forwards the nslcmop's
        operationParams unchanged to RO's vertical_scale endpoint, waits for
        the returned RO action to complete, and finally writes the operation
        result and publishes a "verticalscaled" message on kafka.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            # another LCM worker owns this operation; nothing to do here
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # RO target is a verbatim copy of the operation parameters
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # block until RO reports the verticalscale action as finished
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
                operation="verticalscale"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            # known/expected failures: keep the exception as the detail text
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            # unexpected failure: record the full traceback as the detail
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always return the NS to IDLE and report the op outcome
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                # notify subscribers (e.g. NBI) of the final operation state;
                # a kafka failure is logged but does not fail the operation
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")