# 2100 Bug fix: added op_status_map key value and in _wait_ng_ro passing operation...
# [osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import shutil
21 from typing import Any, Dict, List
22 import yaml
23 import logging
24 import logging.handlers
25 import traceback
26 import json
27 from jinja2 import (
28 Environment,
29 TemplateError,
30 TemplateNotFound,
31 StrictUndefined,
32 UndefinedError,
33 )
34
35 from osm_lcm import ROclient
36 from osm_lcm.data_utils.nsr import (
37 get_deployed_kdu,
38 get_deployed_vca,
39 get_deployed_vca_list,
40 get_nsd,
41 )
42 from osm_lcm.data_utils.vca import (
43 DeployedComponent,
44 DeployedK8sResource,
45 DeployedVCA,
46 EELevel,
47 Relation,
48 EERelation,
49 safe_get_ee_relation,
50 )
51 from osm_lcm.ng_ro import NgRoClient, NgRoException
52 from osm_lcm.lcm_utils import (
53 LcmException,
54 LcmExceptionNoMgmtIP,
55 LcmBase,
56 deep_get,
57 get_iterable,
58 populate_dict,
59 check_juju_bundle_existence,
60 get_charm_artifact_path,
61 )
62 from osm_lcm.data_utils.nsd import (
63 get_ns_configuration_relation_list,
64 get_vnf_profile,
65 get_vnf_profiles,
66 )
67 from osm_lcm.data_utils.vnfd import (
68 get_kdu,
69 get_kdu_services,
70 get_relation_list,
71 get_vdu_list,
72 get_vdu_profile,
73 get_ee_sorted_initial_config_primitive_list,
74 get_ee_sorted_terminate_config_primitive_list,
75 get_kdu_list,
76 get_virtual_link_profiles,
77 get_vdu,
78 get_configuration,
79 get_vdu_index,
80 get_scaling_aspect,
81 get_number_of_instances,
82 get_juju_ee_ref,
83 get_kdu_resource_profile,
84 find_software_version,
85 )
86 from osm_lcm.data_utils.list_utils import find_in_list
87 from osm_lcm.data_utils.vnfr import (
88 get_osm_params,
89 get_vdur_index,
90 get_kdur,
91 get_volumes_from_instantiation_params,
92 )
93 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
94 from osm_lcm.data_utils.database.vim_account import VimAccountDB
95 from n2vc.definitions import RelationEndpoint
96 from n2vc.k8s_helm_conn import K8sHelmConnector
97 from n2vc.k8s_helm3_conn import K8sHelm3Connector
98 from n2vc.k8s_juju_conn import K8sJujuConnector
99
100 from osm_common.dbbase import DbException
101 from osm_common.fsbase import FsException
102
103 from osm_lcm.data_utils.database.database import Database
104 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
105
106 from n2vc.n2vc_juju_conn import N2VCJujuConnector
107 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
108
109 from osm_lcm.lcm_helm_conn import LCMHelmConn
110 from osm_lcm.osm_config import OsmConfigBuilder
111 from osm_lcm.prometheus import parse_job
112
113 from copy import copy, deepcopy
114 from time import time
115 from uuid import uuid4
116
117 from random import randint
118
119 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
120
121
class NsLcm(LcmBase):
    """NS lifecycle manager.

    Holds the connectors (N2VC/juju, helm, k8s, RO) and implements the NS
    operations dispatched by the LCM (instantiate, terminate, scale, ...).
    Only class-level defaults and constants are defined here; the connectors
    themselves are created in __init__.
    """

    timeout_vca_on_error = (
        5 * 60
    )  # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600  # default global timeout for deployment a ns
    timeout_ns_terminate = 1800  # default global timeout for un deployment a ns
    timeout_ns_heal = 1800  # default global timeout for healing a ns
    timeout_charm_delete = 10 * 60  # timeout for deleting a charm
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_ns_update = 30 * 60  # timeout for ns update
    timeout_progress_primitive = (
        10 * 60
    )  # timeout for some progress in a primitive execution
    timeout_migrate = 1800  # default global timeout for migrating vnfs
    timeout_operate = 1800  # default global timeout for operate action on vnfs
    timeout_verticalscale = 1800  # default global timeout for Vertical Scaling
    # sentinel return values used by the sub-operation lookup helpers
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
142
    def __init__(self, msg, lcm_tasks, config, loop):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message bus handler, forwarded to LcmBase
        :param lcm_tasks: task registry shared with the LCM main module
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all connectors
        :return: None
        """
        super().__init__(msg=msg, logger=logging.getLogger("lcm.ns"))

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local modifications do not leak into the shared config
        self.vca_config = config["VCA"].copy()

        # create N2VC connector
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db,
        )

        # execution-environment connector for helm-based charms
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
        )

        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db,
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=self._on_update_k8s_db,
            fs=self.fs,
            db=self.db,
        )

        # dispatch table: KDU deployment type -> k8s connector instance
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # dispatch table: VCA (charm) type -> connector instance
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee,
        }

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)

        # dispatch table: LCM operation type -> RO client method used to query
        # the status of that operation (healing uses the recreate endpoint)
        self.op_status_map = {
            "instantiation": self.RO.status,
            "termination": self.RO.status,
            "migrate": self.RO.status,
            "healing": self.RO.recreate_status,
            "verticalscale": self.RO.status,
            "start_stop_rebuild": self.RO.status,
        }
231
232 @staticmethod
233 def increment_ip_mac(ip_mac, vm_index=1):
234 if not isinstance(ip_mac, str):
235 return ip_mac
236 try:
237 # try with ipv4 look for last dot
238 i = ip_mac.rfind(".")
239 if i > 0:
240 i += 1
241 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
242 # try with ipv6 or mac look for last colon. Operate in hex
243 i = ip_mac.rfind(":")
244 if i > 0:
245 i += 1
246 # format in hex, len can be 2 for mac or 4 for ipv6
247 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(
248 ip_mac[:i], int(ip_mac[i:], 16) + vm_index
249 )
250 except Exception:
251 pass
252 return None
253
254 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
255
256 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
257
258 try:
259 # TODO filter RO descriptor fields...
260
261 # write to database
262 db_dict = dict()
263 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
264 db_dict["deploymentStatus"] = ro_descriptor
265 self.update_db_2("nsrs", nsrs_id, db_dict)
266
267 except Exception as e:
268 self.logger.warn(
269 "Cannot write database RO deployment for ns={} -> {}".format(nsrs_id, e)
270 )
271
    async def _on_update_n2vc_db(self, table, filter, path, updated_data, vca_id=None):
        """Callback run when N2VC reports a juju model change; refresh nsr status.

        Stores the full vcaStatus in the nsr record, tries to flip the
        configurationStatus of the VCA addressed by *path*, and switches
        nsState between READY and DEGRADED from the juju machine/application
        statuses. All errors (other than cancellation/timeout) are logged,
        not raised.

        :param table: changed db table (the nsr is always re-read from "nsrs")
        :param filter: db filter; its "_id" is taken as the nsr id
        :param path: dotted db path whose last component is the VCA index
        :param updated_data: changed data (only used for debug logging)
        :param vca_id: optional VCA id passed through to n2vc.get_status
        """

        # remove last dot from path (if exists)
        if path.endswith("."):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #     .format(table, filter, path, updated_data))
        try:

            nsr_id = filter.get("_id")

            # read ns record from database
            nsr = self.db.get_one(table="nsrs", q_filter=filter)
            current_ns_status = nsr.get("nsState")

            # get vca status for NS
            status_dict = await self.n2vc.get_status(
                namespace="." + nsr_id, yaml_format=False, vca_id=vca_id
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".") + 1 :])

                vca_list = deep_get(
                    target_dict=nsr, key_list=("_admin", "deployed", "VCA")
                )
                vca_status = vca_list[vca_index].get("status")

                configuration_status_list = nsr.get("configurationStatus")
                config_status = configuration_status_list[vca_index].get("status")

                # NOTE(review): db_dict has no "configurationStatus" key at this
                # point, so these assignments raise KeyError and are swallowed by
                # the except below - confirm whether this update ever takes effect
                if config_status == "BROKEN" and vca_status != "failed":
                    db_dict["configurationStatus"][vca_index] = "READY"
                elif config_status != "BROKEN" and vca_status == "failed":
                    db_dict["configurationStatus"][vca_index] = "BROKEN"
            except Exception as e:
                # not update configurationStatus
                self.logger.debug("Error updating vca_index (ignore): {}".format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ("READY", "DEGRADED"):
                error_description = ""
                # check machines
                if status_dict.get("machines"):
                    for machine_id in status_dict.get("machines"):
                        machine = status_dict.get("machines").get(machine_id)
                        # check machine agent-status
                        if machine.get("agent-status"):
                            s = machine.get("agent-status").get("status")
                            if s != "started":
                                is_degraded = True
                                error_description += (
                                    "machine {} agent-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                        # check machine instance status
                        if machine.get("instance-status"):
                            s = machine.get("instance-status").get("status")
                            if s != "running":
                                is_degraded = True
                                error_description += (
                                    "machine {} instance-status={} ; ".format(
                                        machine_id, s
                                    )
                                )
                # check applications
                if status_dict.get("applications"):
                    for app_id in status_dict.get("applications"):
                        app = status_dict.get("applications").get(app_id)
                        # check application status
                        if app.get("status"):
                            s = app.get("status").get("status")
                            if s != "active":
                                is_degraded = True
                                error_description += (
                                    "application {} status={} ; ".format(app_id, s)
                                )

                if error_description:
                    db_dict["errorDescription"] = error_description
                if current_ns_status == "READY" and is_degraded:
                    db_dict["nsState"] = "DEGRADED"
                if current_ns_status == "DEGRADED" and not is_degraded:
                    db_dict["nsState"] = "READY"

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
373
    async def _on_update_k8s_db(
        self, cluster_uuid, kdu_instance, filter=None, vca_id=None, cluster_type="juju"
    ):
        """
        Updating vca status in NSR record
        :param cluster_uuid: UUID of a k8s cluster
        :param kdu_instance: The unique name of the KDU instance
        :param filter: To get nsr_id (its "_id" key)
        :param vca_id: id of the VCA to ask the status from
        :param cluster_type: The cluster type (juju, k8s); selects the connector
        :return: none
        """

        # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
        #     .format(cluster_uuid, kdu_instance, filter))

        nsr_id = filter.get("_id")
        try:
            # ask the connector for this cluster type for the complete KDU status
            vca_status = await self.k8scluster_map[cluster_type].status_kdu(
                cluster_uuid=cluster_uuid,
                kdu_instance=kdu_instance,
                yaml_format=False,
                complete_status=True,
                vca_id=vca_id,
            )

            # vcaStatus
            db_dict = dict()
            db_dict["vcaStatus"] = {nsr_id: vca_status}

            self.logger.debug(
                f"Obtained VCA status for cluster type '{cluster_type}': {vca_status}"
            )

            # write to database (best effort: failures are logged below)
            self.update_db_2("nsrs", nsr_id, db_dict)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn("Error updating NS state for ns={}: {}".format(nsr_id, e))
413
414 @staticmethod
415 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
416 try:
417 env = Environment(undefined=StrictUndefined)
418 template = env.from_string(cloud_init_text)
419 return template.render(additional_params or {})
420 except UndefinedError as e:
421 raise LcmException(
422 "Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
423 "file, must be provided in the instantiation parameters inside the "
424 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id)
425 )
426 except (TemplateError, TemplateNotFound) as e:
427 raise LcmException(
428 "Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".format(
429 vnfd_id, vdu_id, e
430 )
431 )
432
    def _get_vdu_cloud_init_content(self, vdu, vnfd):
        """Return the cloud-init text of a vdu, reading the package file if needed.

        :param vdu: vdu descriptor; may carry "cloud-init-file" or inline "cloud-init"
        :param vnfd: vnfd the vdu belongs to (provides the package storage location)
        :return: cloud-init content as str, or None if the vdu defines none
        :raises LcmException: if the cloud-init file cannot be read from storage
        """
        cloud_init_content = cloud_init_file = None
        try:
            if vdu.get("cloud-init-file"):
                base_folder = vnfd["_admin"]["storage"]
                # path layout differs between packaged (pkg-dir) and flat storage
                if base_folder["pkg-dir"]:
                    cloud_init_file = "{}/{}/cloud_init/{}".format(
                        base_folder["folder"],
                        base_folder["pkg-dir"],
                        vdu["cloud-init-file"],
                    )
                else:
                    cloud_init_file = "{}/Scripts/cloud_init/{}".format(
                        base_folder["folder"],
                        vdu["cloud-init-file"],
                    )
                with self.fs.file_open(cloud_init_file, "r") as ci_file:
                    cloud_init_content = ci_file.read()
            elif vdu.get("cloud-init"):
                cloud_init_content = vdu["cloud-init"]

            return cloud_init_content
        except FsException as e:
            raise LcmException(
                "Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".format(
                    vnfd["id"], vdu["id"], cloud_init_file, e
                )
            )
461
462 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
463 vdur = next(
464 (vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"]), {}
465 )
466 additional_params = vdur.get("additionalParams")
467 return parse_yaml_strings(additional_params)
468
469 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
470 """
471 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
472 :param vnfd: input vnfd
473 :param new_id: overrides vnf id if provided
474 :param additionalParams: Instantiation params for VNFs provided
475 :param nsrId: Id of the NSR
476 :return: copy of vnfd
477 """
478 vnfd_RO = deepcopy(vnfd)
479 # remove unused by RO configuration, monitoring, scaling and internal keys
480 vnfd_RO.pop("_id", None)
481 vnfd_RO.pop("_admin", None)
482 vnfd_RO.pop("monitoring-param", None)
483 vnfd_RO.pop("scaling-group-descriptor", None)
484 vnfd_RO.pop("kdu", None)
485 vnfd_RO.pop("k8s-cluster", None)
486 if new_id:
487 vnfd_RO["id"] = new_id
488
489 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
490 for vdu in get_iterable(vnfd_RO, "vdu"):
491 vdu.pop("cloud-init-file", None)
492 vdu.pop("cloud-init", None)
493 return vnfd_RO
494
495 @staticmethod
496 def ip_profile_2_RO(ip_profile):
497 RO_ip_profile = deepcopy(ip_profile)
498 if "dns-server" in RO_ip_profile:
499 if isinstance(RO_ip_profile["dns-server"], list):
500 RO_ip_profile["dns-address"] = []
501 for ds in RO_ip_profile.pop("dns-server"):
502 RO_ip_profile["dns-address"].append(ds["address"])
503 else:
504 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
505 if RO_ip_profile.get("ip-version") == "ipv4":
506 RO_ip_profile["ip-version"] = "IPv4"
507 if RO_ip_profile.get("ip-version") == "ipv6":
508 RO_ip_profile["ip-version"] = "IPv6"
509 if "dhcp-params" in RO_ip_profile:
510 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
511 return RO_ip_profile
512
513 def _get_ro_vim_id_for_vim_account(self, vim_account):
514 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
515 if db_vim["_admin"]["operationalState"] != "ENABLED":
516 raise LcmException(
517 "VIM={} is not available. operationalState={}".format(
518 vim_account, db_vim["_admin"]["operationalState"]
519 )
520 )
521 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
522 return RO_vim_id
523
524 def get_ro_wim_id_for_wim_account(self, wim_account):
525 if isinstance(wim_account, str):
526 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
527 if db_wim["_admin"]["operationalState"] != "ENABLED":
528 raise LcmException(
529 "WIM={} is not available. operationalState={}".format(
530 wim_account, db_wim["_admin"]["operationalState"]
531 )
532 )
533 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
534 return RO_wim_id
535 else:
536 return wim_account
537
    def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
        """Add or remove vdur entries of a vnfr in the database (scale out/in).

        On scale out, new vdurs are cloned from the last existing vdur of the
        same vdu-id (or from the saved "vdur-template" if none is left). On
        scale in, vdurs are either marked DELETING or pulled one by one. The
        passed db_vnfr is refreshed from the database at the end.

        :param db_vnfr: vnfr record; its "vdur" list is updated in place
        :param vdu_create: dict vdu-id -> number of instances to add
        :param vdu_delete: dict vdu-id -> number of instances to remove
        :param mark_delete: if True only mark vdurs with status DELETING
            instead of pulling them from the database
        :return: None
        :raises LcmException: when scaling out and neither a vdur nor a
            vdur-template exists for a requested vdu-id
        """

        db_vdu_push_list = []
        template_vdur = []
        db_update = {"_admin.modified": time()}
        if vdu_create:
            for vdu_id, vdu_count in vdu_create.items():
                # clone from the newest vdur of this vdu-id
                vdur = next(
                    (
                        vdur
                        for vdur in reversed(db_vnfr["vdur"])
                        if vdur["vdu-id-ref"] == vdu_id
                    ),
                    None,
                )
                if not vdur:
                    # Read the template saved in the db:
                    self.logger.debug(
                        "No vdur in the database. Using the vdur-template to scale"
                    )
                    vdur_template = db_vnfr.get("vdur-template")
                    if not vdur_template:
                        raise LcmException(
                            "Error scaling OUT VNFR for {}. No vnfr or template exists".format(
                                vdu_id
                            )
                        )
                    vdur = vdur_template[0]
                    # Delete a template from the database after using it
                    self.db.set_one(
                        "vnfrs",
                        {"_id": db_vnfr["_id"]},
                        None,
                        pull={"vdur-template": {"_id": vdur["_id"]}},
                    )
                for count in range(vdu_count):
                    # fresh copy: new _id, reset runtime state, next count-index
                    vdur_copy = deepcopy(vdur)
                    vdur_copy["status"] = "BUILD"
                    vdur_copy["status-detailed"] = None
                    vdur_copy["ip-address"] = None
                    vdur_copy["_id"] = str(uuid4())
                    vdur_copy["count-index"] += count + 1
                    vdur_copy["id"] = "{}-{}".format(
                        vdur_copy["vdu-id-ref"], vdur_copy["count-index"]
                    )
                    vdur_copy.pop("vim_info", None)
                    for iface in vdur_copy["interfaces"]:
                        # fixed addresses are incremented per replica; dynamic
                        # ones are cleared so the VIM assigns new values
                        if iface.get("fixed-ip"):
                            iface["ip-address"] = self.increment_ip_mac(
                                iface["ip-address"], count + 1
                            )
                        else:
                            iface.pop("ip-address", None)
                        if iface.get("fixed-mac"):
                            iface["mac-address"] = self.increment_ip_mac(
                                iface["mac-address"], count + 1
                            )
                        else:
                            iface.pop("mac-address", None)
                        if db_vnfr["vdur"]:
                            iface.pop(
                                "mgmt_vnf", None
                            )  # only the first vdu can be management of the vnf
                    db_vdu_push_list.append(vdur_copy)
                    # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
        if vdu_delete:
            if len(db_vnfr["vdur"]) == 1:
                # The scale will move to 0 instances
                self.logger.debug(
                    "Scaling to 0 !, creating the template with the last vdur"
                )
                template_vdur = [db_vnfr["vdur"][0]]
            for vdu_id, vdu_count in vdu_delete.items():
                if mark_delete:
                    indexes_to_delete = [
                        iv[0]
                        for iv in enumerate(db_vnfr["vdur"])
                        if iv[1]["vdu-id-ref"] == vdu_id
                    ]
                    # mark only the newest vdu_count entries as DELETING
                    db_update.update(
                        {
                            "vdur.{}.status".format(i): "DELETING"
                            for i in indexes_to_delete[-vdu_count:]
                        }
                    )
                else:
                    # it must be deleted one by one because common.db does not allow otherwise
                    vdus_to_delete = [
                        v
                        for v in reversed(db_vnfr["vdur"])
                        if v["vdu-id-ref"] == vdu_id
                    ]
                    for vdu in vdus_to_delete[:vdu_count]:
                        self.db.set_one(
                            "vnfrs",
                            {"_id": db_vnfr["_id"]},
                            None,
                            pull={"vdur": {"_id": vdu["_id"]}},
                        )
        db_push = {}
        if db_vdu_push_list:
            db_push["vdur"] = db_vdu_push_list
        if template_vdur:
            db_push["vdur-template"] = template_vdur
        if not db_push:
            db_push = None
        db_vnfr["vdur-template"] = template_vdur
        self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
        # modify passed dictionary db_vnfr
        db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
        db_vnfr["vdur"] = db_vnfr_["vdur"]
649
650 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
651 """
652 Updates database nsr with the RO info for the created vld
653 :param ns_update_nsr: dictionary to be filled with the updated info
654 :param db_nsr: content of db_nsr. This is also modified
655 :param nsr_desc_RO: nsr descriptor from RO
656 :return: Nothing, LcmException is raised on errors
657 """
658
659 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
660 for net_RO in get_iterable(nsr_desc_RO, "nets"):
661 if vld["id"] != net_RO.get("ns_net_osm_id"):
662 continue
663 vld["vim-id"] = net_RO.get("vim_net_id")
664 vld["name"] = net_RO.get("vim_name")
665 vld["status"] = net_RO.get("status")
666 vld["status-detailed"] = net_RO.get("error_msg")
667 ns_update_nsr["vld.{}".format(vld_index)] = vld
668 break
669 else:
670 raise LcmException(
671 "ns_update_nsr: Not found vld={} at RO info".format(vld["id"])
672 )
673
    def set_vnfr_at_error(self, db_vnfrs, error_text):
        """Mark all given vnfrs (and vdurs with no status yet) as ERROR in the db.

        Best effort: database errors are logged, never raised.

        :param db_vnfrs: dict member-vnf-index -> vnfr content (modified in place)
        :param error_text: detail text stored in the in-memory vdur record
        """
        try:
            for db_vnfr in db_vnfrs.values():
                vnfr_update = {"status": "ERROR"}
                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    if "status" not in vdur:
                        vdur["status"] = "ERROR"
                        vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
                        if error_text:
                            vdur["status-detailed"] = str(error_text)
                            # NOTE(review): the db gets the literal "ERROR" while the
                            # in-memory vdur gets error_text - confirm this asymmetry
                            # is intended
                            vnfr_update[
                                "vdur.{}.status-detailed".format(vdu_index)
                            ] = "ERROR"
                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
        except DbException as e:
            self.logger.error("Cannot update vnf. {}".format(e))
690
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        # the for...else clauses below raise when the inner loop found no match
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # keep only the first of the semicolon-separated addresses
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO[
                        "ip_address"
                    ].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP(
                            "ns member_vnf_index '{}' has no IP address".format(
                                vnf_index
                            )
                        )

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed through the VIM, skip them
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            # same vdu id but another replica: keep counting
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get(
                                        "ip_address"
                                    )
                                    ifacer["mac-address"] = interface_RO.get(
                                        "mac_address"
                                    )
                                    break
                            else:
                                raise LcmException(
                                    "ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                    "from VIM info".format(
                                        vnf_index, vdur["vdu-id-ref"], ifacer["name"]
                                    )
                                )
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                            "VIM info".format(
                                vnf_index, vdur["vdu-id-ref"], vdur["count-index"]
                            )
                        )

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException(
                            "ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                                vnf_index, vld["id"]
                            )
                        )

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException(
                    "ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(
                        vnf_index
                    )
                )
787
788 def _get_ns_config_info(self, nsr_id):
789 """
790 Generates a mapping between vnf,vdu elements and the N2VC id
791 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
792 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
793 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
794 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
795 """
796 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
797 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
798 mapping = {}
799 ns_config_info = {"osm-config-mapping": mapping}
800 for vca in vca_deployed_list:
801 if not vca["member-vnf-index"]:
802 continue
803 if not vca["vdu_id"]:
804 mapping[vca["member-vnf-index"]] = vca["application"]
805 else:
806 mapping[
807 "{}.{}.{}".format(
808 vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"]
809 )
810 ] = vca["application"]
811 return ns_config_info
812
813 async def _instantiate_ng_ro(
814 self,
815 logging_text,
816 nsr_id,
817 nsd,
818 db_nsr,
819 db_nslcmop,
820 db_vnfrs,
821 db_vnfds,
822 n2vc_key_list,
823 stage,
824 start_deploy,
825 timeout_ns_deploy,
826 ):
827
828 db_vims = {}
829
830 def get_vim_account(vim_account_id):
831 nonlocal db_vims
832 if vim_account_id in db_vims:
833 return db_vims[vim_account_id]
834 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
835 db_vims[vim_account_id] = db_vim
836 return db_vim
837
838 # modify target_vld info with instantiation parameters
839 def parse_vld_instantiation_params(
840 target_vim, target_vld, vld_params, target_sdn
841 ):
842 if vld_params.get("ip-profile"):
843 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params[
844 "ip-profile"
845 ]
846 if vld_params.get("provider-network"):
847 target_vld["vim_info"][target_vim]["provider_network"] = vld_params[
848 "provider-network"
849 ]
850 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
851 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params[
852 "provider-network"
853 ]["sdn-ports"]
854 if vld_params.get("wimAccountId"):
855 target_wim = "wim:{}".format(vld_params["wimAccountId"])
856 target_vld["vim_info"][target_wim] = {}
857 for param in ("vim-network-name", "vim-network-id"):
858 if vld_params.get(param):
859 if isinstance(vld_params[param], dict):
860 for vim, vim_net in vld_params[param].items():
861 other_target_vim = "vim:" + vim
862 populate_dict(
863 target_vld["vim_info"],
864 (other_target_vim, param.replace("-", "_")),
865 vim_net,
866 )
867 else: # isinstance str
868 target_vld["vim_info"][target_vim][
869 param.replace("-", "_")
870 ] = vld_params[param]
871 if vld_params.get("common_id"):
872 target_vld["common_id"] = vld_params.get("common_id")
873
874 # modify target["ns"]["vld"] with instantiation parameters to override vnf vim-account
875 def update_ns_vld_target(target, ns_params):
876 for vnf_params in ns_params.get("vnf", ()):
877 if vnf_params.get("vimAccountId"):
878 target_vnf = next(
879 (
880 vnfr
881 for vnfr in db_vnfrs.values()
882 if vnf_params["member-vnf-index"]
883 == vnfr["member-vnf-index-ref"]
884 ),
885 None,
886 )
887 vdur = next((vdur for vdur in target_vnf.get("vdur", ())), None)
888 for a_index, a_vld in enumerate(target["ns"]["vld"]):
889 target_vld = find_in_list(
890 get_iterable(vdur, "interfaces"),
891 lambda iface: iface.get("ns-vld-id") == a_vld["name"],
892 )
893
894 vld_params = find_in_list(
895 get_iterable(ns_params, "vld"),
896 lambda v_vld: v_vld["name"] in (a_vld["name"], a_vld["id"]),
897 )
898 if target_vld:
899
900 if vnf_params.get("vimAccountId") not in a_vld.get(
901 "vim_info", {}
902 ):
903 target_vim_network_list = [
904 v for _, v in a_vld.get("vim_info").items()
905 ]
906 target_vim_network_name = next(
907 (
908 item.get("vim_network_name", "")
909 for item in target_vim_network_list
910 ),
911 "",
912 )
913
914 target["ns"]["vld"][a_index].get("vim_info").update(
915 {
916 "vim:{}".format(vnf_params["vimAccountId"]): {
917 "vim_network_name": target_vim_network_name,
918 }
919 }
920 )
921
922 if vld_params:
923 for param in ("vim-network-name", "vim-network-id"):
924 if vld_params.get(param) and isinstance(
925 vld_params[param], dict
926 ):
927 for vim, vim_net in vld_params[
928 param
929 ].items():
930 other_target_vim = "vim:" + vim
931 populate_dict(
932 target["ns"]["vld"][a_index].get(
933 "vim_info"
934 ),
935 (
936 other_target_vim,
937 param.replace("-", "_"),
938 ),
939 vim_net,
940 )
941
942 nslcmop_id = db_nslcmop["_id"]
943 target = {
944 "name": db_nsr["name"],
945 "ns": {"vld": []},
946 "vnf": [],
947 "image": deepcopy(db_nsr["image"]),
948 "flavor": deepcopy(db_nsr["flavor"]),
949 "action_id": nslcmop_id,
950 "cloud_init_content": {},
951 }
952 for image in target["image"]:
953 image["vim_info"] = {}
954 for flavor in target["flavor"]:
955 flavor["vim_info"] = {}
956 if db_nsr.get("affinity-or-anti-affinity-group"):
957 target["affinity-or-anti-affinity-group"] = deepcopy(
958 db_nsr["affinity-or-anti-affinity-group"]
959 )
960 for affinity_or_anti_affinity_group in target[
961 "affinity-or-anti-affinity-group"
962 ]:
963 affinity_or_anti_affinity_group["vim_info"] = {}
964
965 if db_nslcmop.get("lcmOperationType") != "instantiate":
966 # get parameters of instantiation:
967 db_nslcmop_instantiate = self.db.get_list(
968 "nslcmops",
969 {
970 "nsInstanceId": db_nslcmop["nsInstanceId"],
971 "lcmOperationType": "instantiate",
972 },
973 )[-1]
974 ns_params = db_nslcmop_instantiate.get("operationParams")
975 else:
976 ns_params = db_nslcmop.get("operationParams")
977 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
978 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
979
980 cp2target = {}
981 for vld_index, vld in enumerate(db_nsr.get("vld")):
982 target_vim = "vim:{}".format(ns_params["vimAccountId"])
983 target_vld = {
984 "id": vld["id"],
985 "name": vld["name"],
986 "mgmt-network": vld.get("mgmt-network", False),
987 "type": vld.get("type"),
988 "vim_info": {
989 target_vim: {
990 "vim_network_name": vld.get("vim-network-name"),
991 "vim_account_id": ns_params["vimAccountId"],
992 }
993 },
994 }
995 # check if this network needs SDN assist
996 if vld.get("pci-interfaces"):
997 db_vim = get_vim_account(ns_params["vimAccountId"])
998 sdnc_id = db_vim["config"].get("sdn-controller")
999 if sdnc_id:
1000 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
1001 target_sdn = "sdn:{}".format(sdnc_id)
1002 target_vld["vim_info"][target_sdn] = {
1003 "sdn": True,
1004 "target_vim": target_vim,
1005 "vlds": [sdn_vld],
1006 "type": vld.get("type"),
1007 }
1008
1009 nsd_vnf_profiles = get_vnf_profiles(nsd)
1010 for nsd_vnf_profile in nsd_vnf_profiles:
1011 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
1012 if cp["virtual-link-profile-id"] == vld["id"]:
1013 cp2target[
1014 "member_vnf:{}.{}".format(
1015 cp["constituent-cpd-id"][0][
1016 "constituent-base-element-id"
1017 ],
1018 cp["constituent-cpd-id"][0]["constituent-cpd-id"],
1019 )
1020 ] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
1021
1022 # check at nsd descriptor, if there is an ip-profile
1023 vld_params = {}
1024 nsd_vlp = find_in_list(
1025 get_virtual_link_profiles(nsd),
1026 lambda a_link_profile: a_link_profile["virtual-link-desc-id"]
1027 == vld["id"],
1028 )
1029 if (
1030 nsd_vlp
1031 and nsd_vlp.get("virtual-link-protocol-data")
1032 and nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1033 ):
1034 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"][
1035 "l3-protocol-data"
1036 ]
1037 ip_profile_dest_data = {}
1038 if "ip-version" in ip_profile_source_data:
1039 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1040 "ip-version"
1041 ]
1042 if "cidr" in ip_profile_source_data:
1043 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1044 "cidr"
1045 ]
1046 if "gateway-ip" in ip_profile_source_data:
1047 ip_profile_dest_data["gateway-address"] = ip_profile_source_data[
1048 "gateway-ip"
1049 ]
1050 if "dhcp-enabled" in ip_profile_source_data:
1051 ip_profile_dest_data["dhcp-params"] = {
1052 "enabled": ip_profile_source_data["dhcp-enabled"]
1053 }
1054 vld_params["ip-profile"] = ip_profile_dest_data
1055
1056 # update vld_params with instantiation params
1057 vld_instantiation_params = find_in_list(
1058 get_iterable(ns_params, "vld"),
1059 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]),
1060 )
1061 if vld_instantiation_params:
1062 vld_params.update(vld_instantiation_params)
1063 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
1064 target["ns"]["vld"].append(target_vld)
1065 # Update the target ns_vld if vnf vim_account is overriden by instantiation params
1066 update_ns_vld_target(target, ns_params)
1067
1068 for vnfr in db_vnfrs.values():
1069 vnfd = find_in_list(
1070 db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"]
1071 )
1072 vnf_params = find_in_list(
1073 get_iterable(ns_params, "vnf"),
1074 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"],
1075 )
1076 target_vnf = deepcopy(vnfr)
1077 target_vim = "vim:{}".format(vnfr["vim-account-id"])
1078 for vld in target_vnf.get("vld", ()):
1079 # check if connected to a ns.vld, to fill target'
1080 vnf_cp = find_in_list(
1081 vnfd.get("int-virtual-link-desc", ()),
1082 lambda cpd: cpd.get("id") == vld["id"],
1083 )
1084 if vnf_cp:
1085 ns_cp = "member_vnf:{}.{}".format(
1086 vnfr["member-vnf-index-ref"], vnf_cp["id"]
1087 )
1088 if cp2target.get(ns_cp):
1089 vld["target"] = cp2target[ns_cp]
1090
1091 vld["vim_info"] = {
1092 target_vim: {"vim_network_name": vld.get("vim-network-name")}
1093 }
1094 # check if this network needs SDN assist
1095 target_sdn = None
1096 if vld.get("pci-interfaces"):
1097 db_vim = get_vim_account(vnfr["vim-account-id"])
1098 sdnc_id = db_vim["config"].get("sdn-controller")
1099 if sdnc_id:
1100 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1101 target_sdn = "sdn:{}".format(sdnc_id)
1102 vld["vim_info"][target_sdn] = {
1103 "sdn": True,
1104 "target_vim": target_vim,
1105 "vlds": [sdn_vld],
1106 "type": vld.get("type"),
1107 }
1108
1109 # check at vnfd descriptor, if there is an ip-profile
1110 vld_params = {}
1111 vnfd_vlp = find_in_list(
1112 get_virtual_link_profiles(vnfd),
1113 lambda a_link_profile: a_link_profile["id"] == vld["id"],
1114 )
1115 if (
1116 vnfd_vlp
1117 and vnfd_vlp.get("virtual-link-protocol-data")
1118 and vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data")
1119 ):
1120 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"][
1121 "l3-protocol-data"
1122 ]
1123 ip_profile_dest_data = {}
1124 if "ip-version" in ip_profile_source_data:
1125 ip_profile_dest_data["ip-version"] = ip_profile_source_data[
1126 "ip-version"
1127 ]
1128 if "cidr" in ip_profile_source_data:
1129 ip_profile_dest_data["subnet-address"] = ip_profile_source_data[
1130 "cidr"
1131 ]
1132 if "gateway-ip" in ip_profile_source_data:
1133 ip_profile_dest_data[
1134 "gateway-address"
1135 ] = ip_profile_source_data["gateway-ip"]
1136 if "dhcp-enabled" in ip_profile_source_data:
1137 ip_profile_dest_data["dhcp-params"] = {
1138 "enabled": ip_profile_source_data["dhcp-enabled"]
1139 }
1140
1141 vld_params["ip-profile"] = ip_profile_dest_data
1142 # update vld_params with instantiation params
1143 if vnf_params:
1144 vld_instantiation_params = find_in_list(
1145 get_iterable(vnf_params, "internal-vld"),
1146 lambda i_vld: i_vld["name"] == vld["id"],
1147 )
1148 if vld_instantiation_params:
1149 vld_params.update(vld_instantiation_params)
1150 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1151
1152 vdur_list = []
1153 for vdur in target_vnf.get("vdur", ()):
1154 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1155 continue # This vdu must not be created
1156 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
1157
1158 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
1159
1160 if ssh_keys_all:
1161 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
1162 vnf_configuration = get_configuration(vnfd, vnfd["id"])
1163 if (
1164 vdu_configuration
1165 and vdu_configuration.get("config-access")
1166 and vdu_configuration.get("config-access").get("ssh-access")
1167 ):
1168 vdur["ssh-keys"] = ssh_keys_all
1169 vdur["ssh-access-required"] = vdu_configuration[
1170 "config-access"
1171 ]["ssh-access"]["required"]
1172 elif (
1173 vnf_configuration
1174 and vnf_configuration.get("config-access")
1175 and vnf_configuration.get("config-access").get("ssh-access")
1176 and any(iface.get("mgmt-vnf") for iface in vdur["interfaces"])
1177 ):
1178 vdur["ssh-keys"] = ssh_keys_all
1179 vdur["ssh-access-required"] = vnf_configuration[
1180 "config-access"
1181 ]["ssh-access"]["required"]
1182 elif ssh_keys_instantiation and find_in_list(
1183 vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")
1184 ):
1185 vdur["ssh-keys"] = ssh_keys_instantiation
1186
1187 self.logger.debug("NS > vdur > {}".format(vdur))
1188
1189 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
1190 # cloud-init
1191 if vdud.get("cloud-init-file"):
1192 vdur["cloud-init"] = "{}:file:{}".format(
1193 vnfd["_id"], vdud.get("cloud-init-file")
1194 )
1195 # read file and put content at target.cloul_init_content. Avoid ng_ro to use shared package system
1196 if vdur["cloud-init"] not in target["cloud_init_content"]:
1197 base_folder = vnfd["_admin"]["storage"]
1198 if base_folder["pkg-dir"]:
1199 cloud_init_file = "{}/{}/cloud_init/{}".format(
1200 base_folder["folder"],
1201 base_folder["pkg-dir"],
1202 vdud.get("cloud-init-file"),
1203 )
1204 else:
1205 cloud_init_file = "{}/Scripts/cloud_init/{}".format(
1206 base_folder["folder"],
1207 vdud.get("cloud-init-file"),
1208 )
1209 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1210 target["cloud_init_content"][
1211 vdur["cloud-init"]
1212 ] = ci_file.read()
1213 elif vdud.get("cloud-init"):
1214 vdur["cloud-init"] = "{}:vdu:{}".format(
1215 vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"])
1216 )
1217 # put content at target.cloul_init_content. Avoid ng_ro read vnfd descriptor
1218 target["cloud_init_content"][vdur["cloud-init"]] = vdud[
1219 "cloud-init"
1220 ]
1221 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1222 deploy_params_vdu = self._format_additional_params(
1223 vdur.get("additionalParams") or {}
1224 )
1225 deploy_params_vdu["OSM"] = get_osm_params(
1226 vnfr, vdur["vdu-id-ref"], vdur["count-index"]
1227 )
1228 vdur["additionalParams"] = deploy_params_vdu
1229
1230 # flavor
1231 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1232 if target_vim not in ns_flavor["vim_info"]:
1233 ns_flavor["vim_info"][target_vim] = {}
1234
1235 # deal with images
1236 # in case alternative images are provided we must check if they should be applied
1237 # for the vim_type, modify the vim_type taking into account
1238 ns_image_id = int(vdur["ns-image-id"])
1239 if vdur.get("alt-image-ids"):
1240 db_vim = get_vim_account(vnfr["vim-account-id"])
1241 vim_type = db_vim["vim_type"]
1242 for alt_image_id in vdur.get("alt-image-ids"):
1243 ns_alt_image = target["image"][int(alt_image_id)]
1244 if vim_type == ns_alt_image.get("vim-type"):
1245 # must use alternative image
1246 self.logger.debug(
1247 "use alternative image id: {}".format(alt_image_id)
1248 )
1249 ns_image_id = alt_image_id
1250 vdur["ns-image-id"] = ns_image_id
1251 break
1252 ns_image = target["image"][int(ns_image_id)]
1253 if target_vim not in ns_image["vim_info"]:
1254 ns_image["vim_info"][target_vim] = {}
1255
1256 # Affinity groups
1257 if vdur.get("affinity-or-anti-affinity-group-id"):
1258 for ags_id in vdur["affinity-or-anti-affinity-group-id"]:
1259 ns_ags = target["affinity-or-anti-affinity-group"][int(ags_id)]
1260 if target_vim not in ns_ags["vim_info"]:
1261 ns_ags["vim_info"][target_vim] = {}
1262
1263 vdur["vim_info"] = {target_vim: {}}
1264 # instantiation parameters
1265 if vnf_params:
1266 vdu_instantiation_params = find_in_list(
1267 get_iterable(vnf_params, "vdu"),
1268 lambda i_vdu: i_vdu["id"] == vdud["id"],
1269 )
1270 if vdu_instantiation_params:
1271 # Parse the vdu_volumes from the instantiation params
1272 vdu_volumes = get_volumes_from_instantiation_params(
1273 vdu_instantiation_params, vdud
1274 )
1275 vdur["additionalParams"]["OSM"]["vdu_volumes"] = vdu_volumes
1276 vdur_list.append(vdur)
1277 target_vnf["vdur"] = vdur_list
1278 target["vnf"].append(target_vnf)
1279
1280 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
1281 desc = await self.RO.deploy(nsr_id, target)
1282 self.logger.debug("RO return > {}".format(desc))
1283 action_id = desc["action_id"]
1284 await self._wait_ng_ro(
1285 nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage,
1286 operation="instantiation"
1287 )
1288
1289 # Updating NSR
1290 db_nsr_update = {
1291 "_admin.deployed.RO.operational-status": "running",
1292 "detailed-status": " ".join(stage),
1293 }
1294 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1295 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1296 self._write_op_status(nslcmop_id, stage)
1297 self.logger.debug(
1298 logging_text + "ns deployed at RO. RO_id={}".format(action_id)
1299 )
1300 return
1301
1302 async def _wait_ng_ro(
1303 self,
1304 nsr_id,
1305 action_id,
1306 nslcmop_id=None,
1307 start_time=None,
1308 timeout=600,
1309 stage=None,
1310 operation=None,
1311 ):
1312 detailed_status_old = None
1313 db_nsr_update = {}
1314 start_time = start_time or time()
1315 while time() <= start_time + timeout:
1316 desc_status = await self.op_status_map[operation](nsr_id, action_id)
1317 self.logger.debug("Wait NG RO > {}".format(desc_status))
1318 if desc_status["status"] == "FAILED":
1319 raise NgRoException(desc_status["details"])
1320 elif desc_status["status"] == "BUILD":
1321 if stage:
1322 stage[2] = "VIM: ({})".format(desc_status["details"])
1323 elif desc_status["status"] == "DONE":
1324 if stage:
1325 stage[2] = "Deployed at VIM"
1326 break
1327 else:
1328 assert False, "ROclient.check_ns_status returns unknown {}".format(
1329 desc_status["status"]
1330 )
1331 if stage and nslcmop_id and stage[2] != detailed_status_old:
1332 detailed_status_old = stage[2]
1333 db_nsr_update["detailed-status"] = " ".join(stage)
1334 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1335 self._write_op_status(nslcmop_id, stage)
1336 await asyncio.sleep(15, loop=self.loop)
1337 else: # timeout_ns_deploy
1338 raise NgRoException("Timeout waiting ns to deploy")
1339
1340 async def _terminate_ng_ro(
1341 self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
1342 ):
1343 db_nsr_update = {}
1344 failed_detail = []
1345 action_id = None
1346 start_deploy = time()
1347 try:
1348 target = {
1349 "ns": {"vld": []},
1350 "vnf": [],
1351 "image": [],
1352 "flavor": [],
1353 "action_id": nslcmop_id,
1354 }
1355 desc = await self.RO.deploy(nsr_id, target)
1356 action_id = desc["action_id"]
1357 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1358 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1359 self.logger.debug(
1360 logging_text
1361 + "ns terminate action at RO. action_id={}".format(action_id)
1362 )
1363
1364 # wait until done
1365 delete_timeout = 20 * 60 # 20 minutes
1366 await self._wait_ng_ro(
1367 nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage,
1368 operation="termination"
1369 )
1370
1371 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1372 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1373 # delete all nsr
1374 await self.RO.delete(nsr_id)
1375 except Exception as e:
1376 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1377 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1378 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1379 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1380 self.logger.debug(
1381 logging_text + "RO_action_id={} already deleted".format(action_id)
1382 )
1383 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1384 failed_detail.append("delete conflict: {}".format(e))
1385 self.logger.debug(
1386 logging_text
1387 + "RO_action_id={} delete conflict: {}".format(action_id, e)
1388 )
1389 else:
1390 failed_detail.append("delete error: {}".format(e))
1391 self.logger.error(
1392 logging_text
1393 + "RO_action_id={} delete error: {}".format(action_id, e)
1394 )
1395
1396 if failed_detail:
1397 stage[2] = "Error deleting from VIM"
1398 else:
1399 stage[2] = "Deleted from VIM"
1400 db_nsr_update["detailed-status"] = " ".join(stage)
1401 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1402 self._write_op_status(nslcmop_id, stage)
1403
1404 if failed_detail:
1405 raise LcmException("; ".join(failed_detail))
1406 return
1407
1408 async def instantiate_RO(
1409 self,
1410 logging_text,
1411 nsr_id,
1412 nsd,
1413 db_nsr,
1414 db_nslcmop,
1415 db_vnfrs,
1416 db_vnfds,
1417 n2vc_key_list,
1418 stage,
1419 ):
1420 """
1421 Instantiate at RO
1422 :param logging_text: preffix text to use at logging
1423 :param nsr_id: nsr identity
1424 :param nsd: database content of ns descriptor
1425 :param db_nsr: database content of ns record
1426 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1427 :param db_vnfrs:
1428 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1429 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1430 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1431 :return: None or exception
1432 """
1433 try:
1434 start_deploy = time()
1435 ns_params = db_nslcmop.get("operationParams")
1436 if ns_params and ns_params.get("timeout_ns_deploy"):
1437 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1438 else:
1439 timeout_ns_deploy = self.timeout.get(
1440 "ns_deploy", self.timeout_ns_deploy
1441 )
1442
1443 # Check for and optionally request placement optimization. Database will be updated if placement activated
1444 stage[2] = "Waiting for Placement."
1445 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1446 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
1447 for vnfr in db_vnfrs.values():
1448 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1449 break
1450 else:
1451 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1452
1453 return await self._instantiate_ng_ro(
1454 logging_text,
1455 nsr_id,
1456 nsd,
1457 db_nsr,
1458 db_nslcmop,
1459 db_vnfrs,
1460 db_vnfds,
1461 n2vc_key_list,
1462 stage,
1463 start_deploy,
1464 timeout_ns_deploy,
1465 )
1466 except Exception as e:
1467 stage[2] = "ERROR deploying at VIM"
1468 self.set_vnfr_at_error(db_vnfrs, str(e))
1469 self.logger.error(
1470 "Error deploying at VIM {}".format(e),
1471 exc_info=not isinstance(
1472 e,
1473 (
1474 ROclient.ROClientException,
1475 LcmException,
1476 DbException,
1477 NgRoException,
1478 ),
1479 ),
1480 )
1481 raise
1482
1483 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1484 """
1485 Wait for kdu to be up, get ip address
1486 :param logging_text: prefix use for logging
1487 :param nsr_id:
1488 :param vnfr_id:
1489 :param kdu_name:
1490 :return: IP address, K8s services
1491 """
1492
1493 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1494 nb_tries = 0
1495
1496 while nb_tries < 360:
1497 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1498 kdur = next(
1499 (
1500 x
1501 for x in get_iterable(db_vnfr, "kdur")
1502 if x.get("kdu-name") == kdu_name
1503 ),
1504 None,
1505 )
1506 if not kdur:
1507 raise LcmException(
1508 "Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name)
1509 )
1510 if kdur.get("status"):
1511 if kdur["status"] in ("READY", "ENABLED"):
1512 return kdur.get("ip-address"), kdur.get("services")
1513 else:
1514 raise LcmException(
1515 "target KDU={} is in error state".format(kdu_name)
1516 )
1517
1518 await asyncio.sleep(10, loop=self.loop)
1519 nb_tries += 1
1520 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1521
    async def wait_vm_up_insert_key_ro(
        self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None
    ):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id:
        :param vnfr_id:
        :param vdu_id:
        :param vdu_index:
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0  # retry counter for the key-injection step (classic RO only)
        target_vdu_id = None  # set once the target VDU is found and ACTIVE
        ro_retries = 0  # overall retry counter for the whole wait loop

        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                raise LcmException(
                    "Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id)
                )

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                # re-read the vnfr each try; RO updates it asynchronously
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException(
                            "Cannot inject ssh-key because target VNF is in error state"
                        )
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur holding the VNF management ip
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("ip-address") == ip_address
                        ),
                        None,
                    )
                else:  # VDU case
                    # match by vdu id and count index
                    vdur = next(
                        (
                            x
                            for x in get_iterable(db_vnfr, "vdur")
                            if x.get("vdu-id-ref") == vdu_id
                            and x.get("count-index") == vdu_index
                        ),
                        None,
                    )

                if (
                    not vdur and len(db_vnfr.get("vdur", ())) == 1
                ):  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException(
                        "Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(
                            vnfr_id, vdu_id, vdu_index
                        )
                    )
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(
                        t for t in vdur["vim_info"]
                    )  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # a PDU is considered up by definition; otherwise require ACTIVE
                # either from classic RO ("status") or NG-RO ("vim_status")
                if (
                    vdur.get("pdu-type")
                    or vdur.get("status") == "ACTIVE"
                    or ng_ro_status == "ACTIVE"
                ):
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException(
                        "Cannot inject ssh-key because target VM is in error state"
                    )

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not possible on physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(
                        db_vnfr["member-vnf-index-ref"], target_vdu_id
                    )  # TODO add vdu_index
                    if self.ng_ro:
                        # NG-RO path: injection is requested as a deploy action
                        target = {
                            "action": {
                                "action": "inject_ssh_key",
                                "key": pub_key,
                                "user": user,
                            },
                            "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                        }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600, operation="instantiation")
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(
                                db_nsrs, ("_admin", "deployed", "RO", "nsr_id")
                            )
                        if not ro_nsr_id:
                            # RO nsr not deployed yet: sleep and retry
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={
                                "add_public_key": pub_key,
                                "vms": [ro_vm_id],
                                "user": user,
                            },
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException(
                                "Unknown response from RO when injecting key"
                            )
                        # for-else: raise only if no entry reported vim_result 200
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException(
                                "error injecting key: {}".format(
                                    result.get("description")
                                )
                            )
                        break
                except NgRoException as e:
                    raise LcmException(
                        "Reaching max tries injecting key. Error: {}".format(e)
                    )
                except ROclient.ROClientException as e:
                    # classic RO may fail transiently; retry up to 20 times
                    if not nb_tries:
                        self.logger.debug(
                            logging_text
                            + "error injecting key: {}. Retrying until {} seconds".format(
                                e, 20 * 10
                            )
                        )
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException(
                            "Reaching max tries injecting key. Error: {}".format(e)
                        )
            else:
                # nothing to inject: the VM is up, we are done
                break

        return ip_address
1698
1699 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1700 """
1701 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1702 """
1703 my_vca = vca_deployed_list[vca_index]
1704 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1705 # vdu or kdu: no dependencies
1706 return
1707 timeout = 300
1708 while timeout >= 0:
1709 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1710 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1711 configuration_status_list = db_nsr["configurationStatus"]
1712 for index, vca_deployed in enumerate(configuration_status_list):
1713 if index == vca_index:
1714 # myself
1715 continue
1716 if not my_vca.get("member-vnf-index") or (
1717 vca_deployed.get("member-vnf-index")
1718 == my_vca.get("member-vnf-index")
1719 ):
1720 internal_status = configuration_status_list[index].get("status")
1721 if internal_status == "READY":
1722 continue
1723 elif internal_status == "BROKEN":
1724 raise LcmException(
1725 "Configuration aborted because dependent charm/s has failed"
1726 )
1727 else:
1728 break
1729 else:
1730 # no dependencies, return
1731 return
1732 await asyncio.sleep(10)
1733 timeout -= 1
1734
1735 raise LcmException("Configuration aborted because dependent charm/s timeout")
1736
1737 def get_vca_id(self, db_vnfr: dict, db_nsr: dict):
1738 vca_id = None
1739 if db_vnfr:
1740 vca_id = deep_get(db_vnfr, ("vca-id",))
1741 elif db_nsr:
1742 vim_account_id = deep_get(db_nsr, ("instantiate_params", "vimAccountId"))
1743 vca_id = VimAccountDB.get_vim_account_with_id(vim_account_id).get("vca")
1744 return vca_id
1745
1746 async def instantiate_N2VC(
1747 self,
1748 logging_text,
1749 vca_index,
1750 nsi_id,
1751 db_nsr,
1752 db_vnfr,
1753 vdu_id,
1754 kdu_name,
1755 vdu_index,
1756 config_descriptor,
1757 deploy_params,
1758 base_folder,
1759 nslcmop_id,
1760 stage,
1761 vca_type,
1762 vca_name,
1763 ee_config_descriptor,
1764 ):
1765 nsr_id = db_nsr["_id"]
1766 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1767 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1768 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1769 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1770 db_dict = {
1771 "collection": "nsrs",
1772 "filter": {"_id": nsr_id},
1773 "path": db_update_entry,
1774 }
1775 step = ""
1776 try:
1777
1778 element_type = "NS"
1779 element_under_configuration = nsr_id
1780
1781 vnfr_id = None
1782 if db_vnfr:
1783 vnfr_id = db_vnfr["_id"]
1784 osm_config["osm"]["vnf_id"] = vnfr_id
1785
1786 namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)
1787
1788 if vca_type == "native_charm":
1789 index_number = 0
1790 else:
1791 index_number = vdu_index or 0
1792
1793 if vnfr_id:
1794 element_type = "VNF"
1795 element_under_configuration = vnfr_id
1796 namespace += ".{}-{}".format(vnfr_id, index_number)
1797 if vdu_id:
1798 namespace += ".{}-{}".format(vdu_id, index_number)
1799 element_type = "VDU"
1800 element_under_configuration = "{}-{}".format(vdu_id, index_number)
1801 osm_config["osm"]["vdu_id"] = vdu_id
1802 elif kdu_name:
1803 namespace += ".{}".format(kdu_name)
1804 element_type = "KDU"
1805 element_under_configuration = kdu_name
1806 osm_config["osm"]["kdu_name"] = kdu_name
1807
1808 # Get artifact path
1809 if base_folder["pkg-dir"]:
1810 artifact_path = "{}/{}/{}/{}".format(
1811 base_folder["folder"],
1812 base_folder["pkg-dir"],
1813 "charms"
1814 if vca_type
1815 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1816 else "helm-charts",
1817 vca_name,
1818 )
1819 else:
1820 artifact_path = "{}/Scripts/{}/{}/".format(
1821 base_folder["folder"],
1822 "charms"
1823 if vca_type
1824 in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
1825 else "helm-charts",
1826 vca_name,
1827 )
1828
1829 self.logger.debug("Artifact path > {}".format(artifact_path))
1830
1831 # get initial_config_primitive_list that applies to this element
1832 initial_config_primitive_list = config_descriptor.get(
1833 "initial-config-primitive"
1834 )
1835
1836 self.logger.debug(
1837 "Initial config primitive list > {}".format(
1838 initial_config_primitive_list
1839 )
1840 )
1841
1842 # add config if not present for NS charm
1843 ee_descriptor_id = ee_config_descriptor.get("id")
1844 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1845 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
1846 initial_config_primitive_list, vca_deployed, ee_descriptor_id
1847 )
1848
1849 self.logger.debug(
1850 "Initial config primitive list #2 > {}".format(
1851 initial_config_primitive_list
1852 )
1853 )
1854 # n2vc_redesign STEP 3.1
1855 # find old ee_id if exists
1856 ee_id = vca_deployed.get("ee_id")
1857
1858 vca_id = self.get_vca_id(db_vnfr, db_nsr)
1859 # create or register execution environment in VCA
1860 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1861
1862 self._write_configuration_status(
1863 nsr_id=nsr_id,
1864 vca_index=vca_index,
1865 status="CREATING",
1866 element_under_configuration=element_under_configuration,
1867 element_type=element_type,
1868 )
1869
1870 step = "create execution environment"
1871 self.logger.debug(logging_text + step)
1872
1873 ee_id = None
1874 credentials = None
1875 if vca_type == "k8s_proxy_charm":
1876 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1877 charm_name=artifact_path[artifact_path.rfind("/") + 1 :],
1878 namespace=namespace,
1879 artifact_path=artifact_path,
1880 db_dict=db_dict,
1881 vca_id=vca_id,
1882 )
1883 elif vca_type == "helm" or vca_type == "helm-v3":
1884 ee_id, credentials = await self.vca_map[
1885 vca_type
1886 ].create_execution_environment(
1887 namespace=namespace,
1888 reuse_ee_id=ee_id,
1889 db_dict=db_dict,
1890 config=osm_config,
1891 artifact_path=artifact_path,
1892 vca_type=vca_type,
1893 )
1894 else:
1895 ee_id, credentials = await self.vca_map[
1896 vca_type
1897 ].create_execution_environment(
1898 namespace=namespace,
1899 reuse_ee_id=ee_id,
1900 db_dict=db_dict,
1901 vca_id=vca_id,
1902 )
1903
1904 elif vca_type == "native_charm":
1905 step = "Waiting to VM being up and getting IP address"
1906 self.logger.debug(logging_text + step)
1907 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
1908 logging_text,
1909 nsr_id,
1910 vnfr_id,
1911 vdu_id,
1912 vdu_index,
1913 user=None,
1914 pub_key=None,
1915 )
1916 credentials = {"hostname": rw_mgmt_ip}
1917 # get username
1918 username = deep_get(
1919 config_descriptor, ("config-access", "ssh-access", "default-user")
1920 )
1921 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1922 # merged. Meanwhile let's get username from initial-config-primitive
1923 if not username and initial_config_primitive_list:
1924 for config_primitive in initial_config_primitive_list:
1925 for param in config_primitive.get("parameter", ()):
1926 if param["name"] == "ssh-username":
1927 username = param["value"]
1928 break
1929 if not username:
1930 raise LcmException(
1931 "Cannot determine the username neither with 'initial-config-primitive' nor with "
1932 "'config-access.ssh-access.default-user'"
1933 )
1934 credentials["username"] = username
1935 # n2vc_redesign STEP 3.2
1936
1937 self._write_configuration_status(
1938 nsr_id=nsr_id,
1939 vca_index=vca_index,
1940 status="REGISTERING",
1941 element_under_configuration=element_under_configuration,
1942 element_type=element_type,
1943 )
1944
1945 step = "register execution environment {}".format(credentials)
1946 self.logger.debug(logging_text + step)
1947 ee_id = await self.vca_map[vca_type].register_execution_environment(
1948 credentials=credentials,
1949 namespace=namespace,
1950 db_dict=db_dict,
1951 vca_id=vca_id,
1952 )
1953
1954 # for compatibility with MON/POL modules, the need model and application name at database
1955 # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
1956 ee_id_parts = ee_id.split(".")
1957 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1958 if len(ee_id_parts) >= 2:
1959 model_name = ee_id_parts[0]
1960 application_name = ee_id_parts[1]
1961 db_nsr_update[db_update_entry + "model"] = model_name
1962 db_nsr_update[db_update_entry + "application"] = application_name
1963
1964 # n2vc_redesign STEP 3.3
1965 step = "Install configuration Software"
1966
1967 self._write_configuration_status(
1968 nsr_id=nsr_id,
1969 vca_index=vca_index,
1970 status="INSTALLING SW",
1971 element_under_configuration=element_under_configuration,
1972 element_type=element_type,
1973 other_update=db_nsr_update,
1974 )
1975
1976 # TODO check if already done
1977 self.logger.debug(logging_text + step)
1978 config = None
1979 if vca_type == "native_charm":
1980 config_primitive = next(
1981 (p for p in initial_config_primitive_list if p["name"] == "config"),
1982 None,
1983 )
1984 if config_primitive:
1985 config = self._map_primitive_params(
1986 config_primitive, {}, deploy_params
1987 )
1988 num_units = 1
1989 if vca_type == "lxc_proxy_charm":
1990 if element_type == "NS":
1991 num_units = db_nsr.get("config-units") or 1
1992 elif element_type == "VNF":
1993 num_units = db_vnfr.get("config-units") or 1
1994 elif element_type == "VDU":
1995 for v in db_vnfr["vdur"]:
1996 if vdu_id == v["vdu-id-ref"]:
1997 num_units = v.get("config-units") or 1
1998 break
1999 if vca_type != "k8s_proxy_charm":
2000 await self.vca_map[vca_type].install_configuration_sw(
2001 ee_id=ee_id,
2002 artifact_path=artifact_path,
2003 db_dict=db_dict,
2004 config=config,
2005 num_units=num_units,
2006 vca_id=vca_id,
2007 vca_type=vca_type,
2008 )
2009
2010 # write in db flag of configuration_sw already installed
2011 self.update_db_2(
2012 "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
2013 )
2014
2015 # add relations for this VCA (wait for other peers related with this VCA)
2016 await self._add_vca_relations(
2017 logging_text=logging_text,
2018 nsr_id=nsr_id,
2019 vca_type=vca_type,
2020 vca_index=vca_index,
2021 )
2022
2023 # if SSH access is required, then get execution environment SSH public
2024 # if native charm we have waited already to VM be UP
2025 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
2026 pub_key = None
2027 user = None
2028 # self.logger.debug("get ssh key block")
2029 if deep_get(
2030 config_descriptor, ("config-access", "ssh-access", "required")
2031 ):
2032 # self.logger.debug("ssh key needed")
2033 # Needed to inject a ssh key
2034 user = deep_get(
2035 config_descriptor,
2036 ("config-access", "ssh-access", "default-user"),
2037 )
2038 step = "Install configuration Software, getting public ssh key"
2039 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
2040 ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
2041 )
2042
2043 step = "Insert public key into VM user={} ssh_key={}".format(
2044 user, pub_key
2045 )
2046 else:
2047 # self.logger.debug("no need to get ssh key")
2048 step = "Waiting to VM being up and getting IP address"
2049 self.logger.debug(logging_text + step)
2050
2051 # default rw_mgmt_ip to None, avoiding the non definition of the variable
2052 rw_mgmt_ip = None
2053
2054 # n2vc_redesign STEP 5.1
2055 # wait for RO (ip-address) Insert pub_key into VM
2056 if vnfr_id:
2057 if kdu_name:
2058 rw_mgmt_ip, services = await self.wait_kdu_up(
2059 logging_text, nsr_id, vnfr_id, kdu_name
2060 )
2061 vnfd = self.db.get_one(
2062 "vnfds_revisions",
2063 {"_id": f'{db_vnfr["vnfd-id"]}:{db_vnfr["revision"]}'},
2064 )
2065 kdu = get_kdu(vnfd, kdu_name)
2066 kdu_services = [
2067 service["name"] for service in get_kdu_services(kdu)
2068 ]
2069 exposed_services = []
2070 for service in services:
2071 if any(s in service["name"] for s in kdu_services):
2072 exposed_services.append(service)
2073 await self.vca_map[vca_type].exec_primitive(
2074 ee_id=ee_id,
2075 primitive_name="config",
2076 params_dict={
2077 "osm-config": json.dumps(
2078 OsmConfigBuilder(
2079 k8s={"services": exposed_services}
2080 ).build()
2081 )
2082 },
2083 vca_id=vca_id,
2084 )
2085
2086 # This verification is needed in order to avoid trying to add a public key
2087 # to a VM, when the VNF is a KNF (in the edge case where the user creates a VCA
2088 # for a KNF and not for its KDUs, the previous verification gives False, and the code
2089 # jumps to this block, meaning that there is the need to verify if the VNF is actually a VNF
2090 # or it is a KNF)
2091 elif db_vnfr.get('vdur'):
2092 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
2093 logging_text,
2094 nsr_id,
2095 vnfr_id,
2096 vdu_id,
2097 vdu_index,
2098 user=user,
2099 pub_key=pub_key,
2100 )
2101
2102 self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))
2103
2104 # store rw_mgmt_ip in deploy params for later replacement
2105 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
2106
2107 # n2vc_redesign STEP 6 Execute initial config primitive
2108 step = "execute initial config primitive"
2109
2110 # wait for dependent primitives execution (NS -> VNF -> VDU)
2111 if initial_config_primitive_list:
2112 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
2113
2114 # stage, in function of element type: vdu, kdu, vnf or ns
2115 my_vca = vca_deployed_list[vca_index]
2116 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
2117 # VDU or KDU
2118 stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
2119 elif my_vca.get("member-vnf-index"):
2120 # VNF
2121 stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
2122 else:
2123 # NS
2124 stage[0] = "Stage 5/5: running Day-1 primitives for NS."
2125
2126 self._write_configuration_status(
2127 nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
2128 )
2129
2130 self._write_op_status(op_id=nslcmop_id, stage=stage)
2131
2132 check_if_terminated_needed = True
2133 for initial_config_primitive in initial_config_primitive_list:
2134 # adding information on the vca_deployed if it is a NS execution environment
2135 if not vca_deployed["member-vnf-index"]:
2136 deploy_params["ns_config_info"] = json.dumps(
2137 self._get_ns_config_info(nsr_id)
2138 )
2139 # TODO check if already done
2140 primitive_params_ = self._map_primitive_params(
2141 initial_config_primitive, {}, deploy_params
2142 )
2143
2144 step = "execute primitive '{}' params '{}'".format(
2145 initial_config_primitive["name"], primitive_params_
2146 )
2147 self.logger.debug(logging_text + step)
2148 await self.vca_map[vca_type].exec_primitive(
2149 ee_id=ee_id,
2150 primitive_name=initial_config_primitive["name"],
2151 params_dict=primitive_params_,
2152 db_dict=db_dict,
2153 vca_id=vca_id,
2154 vca_type=vca_type,
2155 )
2156 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
2157 if check_if_terminated_needed:
2158 if config_descriptor.get("terminate-config-primitive"):
2159 self.update_db_2(
2160 "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
2161 )
2162 check_if_terminated_needed = False
2163
2164 # TODO register in database that primitive is done
2165
2166 # STEP 7 Configure metrics
2167 if vca_type == "helm" or vca_type == "helm-v3":
2168 prometheus_jobs = await self.extract_prometheus_scrape_jobs(
2169 ee_id=ee_id,
2170 artifact_path=artifact_path,
2171 ee_config_descriptor=ee_config_descriptor,
2172 vnfr_id=vnfr_id,
2173 nsr_id=nsr_id,
2174 target_ip=rw_mgmt_ip,
2175 )
2176 if prometheus_jobs:
2177 self.update_db_2(
2178 "nsrs",
2179 nsr_id,
2180 {db_update_entry + "prometheus_jobs": prometheus_jobs},
2181 )
2182
2183 for job in prometheus_jobs:
2184 self.db.set_one(
2185 "prometheus_jobs",
2186 {"job_name": job["job_name"]},
2187 job,
2188 upsert=True,
2189 fail_on_empty=False,
2190 )
2191
2192 step = "instantiated at VCA"
2193 self.logger.debug(logging_text + step)
2194
2195 self._write_configuration_status(
2196 nsr_id=nsr_id, vca_index=vca_index, status="READY"
2197 )
2198
2199 except Exception as e: # TODO not use Exception but N2VC exception
2200 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
2201 if not isinstance(
2202 e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
2203 ):
2204 self.logger.error(
2205 "Exception while {} : {}".format(step, e), exc_info=True
2206 )
2207 self._write_configuration_status(
2208 nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
2209 )
2210 raise LcmException("{} {}".format(step, e)) from e
2211
2212 def _write_ns_status(
2213 self,
2214 nsr_id: str,
2215 ns_state: str,
2216 current_operation: str,
2217 current_operation_id: str,
2218 error_description: str = None,
2219 error_detail: str = None,
2220 other_update: dict = None,
2221 ):
2222 """
2223 Update db_nsr fields.
2224 :param nsr_id:
2225 :param ns_state:
2226 :param current_operation:
2227 :param current_operation_id:
2228 :param error_description:
2229 :param error_detail:
2230 :param other_update: Other required changes at database if provided, will be cleared
2231 :return:
2232 """
2233 try:
2234 db_dict = other_update or {}
2235 db_dict[
2236 "_admin.nslcmop"
2237 ] = current_operation_id # for backward compatibility
2238 db_dict["_admin.current-operation"] = current_operation_id
2239 db_dict["_admin.operation-type"] = (
2240 current_operation if current_operation != "IDLE" else None
2241 )
2242 db_dict["currentOperation"] = current_operation
2243 db_dict["currentOperationID"] = current_operation_id
2244 db_dict["errorDescription"] = error_description
2245 db_dict["errorDetail"] = error_detail
2246
2247 if ns_state:
2248 db_dict["nsState"] = ns_state
2249 self.update_db_2("nsrs", nsr_id, db_dict)
2250 except DbException as e:
2251 self.logger.warn("Error writing NS status, ns={}: {}".format(nsr_id, e))
2252
2253 def _write_op_status(
2254 self,
2255 op_id: str,
2256 stage: list = None,
2257 error_message: str = None,
2258 queuePosition: int = 0,
2259 operation_state: str = None,
2260 other_update: dict = None,
2261 ):
2262 try:
2263 db_dict = other_update or {}
2264 db_dict["queuePosition"] = queuePosition
2265 if isinstance(stage, list):
2266 db_dict["stage"] = stage[0]
2267 db_dict["detailed-status"] = " ".join(stage)
2268 elif stage is not None:
2269 db_dict["stage"] = str(stage)
2270
2271 if error_message is not None:
2272 db_dict["errorMessage"] = error_message
2273 if operation_state is not None:
2274 db_dict["operationState"] = operation_state
2275 db_dict["statusEnteredTime"] = time()
2276 self.update_db_2("nslcmops", op_id, db_dict)
2277 except DbException as e:
2278 self.logger.warn(
2279 "Error writing OPERATION status for op_id: {} -> {}".format(op_id, e)
2280 )
2281
2282 def _write_all_config_status(self, db_nsr: dict, status: str):
2283 try:
2284 nsr_id = db_nsr["_id"]
2285 # configurationStatus
2286 config_status = db_nsr.get("configurationStatus")
2287 if config_status:
2288 db_nsr_update = {
2289 "configurationStatus.{}.status".format(index): status
2290 for index, v in enumerate(config_status)
2291 if v
2292 }
2293 # update status
2294 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2295
2296 except DbException as e:
2297 self.logger.warn(
2298 "Error writing all configuration status, ns={}: {}".format(nsr_id, e)
2299 )
2300
2301 def _write_configuration_status(
2302 self,
2303 nsr_id: str,
2304 vca_index: int,
2305 status: str = None,
2306 element_under_configuration: str = None,
2307 element_type: str = None,
2308 other_update: dict = None,
2309 ):
2310
2311 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
2312 # .format(vca_index, status))
2313
2314 try:
2315 db_path = "configurationStatus.{}.".format(vca_index)
2316 db_dict = other_update or {}
2317 if status:
2318 db_dict[db_path + "status"] = status
2319 if element_under_configuration:
2320 db_dict[
2321 db_path + "elementUnderConfiguration"
2322 ] = element_under_configuration
2323 if element_type:
2324 db_dict[db_path + "elementType"] = element_type
2325 self.update_db_2("nsrs", nsr_id, db_dict)
2326 except DbException as e:
2327 self.logger.warn(
2328 "Error writing configuration status={}, ns={}, vca_index={}: {}".format(
2329 status, nsr_id, vca_index, e
2330 )
2331 )
2332
2333 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2334 """
2335 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
2336 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
2337 Database is used because the result can be obtained from a different LCM worker in case of HA.
2338 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2339 :param db_nslcmop: database content of nslcmop
2340 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2341 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
2342 computed 'vim-account-id'
2343 """
2344 modified = False
2345 nslcmop_id = db_nslcmop["_id"]
2346 placement_engine = deep_get(db_nslcmop, ("operationParams", "placement-engine"))
2347 if placement_engine == "PLA":
2348 self.logger.debug(
2349 logging_text + "Invoke and wait for placement optimization"
2350 )
2351 await self.msg.aiowrite(
2352 "pla", "get_placement", {"nslcmopId": nslcmop_id}, loop=self.loop
2353 )
2354 db_poll_interval = 5
2355 wait = db_poll_interval * 10
2356 pla_result = None
2357 while not pla_result and wait >= 0:
2358 await asyncio.sleep(db_poll_interval)
2359 wait -= db_poll_interval
2360 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2361 pla_result = deep_get(db_nslcmop, ("_admin", "pla"))
2362
2363 if not pla_result:
2364 raise LcmException(
2365 "Placement timeout for nslcmopId={}".format(nslcmop_id)
2366 )
2367
2368 for pla_vnf in pla_result["vnf"]:
2369 vnfr = db_vnfrs.get(pla_vnf["member-vnf-index"])
2370 if not pla_vnf.get("vimAccountId") or not vnfr:
2371 continue
2372 modified = True
2373 self.db.set_one(
2374 "vnfrs",
2375 {"_id": vnfr["_id"]},
2376 {"vim-account-id": pla_vnf["vimAccountId"]},
2377 )
2378 # Modifies db_vnfrs
2379 vnfr["vim-account-id"] = pla_vnf["vimAccountId"]
2380 return modified
2381
2382 def update_nsrs_with_pla_result(self, params):
2383 try:
2384 nslcmop_id = deep_get(params, ("placement", "nslcmopId"))
2385 self.update_db_2(
2386 "nslcmops", nslcmop_id, {"_admin.pla": params.get("placement")}
2387 )
2388 except Exception as e:
2389 self.logger.warn("Update failed for nslcmop_id={}:{}".format(nslcmop_id, e))
2390
    async def instantiate(self, nsr_id: str, nslcmop_id: str):
        """
        Deploy an NS instance: KDUs, VMs at the VIM and VCA execution
        environments, then record the final operation result.

        Runs as an LCM HA task: it first tries to take the HA lock for the
        operation and returns silently if another LCM worker owns it. All
        progress, errors and the final state are persisted to the "nsrs" and
        "nslcmops" database collections; nothing is returned to the caller.

        :param nsr_id: ns instance to deploy
        :param nslcmop_id: operation to run
        :return: None
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            self.logger.debug(
                "instantiate() task is not locked by me, ns={}".format(nsr_id)
            )
            return

        logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")

        # get all needed from database

        # database nsrs record
        db_nsr = None

        # database nslcmops record
        db_nslcmop = None

        # update operation on nsrs
        db_nsr_update = {}
        # update operation on nslcmops
        db_nslcmop_update = {}

        nslcmop_operation_state = None
        db_vnfrs = {}  # vnf's info indexed by member-index
        # n2vc_info = {}
        tasks_dict_info = {}  # from task to info text
        exc = None
        error_list = []
        stage = [
            "Stage 1/5: preparation of the environment.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ stage, step, VIM progress
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
            stage[1] = "Reading from database."
            # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
            db_nsr_update["detailed-status"] = "creating"
            db_nsr_update["operational-status"] = "init"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="BUILDING",
                current_operation="INSTANTIATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)

            # read from db: operation
            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # additionalParamsForVnf is stored JSON-encoded; decode it in place
            if db_nslcmop["operationParams"].get("additionalParamsForVnf"):
                db_nslcmop["operationParams"]["additionalParamsForVnf"] = json.loads(
                    db_nslcmop["operationParams"]["additionalParamsForVnf"]
                )
            ns_params = db_nslcmop.get("operationParams")
            # operation-provided timeout wins over the configured default
            if ns_params and ns_params.get("timeout_ns_deploy"):
                timeout_ns_deploy = ns_params["timeout_ns_deploy"]
            else:
                timeout_ns_deploy = self.timeout.get(
                    "ns_deploy", self.timeout_ns_deploy
                )

            # read from db: ns
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            self.logger.debug(logging_text + stage[1])
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            # sync the local filesystem copy of the package artifacts
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # nsr_name = db_nsr["name"]   # TODO short-name??

            # read from db: vnf's of this ns
            stage[1] = "Getting vnfrs from db."
            self.logger.debug(logging_text + stage[1])
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

            # read from db: vnfd's for every vnf
            db_vnfds = []  # every vnfd data

            # for each vnf in ns, read vnfd
            for vnfr in db_vnfrs_list:
                # KDU additionalParams are JSON-encoded in the record; decode
                if vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    vnfr["kdur"] = kdur_list

                db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
                vnfd_id = vnfr["vnfd-id"]
                vnfd_ref = vnfr["vnfd-ref"]
                self.fs.sync(vnfd_id)

                # if we haven't this vnfd, read it from db
                if vnfd_id not in db_vnfds:
                    # read from db
                    stage[1] = "Getting vnfd={} id='{}' from db.".format(
                        vnfd_id, vnfd_ref
                    )
                    self.logger.debug(logging_text + stage[1])
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                    # store vnfd
                    db_vnfds.append(vnfd)

            # Get or generates the _admin.deployed.VCA list
            vca_deployed_list = None
            if db_nsr["_admin"].get("deployed"):
                vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
            if vca_deployed_list is None:
                vca_deployed_list = []
                configuration_status_list = []
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                db_nsr_update["configurationStatus"] = configuration_status_list
                # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
            elif isinstance(vca_deployed_list, dict):
                # maintain backward compatibility. Change a dict to list at database
                vca_deployed_list = list(vca_deployed_list.values())
                db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
                populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

            if not isinstance(
                deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list
            ):
                populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
                db_nsr_update["_admin.deployed.RO.vnfd"] = []

            # set state to INSTANTIATED. When instantiated NBI will not delete directly
            db_nsr_update["_admin.nsState"] = "INSTANTIATED"
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            self.db.set_list(
                "vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"}
            )

            # n2vc_redesign STEP 2 Deploy Network Scenario
            stage[0] = "Stage 2/5: deployment of KDUs, VMs and execution environments."
            self._write_op_status(op_id=nslcmop_id, stage=stage)

            stage[1] = "Deploying KDUs."
            # self.logger.debug(logging_text + "Before deploy_kdus")
            # Call to deploy_kdus in case exists the "vdu:kdu" param
            await self.deploy_kdus(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nslcmop_id=nslcmop_id,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                task_instantiation_info=tasks_dict_info,
            )

            stage[1] = "Getting VCA public key."
            # n2vc_redesign STEP 1 Get VCA public ssh-key
            # feature 1429. Add n2vc public key to needed VMs
            n2vc_key = self.n2vc.get_public_key()
            n2vc_key_list = [n2vc_key]
            if self.vca_config.get("public_key"):
                n2vc_key_list.append(self.vca_config["public_key"])

            stage[1] = "Deploying NS at VIM."
            # RO deployment runs concurrently with the N2VC deployments below;
            # it is awaited in the finally block via _wait_for_tasks
            task_ro = asyncio.ensure_future(
                self.instantiate_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    nsd=nsd,
                    db_nsr=db_nsr,
                    db_nslcmop=db_nslcmop,
                    db_vnfrs=db_vnfrs,
                    db_vnfds=db_vnfds,
                    n2vc_key_list=n2vc_key_list,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
            tasks_dict_info[task_ro] = "Deploying at VIM"

            # n2vc_redesign STEP 3 to 6 Deploy N2VC
            stage[1] = "Deploying Execution Environments."
            self.logger.debug(logging_text + stage[1])

            nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
            for vnf_profile in get_vnf_profiles(nsd):
                vnfd_id = vnf_profile["vnfd-id"]
                vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
                member_vnf_index = str(vnf_profile["id"])
                db_vnfr = db_vnfrs[member_vnf_index]
                base_folder = vnfd["_admin"]["storage"]
                vdu_id = None
                vdu_index = 0
                vdu_name = None
                kdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": get_osm_params(db_vnfr)}
                if db_vnfr.get("additionalParamsForVnf"):
                    deploy_params.update(
                        parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy())
                    )

                # VNF-level charm, if any
                descriptor_config = get_configuration(vnfd, vnfd["id"])
                if descriptor_config:
                    self._deploy_n2vc(
                        logging_text=logging_text
                        + "member_vnf_index={} ".format(member_vnf_index),
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage,
                    )

                # Deploy charms for each VDU that supports one.
                for vdud in get_vdu_list(vnfd):
                    vdu_id = vdud["id"]
                    descriptor_config = get_configuration(vnfd, vdu_id)
                    # NOTE: the lambda closes over vdu_id, which is safe here
                    # because find_in_list evaluates it immediately
                    vdur = find_in_list(
                        db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
                    )

                    if vdur.get("additionalParams"):
                        deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                    else:
                        deploy_params_vdu = deploy_params
                    deploy_params_vdu["OSM"] = get_osm_params(
                        db_vnfr, vdu_id, vdu_count_index=0
                    )
                    vdud_count = get_number_of_instances(vnfd, vdu_id)

                    self.logger.debug("VDUD > {}".format(vdud))
                    self.logger.debug(
                        "Descriptor config > {}".format(descriptor_config)
                    )
                    if descriptor_config:
                        vdu_name = None
                        kdu_name = None
                        # one execution environment per VDU instance
                        for vdu_index in range(vdud_count):
                            # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                            self._deploy_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_id, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_id,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )
                # Deploy charms for each KDU that supports one.
                for kdud in get_kdu_list(vnfd):
                    kdu_name = kdud["name"]
                    descriptor_config = get_configuration(vnfd, kdu_name)
                    if descriptor_config:
                        vdu_id = None
                        vdu_index = 0
                        vdu_name = None
                        kdur = next(
                            x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name
                        )
                        deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                        if kdur.get("additionalParams"):
                            deploy_params_kdu.update(
                                parse_yaml_strings(kdur["additionalParams"].copy())
                            )

                        self._deploy_n2vc(
                            logging_text=logging_text,
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_kdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage,
                        )

            # Check if this NS has a charm configuration
            descriptor_config = nsd.get("ns-configuration")
            if descriptor_config and descriptor_config.get("juju"):
                vnfd_id = None
                db_vnfr = None
                member_vnf_index = None
                vdu_id = None
                kdu_name = None
                vdu_index = 0
                vdu_name = None

                # Get additional parameters
                deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
                if db_nsr.get("additionalParamsForNs"):
                    deploy_params.update(
                        parse_yaml_strings(db_nsr["additionalParamsForNs"].copy())
                    )
                base_folder = nsd["_admin"]["storage"]
                self._deploy_n2vc(
                    logging_text=logging_text,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage,
                )

            # rest of the work (waiting for tasks, final status) is done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e)
            )
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for instantiate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_deploy,
                        stage,
                        nslcmop_id,
                        nsr_id=nsr_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            # NOTE(review): "as exc" shadows (and on handler exit unbinds) the
            # outer exc; harmless here because exc was already consumed above
            except Exception as exc:
                error_list.append(str(exc))

            # update operation-status
            db_nsr_update["operational-status"] = "running"
            # let's begin with VCA 'configured' status (later we can change it)
            db_nsr_update["config-status"] = "configured"
            for task, task_name in tasks_dict_info.items():
                if not task.done() or task.cancelled() or task.exception():
                    if task_name.startswith(self.task_name_deploy_vca):
                        # A N2VC task is pending
                        db_nsr_update["config-status"] = "failed"
                    else:
                        # RO or KDU task is pending
                        db_nsr_update["operational-status"] = "failed"

            # update status at database
            if error_list:
                error_detail = ". ".join(error_list)
                self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: INSTANTIATING.{}, {}".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "READY"
                db_nsr_update["detailed-status"] = "Done"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            # notify NBI/subscribers about the operation result via kafka
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "instantiated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2876
2877 def _get_vnfd(self, vnfd_id: str, cached_vnfds: Dict[str, Any]):
2878 if vnfd_id not in cached_vnfds:
2879 cached_vnfds[vnfd_id] = self.db.get_one("vnfds", {"id": vnfd_id})
2880 return cached_vnfds[vnfd_id]
2881
2882 def _get_vnfr(self, nsr_id: str, vnf_profile_id: str, cached_vnfrs: Dict[str, Any]):
2883 if vnf_profile_id not in cached_vnfrs:
2884 cached_vnfrs[vnf_profile_id] = self.db.get_one(
2885 "vnfrs",
2886 {
2887 "member-vnf-index-ref": vnf_profile_id,
2888 "nsr-id-ref": nsr_id,
2889 },
2890 )
2891 return cached_vnfrs[vnf_profile_id]
2892
2893 def _is_deployed_vca_in_relation(
2894 self, vca: DeployedVCA, relation: Relation
2895 ) -> bool:
2896 found = False
2897 for endpoint in (relation.provider, relation.requirer):
2898 if endpoint["kdu-resource-profile-id"]:
2899 continue
2900 found = (
2901 vca.vnf_profile_id == endpoint.vnf_profile_id
2902 and vca.vdu_profile_id == endpoint.vdu_profile_id
2903 and vca.execution_environment_ref == endpoint.execution_environment_ref
2904 )
2905 if found:
2906 break
2907 return found
2908
2909 def _update_ee_relation_data_with_implicit_data(
2910 self, nsr_id, nsd, ee_relation_data, cached_vnfds, vnf_profile_id: str = None
2911 ):
2912 ee_relation_data = safe_get_ee_relation(
2913 nsr_id, ee_relation_data, vnf_profile_id=vnf_profile_id
2914 )
2915 ee_relation_level = EELevel.get_level(ee_relation_data)
2916 if (ee_relation_level in (EELevel.VNF, EELevel.VDU)) and not ee_relation_data[
2917 "execution-environment-ref"
2918 ]:
2919 vnf_profile = get_vnf_profile(nsd, ee_relation_data["vnf-profile-id"])
2920 vnfd_id = vnf_profile["vnfd-id"]
2921 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2922 entity_id = (
2923 vnfd_id
2924 if ee_relation_level == EELevel.VNF
2925 else ee_relation_data["vdu-profile-id"]
2926 )
2927 ee = get_juju_ee_ref(db_vnfd, entity_id)
2928 if not ee:
2929 raise Exception(
2930 f"not execution environments found for ee_relation {ee_relation_data}"
2931 )
2932 ee_relation_data["execution-environment-ref"] = ee["id"]
2933 return ee_relation_data
2934
2935 def _get_ns_relations(
2936 self,
2937 nsr_id: str,
2938 nsd: Dict[str, Any],
2939 vca: DeployedVCA,
2940 cached_vnfds: Dict[str, Any],
2941 ) -> List[Relation]:
2942 relations = []
2943 db_ns_relations = get_ns_configuration_relation_list(nsd)
2944 for r in db_ns_relations:
2945 provider_dict = None
2946 requirer_dict = None
2947 if all(key in r for key in ("provider", "requirer")):
2948 provider_dict = r["provider"]
2949 requirer_dict = r["requirer"]
2950 elif "entities" in r:
2951 provider_id = r["entities"][0]["id"]
2952 provider_dict = {
2953 "nsr-id": nsr_id,
2954 "endpoint": r["entities"][0]["endpoint"],
2955 }
2956 if provider_id != nsd["id"]:
2957 provider_dict["vnf-profile-id"] = provider_id
2958 requirer_id = r["entities"][1]["id"]
2959 requirer_dict = {
2960 "nsr-id": nsr_id,
2961 "endpoint": r["entities"][1]["endpoint"],
2962 }
2963 if requirer_id != nsd["id"]:
2964 requirer_dict["vnf-profile-id"] = requirer_id
2965 else:
2966 raise Exception(
2967 "provider/requirer or entities must be included in the relation."
2968 )
2969 relation_provider = self._update_ee_relation_data_with_implicit_data(
2970 nsr_id, nsd, provider_dict, cached_vnfds
2971 )
2972 relation_requirer = self._update_ee_relation_data_with_implicit_data(
2973 nsr_id, nsd, requirer_dict, cached_vnfds
2974 )
2975 provider = EERelation(relation_provider)
2976 requirer = EERelation(relation_requirer)
2977 relation = Relation(r["name"], provider, requirer)
2978 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
2979 if vca_in_relation:
2980 relations.append(relation)
2981 return relations
2982
2983 def _get_vnf_relations(
2984 self,
2985 nsr_id: str,
2986 nsd: Dict[str, Any],
2987 vca: DeployedVCA,
2988 cached_vnfds: Dict[str, Any],
2989 ) -> List[Relation]:
2990 relations = []
2991 vnf_profile = get_vnf_profile(nsd, vca.vnf_profile_id)
2992 vnf_profile_id = vnf_profile["id"]
2993 vnfd_id = vnf_profile["vnfd-id"]
2994 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
2995 db_vnf_relations = get_relation_list(db_vnfd, vnfd_id)
2996 for r in db_vnf_relations:
2997 provider_dict = None
2998 requirer_dict = None
2999 if all(key in r for key in ("provider", "requirer")):
3000 provider_dict = r["provider"]
3001 requirer_dict = r["requirer"]
3002 elif "entities" in r:
3003 provider_id = r["entities"][0]["id"]
3004 provider_dict = {
3005 "nsr-id": nsr_id,
3006 "vnf-profile-id": vnf_profile_id,
3007 "endpoint": r["entities"][0]["endpoint"],
3008 }
3009 if provider_id != vnfd_id:
3010 provider_dict["vdu-profile-id"] = provider_id
3011 requirer_id = r["entities"][1]["id"]
3012 requirer_dict = {
3013 "nsr-id": nsr_id,
3014 "vnf-profile-id": vnf_profile_id,
3015 "endpoint": r["entities"][1]["endpoint"],
3016 }
3017 if requirer_id != vnfd_id:
3018 requirer_dict["vdu-profile-id"] = requirer_id
3019 else:
3020 raise Exception(
3021 "provider/requirer or entities must be included in the relation."
3022 )
3023 relation_provider = self._update_ee_relation_data_with_implicit_data(
3024 nsr_id, nsd, provider_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3025 )
3026 relation_requirer = self._update_ee_relation_data_with_implicit_data(
3027 nsr_id, nsd, requirer_dict, cached_vnfds, vnf_profile_id=vnf_profile_id
3028 )
3029 provider = EERelation(relation_provider)
3030 requirer = EERelation(relation_requirer)
3031 relation = Relation(r["name"], provider, requirer)
3032 vca_in_relation = self._is_deployed_vca_in_relation(vca, relation)
3033 if vca_in_relation:
3034 relations.append(relation)
3035 return relations
3036
3037 def _get_kdu_resource_data(
3038 self,
3039 ee_relation: EERelation,
3040 db_nsr: Dict[str, Any],
3041 cached_vnfds: Dict[str, Any],
3042 ) -> DeployedK8sResource:
3043 nsd = get_nsd(db_nsr)
3044 vnf_profiles = get_vnf_profiles(nsd)
3045 vnfd_id = find_in_list(
3046 vnf_profiles,
3047 lambda vnf_profile: vnf_profile["id"] == ee_relation.vnf_profile_id,
3048 )["vnfd-id"]
3049 db_vnfd = self._get_vnfd(vnfd_id, cached_vnfds)
3050 kdu_resource_profile = get_kdu_resource_profile(
3051 db_vnfd, ee_relation.kdu_resource_profile_id
3052 )
3053 kdu_name = kdu_resource_profile["kdu-name"]
3054 deployed_kdu, _ = get_deployed_kdu(
3055 db_nsr.get("_admin", ()).get("deployed", ()),
3056 kdu_name,
3057 ee_relation.vnf_profile_id,
3058 )
3059 deployed_kdu.update({"resource-name": kdu_resource_profile["resource-name"]})
3060 return deployed_kdu
3061
3062 def _get_deployed_component(
3063 self,
3064 ee_relation: EERelation,
3065 db_nsr: Dict[str, Any],
3066 cached_vnfds: Dict[str, Any],
3067 ) -> DeployedComponent:
3068 nsr_id = db_nsr["_id"]
3069 deployed_component = None
3070 ee_level = EELevel.get_level(ee_relation)
3071 if ee_level == EELevel.NS:
3072 vca = get_deployed_vca(db_nsr, {"vdu_id": None, "member-vnf-index": None})
3073 if vca:
3074 deployed_component = DeployedVCA(nsr_id, vca)
3075 elif ee_level == EELevel.VNF:
3076 vca = get_deployed_vca(
3077 db_nsr,
3078 {
3079 "vdu_id": None,
3080 "member-vnf-index": ee_relation.vnf_profile_id,
3081 "ee_descriptor_id": ee_relation.execution_environment_ref,
3082 },
3083 )
3084 if vca:
3085 deployed_component = DeployedVCA(nsr_id, vca)
3086 elif ee_level == EELevel.VDU:
3087 vca = get_deployed_vca(
3088 db_nsr,
3089 {
3090 "vdu_id": ee_relation.vdu_profile_id,
3091 "member-vnf-index": ee_relation.vnf_profile_id,
3092 "ee_descriptor_id": ee_relation.execution_environment_ref,
3093 },
3094 )
3095 if vca:
3096 deployed_component = DeployedVCA(nsr_id, vca)
3097 elif ee_level == EELevel.KDU:
3098 kdu_resource_data = self._get_kdu_resource_data(
3099 ee_relation, db_nsr, cached_vnfds
3100 )
3101 if kdu_resource_data:
3102 deployed_component = DeployedK8sResource(kdu_resource_data)
3103 return deployed_component
3104
3105 async def _add_relation(
3106 self,
3107 relation: Relation,
3108 vca_type: str,
3109 db_nsr: Dict[str, Any],
3110 cached_vnfds: Dict[str, Any],
3111 cached_vnfrs: Dict[str, Any],
3112 ) -> bool:
3113 deployed_provider = self._get_deployed_component(
3114 relation.provider, db_nsr, cached_vnfds
3115 )
3116 deployed_requirer = self._get_deployed_component(
3117 relation.requirer, db_nsr, cached_vnfds
3118 )
3119 if (
3120 deployed_provider
3121 and deployed_requirer
3122 and deployed_provider.config_sw_installed
3123 and deployed_requirer.config_sw_installed
3124 ):
3125 provider_db_vnfr = (
3126 self._get_vnfr(
3127 relation.provider.nsr_id,
3128 relation.provider.vnf_profile_id,
3129 cached_vnfrs,
3130 )
3131 if relation.provider.vnf_profile_id
3132 else None
3133 )
3134 requirer_db_vnfr = (
3135 self._get_vnfr(
3136 relation.requirer.nsr_id,
3137 relation.requirer.vnf_profile_id,
3138 cached_vnfrs,
3139 )
3140 if relation.requirer.vnf_profile_id
3141 else None
3142 )
3143 provider_vca_id = self.get_vca_id(provider_db_vnfr, db_nsr)
3144 requirer_vca_id = self.get_vca_id(requirer_db_vnfr, db_nsr)
3145 provider_relation_endpoint = RelationEndpoint(
3146 deployed_provider.ee_id,
3147 provider_vca_id,
3148 relation.provider.endpoint,
3149 )
3150 requirer_relation_endpoint = RelationEndpoint(
3151 deployed_requirer.ee_id,
3152 requirer_vca_id,
3153 relation.requirer.endpoint,
3154 )
3155 await self.vca_map[vca_type].add_relation(
3156 provider=provider_relation_endpoint,
3157 requirer=requirer_relation_endpoint,
3158 )
3159 # remove entry from relations list
3160 return True
3161 return False
3162
    async def _add_vca_relations(
        self,
        logging_text,
        nsr_id,
        vca_type: str,
        vca_index: int,
        timeout: int = 3600,
    ) -> bool:
        """Establish every relation that involves the VCA at *vca_index*.

        Collects NS-level and VNF-level relations for this VCA, then polls
        (every 5 s, up to *timeout* seconds) until each relation's peer is
        deployed and configured, adding relations as they become possible.

        :param logging_text: prefix for log messages
        :param nsr_id: NS record id
        :param vca_type: key into self.vca_map selecting the VCA connector
        :param vca_index: position of this VCA in _admin.deployed.VCA
        :param timeout: maximum seconds to wait for all peers
        :return: True on success or when there is nothing to relate;
            False on timeout or on any error (logged, not raised)
        """

        # steps:
        # 1. find all relations for this VCA
        # 2. wait for other peers related
        # 3. add relations

        try:
            # STEP 1: find all relations for this VCA

            # read nsr record
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            nsd = get_nsd(db_nsr)

            # this VCA data
            deployed_vca_dict = get_deployed_vca_list(db_nsr)[vca_index]
            my_vca = DeployedVCA(nsr_id, deployed_vca_dict)

            # caches shared across all relation lookups in this call
            cached_vnfds = {}
            cached_vnfrs = {}
            relations = []
            relations.extend(self._get_ns_relations(nsr_id, nsd, my_vca, cached_vnfds))
            relations.extend(self._get_vnf_relations(nsr_id, nsd, my_vca, cached_vnfds))

            # if no relations, terminate
            if not relations:
                self.logger.debug(logging_text + " No relations")
                return True

            self.logger.debug(logging_text + " adding relations {}".format(relations))

            # add all relations
            start = time()
            while True:
                # check timeout
                now = time()
                if now - start >= timeout:
                    self.logger.error(logging_text + " : timeout adding relations")
                    return False

                # reload nsr from database (we need to update record: _admin.deployed.VCA)
                db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

                # for each relation, find the VCA's related
                # (iterate over a copy because successful relations are removed)
                for relation in relations.copy():
                    added = await self._add_relation(
                        relation,
                        vca_type,
                        db_nsr,
                        cached_vnfds,
                        cached_vnfrs,
                    )
                    if added:
                        relations.remove(relation)

                if not relations:
                    self.logger.debug("Relations added")
                    break
                await asyncio.sleep(5.0)

            return True

        except Exception as e:
            self.logger.warn(logging_text + " ERROR adding relations: {}".format(e))
            return False
3235
    async def _install_kdu(
        self,
        nsr_id: str,
        nsr_db_path: str,
        vnfr_data: dict,
        kdu_index: int,
        kdud: dict,
        vnfd: dict,
        k8s_instance_info: dict,
        k8params: dict = None,
        timeout: int = 600,
        vca_id: str = None,
    ):
        """Install a single KDU on its K8s cluster and record the outcome.

        Generates (or reuses) the kdu-instance name, installs the KDU,
        stores management-service IPs in the VNFR, marks the kdur READY and
        finally runs any initial-config-primitives that are not delegated to
        an execution environment.

        :param nsr_id: NS record id
        :param nsr_db_path: dotted path of this KDU entry inside the NSR
        :param vnfr_data: VNF record owning the KDU
        :param kdu_index: index of the kdur inside vnfr_data["kdur"]
        :param kdud: KDU descriptor (from the VNFD "kdu" list)
        :param vnfd: VNF descriptor
        :param k8s_instance_info: cluster/type/model/namespace info for install
        :param k8params: parameters passed to the K8s connector install
        :param timeout: seconds for install and for each primitive execution
        :param vca_id: VCA id forwarded to the K8s connector
        :return: the kdu_instance name
        :raises Exception: re-raises any installation error after writing the
            error to the nsrs/vnfrs records
        """

        try:
            k8sclustertype = k8s_instance_info["k8scluster-type"]
            # Instantiate kdu
            db_dict_install = {
                "collection": "nsrs",
                "filter": {"_id": nsr_id},
                "path": nsr_db_path,
            }

            # A user-provided deployment name wins over a generated one.
            if k8s_instance_info.get("kdu-deployment-name"):
                kdu_instance = k8s_instance_info.get("kdu-deployment-name")
            else:
                kdu_instance = self.k8scluster_map[
                    k8sclustertype
                ].generate_kdu_instance_name(
                    db_dict=db_dict_install,
                    kdu_model=k8s_instance_info["kdu-model"],
                    kdu_name=k8s_instance_info["kdu-name"],
                )

            # Update the nsrs table with the kdu-instance value
            self.update_db_2(
                item="nsrs",
                _id=nsr_id,
                _desc={nsr_db_path + ".kdu-instance": kdu_instance},
            )

            # Update the nsrs table with the actual namespace being used, if the k8scluster-type is `juju` or
            # `juju-bundle`. This verification is needed because there is not a standard/homogeneous namespace
            # between the Helm Charts and Juju Bundles-based KNFs. If we found a way of having an homogeneous
            # namespace, this first verification could be removed, and the next step would be done for any kind
            # of KNF.
            # TODO -> find a way to have an homogeneous namespace between the Helm Charts and Juju Bundles-based
            # KNFs (Bug 2027: https://osm.etsi.org/bugzilla/show_bug.cgi?id=2027)
            if k8sclustertype in ("juju", "juju-bundle"):
                # First, verify if the current namespace is present in the `_admin.projects_read` (if not, it means
                # that the user passed a namespace which he wants its KDU to be deployed in)
                if (
                    self.db.count(
                        table="nsrs",
                        q_filter={
                            "_id": nsr_id,
                            "_admin.projects_write": k8s_instance_info["namespace"],
                            "_admin.projects_read": k8s_instance_info["namespace"],
                        },
                    )
                    > 0
                ):
                    self.logger.debug(
                        f"Updating namespace/model for Juju Bundle from {k8s_instance_info['namespace']} to {kdu_instance}"
                    )
                    self.update_db_2(
                        item="nsrs",
                        _id=nsr_id,
                        _desc={f"{nsr_db_path}.namespace": kdu_instance},
                    )
                    k8s_instance_info["namespace"] = kdu_instance

            await self.k8scluster_map[k8sclustertype].install(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_model=k8s_instance_info["kdu-model"],
                atomic=True,
                params=k8params,
                db_dict=db_dict_install,
                timeout=timeout,
                kdu_name=k8s_instance_info["kdu-name"],
                namespace=k8s_instance_info["namespace"],
                kdu_instance=kdu_instance,
                vca_id=vca_id,
            )

            # Obtain services to obtain management service ip
            services = await self.k8scluster_map[k8sclustertype].get_services(
                cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                kdu_instance=kdu_instance,
                namespace=k8s_instance_info["namespace"],
            )

            # Obtain management service info (if exists)
            vnfr_update_dict = {}
            kdu_config = get_configuration(vnfd, kdud["name"])
            if kdu_config:
                target_ee_list = kdu_config.get("execution-environment-list", [])
            else:
                target_ee_list = []

            if services:
                vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
                mgmt_services = [
                    service
                    for service in kdud.get("service", [])
                    if service.get("mgmt-service")
                ]
                for mgmt_service in mgmt_services:
                    for service in services:
                        if service["name"].startswith(mgmt_service["name"]):
                            # Mgmt service found, Obtain service ip
                            ip = service.get("external_ip", service.get("cluster_ip"))
                            if isinstance(ip, list) and len(ip) == 1:
                                ip = ip[0]

                            vnfr_update_dict[
                                "kdur.{}.ip-address".format(kdu_index)
                            ] = ip

                            # Check if must update also mgmt ip at the vnf
                            service_external_cp = mgmt_service.get(
                                "external-connection-point-ref"
                            )
                            if service_external_cp:
                                if (
                                    deep_get(vnfd, ("mgmt-interface", "cp"))
                                    == service_external_cp
                                ):
                                    vnfr_update_dict["ip-address"] = ip

                                if find_in_list(
                                    target_ee_list,
                                    lambda ee: ee.get(
                                        "external-connection-point-ref", ""
                                    )
                                    == service_external_cp,
                                ):
                                    vnfr_update_dict[
                                        "kdur.{}.ip-address".format(kdu_index)
                                    ] = ip
                            break
                    else:
                        # for/else: no deployed service matched this mgmt service
                        self.logger.warn(
                            "Mgmt service name: {} not found".format(
                                mgmt_service["name"]
                            )
                        )

            vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
            self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

            # Run initial config primitives only when no juju EE handles them
            kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
            if (
                kdu_config
                and kdu_config.get("initial-config-primitive")
                and get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None
            ):
                initial_config_primitive_list = kdu_config.get(
                    "initial-config-primitive"
                )
                initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

                for initial_config_primitive in initial_config_primitive_list:
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, {}
                    )

                    await asyncio.wait_for(
                        self.k8scluster_map[k8sclustertype].exec_primitive(
                            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                            kdu_instance=kdu_instance,
                            primitive_name=initial_config_primitive["name"],
                            params=primitive_params_,
                            db_dict=db_dict_install,
                            vca_id=vca_id,
                        ),
                        timeout=timeout,
                    )

        except Exception as e:
            # Prepare update db with error and raise exception
            try:
                self.update_db_2(
                    "nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)}
                )
                self.update_db_2(
                    "vnfrs",
                    vnfr_data.get("_id"),
                    {"kdur.{}.status".format(kdu_index): "ERROR"},
                )
            except Exception:
                # ignore to keep original exception
                pass
            # reraise original error
            raise

        return kdu_instance
3433
    async def deploy_kdus(
        self,
        logging_text,
        nsr_id,
        nslcmop_id,
        db_vnfrs,
        db_vnfds,
        task_instantiation_info,
    ):
        """Launch installation of every KDU declared in the VNF records.

        For each kdur found in *db_vnfrs*, resolves the K8s cluster,
        synchronizes helm repos once per cluster, records the KDU entry in
        _admin.deployed.K8s and spawns one asyncio task per KDU (via
        _install_kdu), registering it in *task_instantiation_info*. This
        coroutine returns without awaiting those tasks.

        :raises LcmException: on descriptor/cluster errors (other exceptions
            are wrapped into LcmException)
        """
        # Launch kdus if present in the descriptor

        # cache: cluster-type -> {cluster_id: internal cluster uuid}
        k8scluster_id_2_uuic = {
            "helm-chart-v3": {},
            "helm-chart": {},
            "juju-bundle": {},
        }

        async def _get_cluster_id(cluster_id, cluster_type):
            """Resolve (and cache) the internal id of a K8s cluster,
            initializing helm-v3 on demand for legacy clusters."""
            nonlocal k8scluster_id_2_uuic
            if cluster_id in k8scluster_id_2_uuic[cluster_type]:
                return k8scluster_id_2_uuic[cluster_type][cluster_id]

            # check if K8scluster is creating and wait look if previous tasks in process
            task_name, task_dependency = self.lcm_tasks.lookfor_related(
                "k8scluster", cluster_id
            )
            if task_dependency:
                text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(
                    task_name, cluster_id
                )
                self.logger.debug(logging_text + text)
                await asyncio.wait(task_dependency, timeout=3600)

            db_k8scluster = self.db.get_one(
                "k8sclusters", {"_id": cluster_id}, fail_on_empty=False
            )
            if not db_k8scluster:
                raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

            k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
            if not k8s_id:
                if cluster_type == "helm-chart-v3":
                    try:
                        # backward compatibility for existing clusters that have not been initialized for helm v3
                        k8s_credentials = yaml.safe_dump(
                            db_k8scluster.get("credentials")
                        )
                        k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(
                            k8s_credentials, reuse_cluster_uuid=cluster_id
                        )
                        db_k8scluster_update = {}
                        db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                        db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.created"
                        ] = uninstall_sw
                        db_k8scluster_update[
                            "_admin.helm-chart-v3.operationalState"
                        ] = "ENABLED"
                        self.update_db_2(
                            "k8sclusters", cluster_id, db_k8scluster_update
                        )
                    except Exception as e:
                        self.logger.error(
                            logging_text
                            + "error initializing helm-v3 cluster: {}".format(str(e))
                        )
                        raise LcmException(
                            "K8s cluster '{}' has not been initialized for '{}'".format(
                                cluster_id, cluster_type
                            )
                        )
                else:
                    raise LcmException(
                        "K8s cluster '{}' has not been initialized for '{}'".format(
                            cluster_id, cluster_type
                        )
                    )
            k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
            return k8s_id

        logging_text += "Deploy kdus: "
        step = ""
        try:
            db_nsr_update = {"_admin.deployed.K8s": []}
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            index = 0
            updated_cluster_list = []
            updated_v3_cluster_list = []

            for vnfr_data in db_vnfrs.values():
                vca_id = self.get_vca_id(vnfr_data, {})
                for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                    # Step 0: Prepare and set parameters
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                    vnfd_id = vnfr_data.get("vnfd-id")
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                    )
                    kdud = next(
                        kdud
                        for kdud in vnfd_with_id["kdu"]
                        if kdud["name"] == kdur["kdu-name"]
                    )
                    namespace = kdur.get("k8s-namespace")
                    kdu_deployment_name = kdur.get("kdu-deployment-name")
                    if kdur.get("helm-chart"):
                        kdumodel = kdur["helm-chart"]
                        # Default version: helm3, if helm-version is v2 assign v2
                        k8sclustertype = "helm-chart-v3"
                        self.logger.debug("kdur: {}".format(kdur))
                        if (
                            kdur.get("helm-version")
                            and kdur.get("helm-version") == "v2"
                        ):
                            k8sclustertype = "helm-chart"
                    elif kdur.get("juju-bundle"):
                        kdumodel = kdur["juju-bundle"]
                        k8sclustertype = "juju-bundle"
                    else:
                        raise LcmException(
                            "kdu type for kdu='{}.{}' is neither helm-chart nor "
                            "juju-bundle. Maybe an old NBI version is running".format(
                                vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]
                            )
                        )
                    # check if kdumodel is a file and exists
                    try:
                        vnfd_with_id = find_in_list(
                            db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id
                        )
                        storage = deep_get(vnfd_with_id, ("_admin", "storage"))
                        if storage:  # may be not present if vnfd has not artifacts
                            # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                            if storage["pkg-dir"]:
                                filename = "{}/{}/{}s/{}".format(
                                    storage["folder"],
                                    storage["pkg-dir"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            else:
                                filename = "{}/Scripts/{}s/{}".format(
                                    storage["folder"],
                                    k8sclustertype,
                                    kdumodel,
                                )
                            if self.fs.file_exists(
                                filename, mode="file"
                            ) or self.fs.file_exists(filename, mode="dir"):
                                kdumodel = self.fs.path + filename
                    except (asyncio.TimeoutError, asyncio.CancelledError):
                        raise
                    except Exception:  # it is not a file
                        pass

                    k8s_cluster_id = kdur["k8s-cluster"]["id"]
                    step = "Synchronize repos for k8s cluster '{}'".format(
                        k8s_cluster_id
                    )
                    cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                    # Synchronize repos (once per helm cluster per type)
                    if (
                        k8sclustertype == "helm-chart"
                        and cluster_uuid not in updated_cluster_list
                    ) or (
                        k8sclustertype == "helm-chart-v3"
                        and cluster_uuid not in updated_v3_cluster_list
                    ):
                        del_repo_list, added_repo_dict = await asyncio.ensure_future(
                            self.k8scluster_map[k8sclustertype].synchronize_repos(
                                cluster_uuid=cluster_uuid
                            )
                        )
                        if del_repo_list or added_repo_dict:
                            if k8sclustertype == "helm-chart":
                                unset = {
                                    "_admin.helm_charts_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_cluster_list.append(cluster_uuid)
                            elif k8sclustertype == "helm-chart-v3":
                                unset = {
                                    "_admin.helm_charts_v3_added." + item: None
                                    for item in del_repo_list
                                }
                                updated = {
                                    "_admin.helm_charts_v3_added." + item: name
                                    for item, name in added_repo_dict.items()
                                }
                                updated_v3_cluster_list.append(cluster_uuid)
                            self.logger.debug(
                                logging_text + "repos synchronized on k8s cluster "
                                "'{}' to_delete: {}, to_add: {}".format(
                                    k8s_cluster_id, del_repo_list, added_repo_dict
                                )
                            )
                            self.db.set_one(
                                "k8sclusters",
                                {"_id": k8s_cluster_id},
                                updated,
                                unset=unset,
                            )

                    # Instantiate kdu
                    step = "Instantiating KDU {}.{} in k8s cluster {}".format(
                        vnfr_data["member-vnf-index-ref"],
                        kdur["kdu-name"],
                        k8s_cluster_id,
                    )
                    k8s_instance_info = {
                        "kdu-instance": None,
                        "k8scluster-uuid": cluster_uuid,
                        "k8scluster-type": k8sclustertype,
                        "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                        "kdu-name": kdur["kdu-name"],
                        "kdu-model": kdumodel,
                        "namespace": namespace,
                        "kdu-deployment-name": kdu_deployment_name,
                    }
                    db_path = "_admin.deployed.K8s.{}".format(index)
                    db_nsr_update[db_path] = k8s_instance_info
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    vnfd_with_id = find_in_list(
                        db_vnfds, lambda vnf: vnf["_id"] == vnfd_id
                    )
                    task = asyncio.ensure_future(
                        self._install_kdu(
                            nsr_id,
                            db_path,
                            vnfr_data,
                            kdu_index,
                            kdud,
                            vnfd_with_id,
                            k8s_instance_info,
                            k8params=desc_params,
                            timeout=1800,
                            vca_id=vca_id,
                        )
                    )
                    self.lcm_tasks.register(
                        "ns",
                        nsr_id,
                        nslcmop_id,
                        "instantiate_KDU-{}".format(index),
                        task,
                    )
                    task_instantiation_info[task] = "Deploying KDU {}".format(
                        kdur["kdu-name"]
                    )

                    index += 1

        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
            if isinstance(e, (N2VCException, DbException)):
                self.logger.error(logging_text + msg)
            else:
                self.logger.critical(logging_text + msg, exc_info=True)
            raise LcmException(msg)
        finally:
            if db_nsr_update:
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
3705
    def _deploy_n2vc(
        self,
        logging_text,
        db_nsr,
        db_vnfr,
        nslcmop_id,
        nsr_id,
        nsi_id,
        vnfd_id,
        vdu_id,
        kdu_name,
        member_vnf_index,
        vdu_index,
        vdu_name,
        deploy_params,
        descriptor_config,
        base_folder,
        task_instantiation_info,
        stage,
    ):
        """Create/reuse VCA entries in _admin.deployed.VCA and launch N2VC tasks.

        For every execution environment found in *descriptor_config*, finds
        (or creates) the matching VCA slot in the NSR, then launches one
        instantiate_N2VC asyncio task per EE and registers it in
        *task_instantiation_info*.
        """
        # launch instantiate_N2VC in a asyncio task and register task object
        # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
        # if not found, create one entry and update database
        # fill db_nsr._admin.deployed.VCA.<index>

        self.logger.debug(
            logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
        )
        if "execution-environment-list" in descriptor_config:
            ee_list = descriptor_config.get("execution-environment-list", [])
        elif "juju" in descriptor_config:
            ee_list = [descriptor_config]  # ns charms
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(
                logging_text
                + "_deploy_n2vc ee_item juju={}, helm={}".format(
                    ee_item.get("juju"), ee_item.get("helm-chart")
                )
            )
            ee_descriptor_id = ee_item.get("id")
            # Determine the VCA type from the EE descriptor content
            if ee_item.get("juju"):
                vca_name = ee_item["juju"].get("charm")
                vca_type = (
                    "lxc_proxy_charm"
                    if ee_item["juju"].get("charm") is not None
                    else "native_charm"
                )
                if ee_item["juju"].get("cloud") == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item["juju"].get("proxy") is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item["helm-chart"]
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(
                    logging_text + "skipping non juju neither charm configuration"
                )
                continue

            # Look for an existing VCA slot matching this EE
            vca_index = -1
            for vca_index, vca_deployed in enumerate(
                db_nsr["_admin"]["deployed"]["VCA"]
            ):
                if not vca_deployed:
                    continue
                if (
                    vca_deployed.get("member-vnf-index") == member_vnf_index
                    and vca_deployed.get("vdu_id") == vdu_id
                    and vca_deployed.get("kdu_name") == kdu_name
                    and vca_deployed.get("vdu_count_index", 0) == vdu_index
                    and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
                ):
                    break
            # for/else: runs only when no existing slot matched above
            else:
                # not found, create one.
                target = (
                    "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                )
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id,
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict(),
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

                self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
                self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
                self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item,
                )
            )
            self.lcm_tasks.register(
                "ns",
                nsr_id,
                nslcmop_id,
                "instantiate_N2VC-{}".format(vca_index),
                task_n2vc,
            )
            task_instantiation_info[
                task_n2vc
            ] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or ""
            )
3858
3859 @staticmethod
3860 def _create_nslcmop(nsr_id, operation, params):
3861 """
3862 Creates a ns-lcm-opp content to be stored at database.
3863 :param nsr_id: internal id of the instance
3864 :param operation: instantiate, terminate, scale, action, ...
3865 :param params: user parameters for the operation
3866 :return: dictionary following SOL005 format
3867 """
3868 # Raise exception if invalid arguments
3869 if not (nsr_id and operation and params):
3870 raise LcmException(
3871 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided"
3872 )
3873 now = time()
3874 _id = str(uuid4())
3875 nslcmop = {
3876 "id": _id,
3877 "_id": _id,
3878 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3879 "operationState": "PROCESSING",
3880 "statusEnteredTime": now,
3881 "nsInstanceId": nsr_id,
3882 "lcmOperationType": operation,
3883 "startTime": now,
3884 "isAutomaticInvocation": False,
3885 "operationParams": params,
3886 "isCancelPending": False,
3887 "links": {
3888 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3889 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3890 },
3891 }
3892 return nslcmop
3893
3894 def _format_additional_params(self, params):
3895 params = params or {}
3896 for key, value in params.items():
3897 if str(value).startswith("!!yaml "):
3898 params[key] = yaml.safe_load(value[7:])
3899 return params
3900
3901 def _get_terminate_primitive_params(self, seq, vnf_index):
3902 primitive = seq.get("name")
3903 primitive_params = {}
3904 params = {
3905 "member_vnf_index": vnf_index,
3906 "primitive": primitive,
3907 "primitive_params": primitive_params,
3908 }
3909 desc_params = {}
3910 return self._map_primitive_params(seq, params, desc_params)
3911
3912 # sub-operations
3913
3914 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3915 op = deep_get(db_nslcmop, ("_admin", "operations"), [])[op_index]
3916 if op.get("operationState") == "COMPLETED":
3917 # b. Skip sub-operation
3918 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3919 return self.SUBOPERATION_STATUS_SKIP
3920 else:
3921 # c. retry executing sub-operation
3922 # The sub-operation exists, and operationState != 'COMPLETED'
3923 # Update operationState = 'PROCESSING' to indicate a retry.
3924 operationState = "PROCESSING"
3925 detailed_status = "In progress"
3926 self._update_suboperation_status(
3927 db_nslcmop, op_index, operationState, detailed_status
3928 )
3929 # Return the sub-operation index
3930 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3931 # with arguments extracted from the sub-operation
3932 return op_index
3933
3934 # Find a sub-operation where all keys in a matching dictionary must match
3935 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3936 def _find_suboperation(self, db_nslcmop, match):
3937 if db_nslcmop and match:
3938 op_list = db_nslcmop.get("_admin", {}).get("operations", [])
3939 for i, op in enumerate(op_list):
3940 if all(op.get(k) == match[k] for k in match):
3941 return i
3942 return self.SUBOPERATION_STATUS_NOT_FOUND
3943
3944 # Update status for a sub-operation given its index
3945 def _update_suboperation_status(
3946 self, db_nslcmop, op_index, operationState, detailed_status
3947 ):
3948 # Update DB for HA tasks
3949 q_filter = {"_id": db_nslcmop["_id"]}
3950 update_dict = {
3951 "_admin.operations.{}.operationState".format(op_index): operationState,
3952 "_admin.operations.{}.detailed-status".format(op_index): detailed_status,
3953 }
3954 self.db.set_one(
3955 "nslcmops", q_filter=q_filter, update_dict=update_dict, fail_on_empty=False
3956 )
3957
3958 # Add sub-operation, return the index of the added sub-operation
3959 # Optionally, set operationState, detailed-status, and operationType
3960 # Status and type are currently set for 'scale' sub-operations:
3961 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3962 # 'detailed-status' : status message
3963 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3964 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3965 def _add_suboperation(
3966 self,
3967 db_nslcmop,
3968 vnf_index,
3969 vdu_id,
3970 vdu_count_index,
3971 vdu_name,
3972 primitive,
3973 mapped_primitive_params,
3974 operationState=None,
3975 detailed_status=None,
3976 operationType=None,
3977 RO_nsr_id=None,
3978 RO_scaling_info=None,
3979 ):
3980 if not db_nslcmop:
3981 return self.SUBOPERATION_STATUS_NOT_FOUND
3982 # Get the "_admin.operations" list, if it exists
3983 db_nslcmop_admin = db_nslcmop.get("_admin", {})
3984 op_list = db_nslcmop_admin.get("operations")
3985 # Create or append to the "_admin.operations" list
3986 new_op = {
3987 "member_vnf_index": vnf_index,
3988 "vdu_id": vdu_id,
3989 "vdu_count_index": vdu_count_index,
3990 "primitive": primitive,
3991 "primitive_params": mapped_primitive_params,
3992 }
3993 if operationState:
3994 new_op["operationState"] = operationState
3995 if detailed_status:
3996 new_op["detailed-status"] = detailed_status
3997 if operationType:
3998 new_op["lcmOperationType"] = operationType
3999 if RO_nsr_id:
4000 new_op["RO_nsr_id"] = RO_nsr_id
4001 if RO_scaling_info:
4002 new_op["RO_scaling_info"] = RO_scaling_info
4003 if not op_list:
4004 # No existing operations, create key 'operations' with current operation as first list element
4005 db_nslcmop_admin.update({"operations": [new_op]})
4006 op_list = db_nslcmop_admin.get("operations")
4007 else:
4008 # Existing operations, append operation to list
4009 op_list.append(new_op)
4010
4011 db_nslcmop_update = {"_admin.operations": op_list}
4012 self.update_db_2("nslcmops", db_nslcmop["_id"], db_nslcmop_update)
4013 op_index = len(op_list) - 1
4014 return op_index
4015
4016 # Helper methods for scale() sub-operations
4017
4018 # pre-scale/post-scale:
4019 # Check for 3 different cases:
4020 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
4021 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
4022 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
4023 def _check_or_add_scale_suboperation(
4024 self,
4025 db_nslcmop,
4026 vnf_index,
4027 vnf_config_primitive,
4028 primitive_params,
4029 operationType,
4030 RO_nsr_id=None,
4031 RO_scaling_info=None,
4032 ):
4033 # Find this sub-operation
4034 if RO_nsr_id and RO_scaling_info:
4035 operationType = "SCALE-RO"
4036 match = {
4037 "member_vnf_index": vnf_index,
4038 "RO_nsr_id": RO_nsr_id,
4039 "RO_scaling_info": RO_scaling_info,
4040 }
4041 else:
4042 match = {
4043 "member_vnf_index": vnf_index,
4044 "primitive": vnf_config_primitive,
4045 "primitive_params": primitive_params,
4046 "lcmOperationType": operationType,
4047 }
4048 op_index = self._find_suboperation(db_nslcmop, match)
4049 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
4050 # a. New sub-operation
4051 # The sub-operation does not exist, add it.
4052 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
4053 # The following parameters are set to None for all kind of scaling:
4054 vdu_id = None
4055 vdu_count_index = None
4056 vdu_name = None
4057 if RO_nsr_id and RO_scaling_info:
4058 vnf_config_primitive = None
4059 primitive_params = None
4060 else:
4061 RO_nsr_id = None
4062 RO_scaling_info = None
4063 # Initial status for sub-operation
4064 operationState = "PROCESSING"
4065 detailed_status = "In progress"
4066 # Add sub-operation for pre/post-scaling (zero or more operations)
4067 self._add_suboperation(
4068 db_nslcmop,
4069 vnf_index,
4070 vdu_id,
4071 vdu_count_index,
4072 vdu_name,
4073 vnf_config_primitive,
4074 primitive_params,
4075 operationState,
4076 detailed_status,
4077 operationType,
4078 RO_nsr_id,
4079 RO_scaling_info,
4080 )
4081 return self.SUBOPERATION_STATUS_NEW
4082 else:
4083 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
4084 # or op_index (operationState != 'COMPLETED')
4085 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
4086
4087 # Function to return execution_environment id
4088
4089 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
4090 # TODO vdu_index_count
4091 for vca in vca_deployed_list:
4092 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
4093 return vca["ee_id"]
4094
    async def destroy_N2VC(
        self,
        logging_text,
        db_nslcmop,
        vca_deployed,
        config_descriptor,
        vca_index,
        destroy_ee=True,
        exec_primitives=True,
        scaling_in=False,
        vca_id: str = None,
    ):
        """
        Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record; sub-operations are registered on it
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy here, because all of them will be destroyed at once later
        :param exec_primitives: False to not execute terminate primitives, because the config is not completed or has
            not executed properly
        :param scaling_in: True destroys the application, False destroys the model
        :param vca_id: VCA id (passed through to the VCA connector)
        :return: None or exception
        """

        self.logger.debug(
            logging_text
            + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        # default type for backward compatibility - proxy charm
        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"),
                vca_deployed.get("ee_descriptor_id"),
            )
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            # only run when there are primitives AND this VCA was flagged as needing them
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name")
                    )
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get("name")
                    mapped_primitive_params = self._get_terminate_primitive_params(
                        seq, vnf_index
                    )

                    # Add sub-operation
                    self._add_suboperation(
                        db_nslcmop,
                        vnf_index,
                        vdu_id,
                        vdu_count_index,
                        vdu_name,
                        primitive,
                        mapped_primitive_params,
                    )
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(
                            vca_deployed["ee_id"],
                            primitive,
                            mapped_primitive_params,
                            vca_type=vca_type,
                            vca_id=vca_id,
                        )
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ["COMPLETED", "PARTIALLY_COMPLETED"]
                    if result not in result_ok:
                        raise LcmException(
                            "terminate_primitive {} for vnf_member_index={} fails with "
                            "error {}".format(seq.get("name"), vnf_index, result_detail)
                        )
                # set that this VCA do not need terminated
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(
                    vca_index
                )
                self.update_db_2(
                    "nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False}
                )

        # Delete Prometheus Jobs if any
        # This uses NSR_ID, so it will destroy any jobs under this index
        self.db.del_list("prometheus_jobs", {"nsr_id": db_nslcmop["nsInstanceId"]})

        if destroy_ee:
            # remove the execution environment for this single VCA only
            await self.vca_map[vca_type].delete_execution_environment(
                vca_deployed["ee_id"],
                scaling_in=scaling_in,
                vca_type=vca_type,
                vca_id=vca_id,
            )
4200
4201 async def _delete_all_N2VC(self, db_nsr: dict, vca_id: str = None):
4202 self._write_all_config_status(db_nsr=db_nsr, status="TERMINATING")
4203 namespace = "." + db_nsr["_id"]
4204 try:
4205 await self.n2vc.delete_namespace(
4206 namespace=namespace,
4207 total_timeout=self.timeout_charm_delete,
4208 vca_id=vca_id,
4209 )
4210 except N2VCNotFound: # already deleted. Skip
4211 pass
4212 self._write_all_config_status(db_nsr=db_nsr, status="DELETED")
4213
    async def _terminate_RO(
        self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
    ):
        """
        Terminates a deployment from RO
        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: NS instance id
        :param nslcmop_id: id of the nslcmop record driving this operation
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
        this method will update only the index 2, but it will write on database the concatenated content of the list
        :return: None; raises LcmException when any deletion failed (collected in failed_detail)
        """
        db_nsr_update = {}
        # best-effort: failures are collected here and raised at the very end
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                # persist the delete action id so a restarted LCM can resume waiting
                db_nsr_update[
                    "_admin.deployed.RO.nsr_delete_action_id"
                ] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(
                    logging_text
                    + stage[2]
                    + " RO_id={} ro_delete_action={}".format(
                        ro_nsr_id, ro_delete_action
                    )
                )
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                # poll RO every 5 seconds until ACTIVE (deleted), ERROR, or timeout
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action,
                    )

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # ACTIVE action status means the deletion finished
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert (
                            False
                        ), "ROclient.check_action_status returns unknown {}".format(
                            ns_status
                        )
                    # only touch the DB when the status text actually changed
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException(
                        "Timeout waiting ns deleted from VIM"
                    )

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if (
                isinstance(e, ROclient.ROClientException) and e.http_code == 404
            ):  # not found
                # 404 means it is already gone: treat as successfully deleted
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(
                    logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id)
                )
            elif (
                isinstance(e, ROclient.ROClientException) and e.http_code == 409
            ):  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(
                    logging_text
                    + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e)
                )
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(
                    logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e)
                )

        # Delete nsd (only when the ns itself was removed without errors)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(
                    logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id)
                )
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 404
                ):  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(
                        logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id)
                    )
                elif (
                    isinstance(e, ROclient.ROClientException) and e.http_code == 409
                ):  # conflict
                    failed_detail.append(
                        "ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e)
                    )
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append(
                        "ro_nsd_id={} delete error: {}".format(ro_nsd_id, e)
                    )
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds registered at RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[
                        2
                    ] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id
                    )
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(
                        logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id)
                    )
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 404
                    ):  # not found
                        db_nsr_update[
                            "_admin.deployed.RO.vnfd.{}.id".format(index)
                        ] = None
                        self.logger.debug(
                            logging_text
                            + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id)
                        )
                    elif (
                        isinstance(e, ROclient.ROClientException) and e.http_code == 409
                    ):  # conflict
                        failed_detail.append(
                            "ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append(
                            "ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e)
                        )
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
4413
    async def terminate(self, nsr_id, nslcmop_id):
        """Terminate an NS instance.

        Runs as an LCM task in three stages: (1) prepare, (2) execute VCA
        terminate primitives, (3) delete all execution environments, KDUs and
        the RO/VIM deployment. Progress and final state are written to the
        "nsrs" and "nslcmops" collections; a "terminated" message is sent to
        kafka at the end.
        :param nsr_id: NS instance id (db_nsr["_id"])
        :param nslcmop_id: id of the nslcmop record driving this operation
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []  # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human readable description, for _wait_for_tasks
        db_nsr_update = {}
        stage = [
            "Stage 1/3: Preparing task.",
            "Waiting for previous operations to terminate.",
            "",
        ]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update,
            )
            self._write_op_status(op_id=nslcmop_id, queuePosition=0, stage=stage)
            # work on a copy so partial updates never corrupt the db record in memory
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; final status is written in the finally block
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfrs_dict = {
                db_vnfr["member-vnf-index-ref"]: db_vnfr for db_vnfr in db_vnfrs_list
            }
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs; fetch each distinct VNFD only once
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[
                    vnfr["member-vnf-index-ref"]
                ] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                vca_member_vnf_index = vca.get("member-vnf-index")
                vca_id = self.get_vca_id(
                    db_vnfrs_dict.get(vca_member_vnf_index)
                    if vca_member_vnf_index
                    else None,
                    db_nsr,
                )
                if not vca or not vca.get("ee_id"):
                    continue
                # select the configuration descriptor matching this VCA's level
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
                else:
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
                vca_type = vca.get("type")
                exec_terminate_primitives = not operation_params.get(
                    "skip_terminate_primitives"
                ) and vca.get("needed_terminate")
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = (
                    True if vca_type in ("helm", "helm-v3", "native_charm") else False
                )
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(
                        logging_text,
                        db_nslcmop,
                        vca,
                        config_descriptor,
                        vca_index,
                        destroy_ee,
                        exec_terminate_primitives,
                        vca_id=vca_id,
                    )
                )
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(
                    logging_text
                    + "Waiting for tasks {}".format(list(tasks_dict_info.keys()))
                )
                error_list = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    min(self.timeout_charm_delete, timeout_ns_terminate),
                    stage,
                    nslcmop_id,
                )
                tasks_dict_info.clear()
                if error_list:
                    # stop here; the finally block will report the failure
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                vca_id = self.get_vca_id({}, db_nsr)
                task_delete_ee = asyncio.ensure_future(
                    asyncio.wait_for(
                        self._delete_all_N2VC(db_nsr=db_nsr, vca_id=vca_id),
                        timeout=self.timeout_charm_delete,
                    )
                )
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    # TODO: Uninstall kdu instances taking into account they could be deployed in different VIMs
                    vca_id = self.get_vca_id({}, db_nsr)
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            vca_id=vca_id,
                            namespace=kdu.get("namespace"),
                        )
                    )
                else:
                    self.logger.error(
                        logging_text
                        + "Unknown k8s deployment type {}".format(
                            kdu.get("k8scluster-type")
                        )
                    )
                    continue
                tasks_dict_info[
                    task_delete_kdu_instance
                ] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new-generation RO client when available)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(
                        logging_text, nsr_deployed, nsr_id, nslcmop_id, stage
                    )
                )
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            N2VCException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(stage[1])
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(stage[1], e),
                exc_info=True,
            )
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(
                        logging_text,
                        tasks_dict_info,
                        timeout_ns_terminate,
                        stage,
                        nslcmop_id,
                    )
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                # NOTE(review): this rebinds the outer ``exc``; harmless here
                # because ``exc`` is not read again after this point
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = "{} Detail: {}".format(
                    stage[0], error_detail
                )
                error_description_nsr = "Operation: TERMINATING.{}, {}.".format(
                    nslcmop_id, stage[0]
                )

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = (
                    error_description_nsr + " Detail: " + error_detail
                )
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update,
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        # propagate final state to all VNFRs of this NS
                        self.db.set_list(
                            "vnfrs",
                            {"nsr-id-ref": nsr_id},
                            {"_admin.nsState": "NOT_INSTANTIATED"},
                        )
                    except DbException as e:
                        self.logger.warn(
                            logging_text
                            + "Error writing VNFR status for nsr-id-ref: {} -> {}".format(
                                nsr_id, e
                            )
                        )
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    # notify NBI (and others) that the termination finished
                    await self.msg.aiowrite(
                        "ns",
                        "terminated",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                            "autoremove": autoremove,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
4742
    async def _wait_for_tasks(
        self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None
    ):
        """Wait for a set of asyncio tasks, updating operation status as they finish.

        :param logging_text: prefix for log messages
        :param created_tasks_info: dict mapping task -> human-readable description
        :param timeout: overall deadline in seconds for ALL tasks together
        :param stage: status list; index 1 is updated with "done/total" progress
        :param nslcmop_id: nslcmop record to write operation status to
        :param nsr_id: when given, errors are also written to the nsr record
        :return: list of error detail strings (empty when every task succeeded)
        """
        time_start = time()
        error_detail_list = []
        error_list = []
        pending_tasks = list(created_tasks_info.keys())
        num_tasks = len(pending_tasks)
        num_done = 0
        stage[1] = "{}/{}.".format(num_done, num_tasks)
        self._write_op_status(nslcmop_id, stage)
        while pending_tasks:
            new_error = None
            # remaining time of the global deadline
            _timeout = timeout + time_start - time()
            done, pending_tasks = await asyncio.wait(
                pending_tasks, timeout=_timeout, return_when=asyncio.FIRST_COMPLETED
            )
            num_done += len(done)
            if not done:  # Timeout
                # record a timeout error for every task still pending, then stop waiting
                for task in pending_tasks:
                    new_error = created_tasks_info[task] + ": Timeout"
                    error_detail_list.append(new_error)
                    error_list.append(new_error)
                break
            for task in done:
                if task.cancelled():
                    exc = "Cancelled"
                else:
                    exc = task.exception()
                if exc:
                    if isinstance(exc, asyncio.TimeoutError):
                        exc = "Timeout"
                    new_error = created_tasks_info[task] + ": {}".format(exc)
                    error_list.append(created_tasks_info[task])
                    error_detail_list.append(new_error)
                    # known/controlled exception types are logged without traceback
                    if isinstance(
                        exc,
                        (
                            str,
                            DbException,
                            N2VCException,
                            ROclient.ROClientException,
                            LcmException,
                            K8sException,
                            NgRoException,
                        ),
                    ):
                        self.logger.error(logging_text + new_error)
                    else:
                        # unexpected exception: log the full traceback
                        exc_traceback = "".join(
                            traceback.format_exception(None, exc, exc.__traceback__)
                        )
                        self.logger.error(
                            logging_text
                            + created_tasks_info[task]
                            + " "
                            + exc_traceback
                        )
                else:
                    self.logger.debug(
                        logging_text + created_tasks_info[task] + ": Done"
                    )
            stage[1] = "{}/{}.".format(num_done, num_tasks)
            if new_error:
                stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
                if nsr_id:  # update also nsr
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {
                            "errorDescription": "Error at: " + ", ".join(error_list),
                            "errorDetail": ". ".join(error_detail_list),
                        },
                    )
            self._write_op_status(nslcmop_id, stage)
        return error_detail_list
4819
4820 @staticmethod
4821 def _map_primitive_params(primitive_desc, params, instantiation_params):
4822 """
4823 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
4824 The default-value is used. If it is between < > it look for a value at instantiation_params
4825 :param primitive_desc: portion of VNFD/NSD that describes primitive
4826 :param params: Params provided by user
4827 :param instantiation_params: Instantiation params provided by user
4828 :return: a dictionary with the calculated params
4829 """
4830 calculated_params = {}
4831 for parameter in primitive_desc.get("parameter", ()):
4832 param_name = parameter["name"]
4833 if param_name in params:
4834 calculated_params[param_name] = params[param_name]
4835 elif "default-value" in parameter or "value" in parameter:
4836 if "value" in parameter:
4837 calculated_params[param_name] = parameter["value"]
4838 else:
4839 calculated_params[param_name] = parameter["default-value"]
4840 if (
4841 isinstance(calculated_params[param_name], str)
4842 and calculated_params[param_name].startswith("<")
4843 and calculated_params[param_name].endswith(">")
4844 ):
4845 if calculated_params[param_name][1:-1] in instantiation_params:
4846 calculated_params[param_name] = instantiation_params[
4847 calculated_params[param_name][1:-1]
4848 ]
4849 else:
4850 raise LcmException(
4851 "Parameter {} needed to execute primitive {} not provided".format(
4852 calculated_params[param_name], primitive_desc["name"]
4853 )
4854 )
4855 else:
4856 raise LcmException(
4857 "Parameter {} needed to execute primitive {} not provided".format(
4858 param_name, primitive_desc["name"]
4859 )
4860 )
4861
4862 if isinstance(calculated_params[param_name], (dict, list, tuple)):
4863 calculated_params[param_name] = yaml.safe_dump(
4864 calculated_params[param_name], default_flow_style=True, width=256
4865 )
4866 elif isinstance(calculated_params[param_name], str) and calculated_params[
4867 param_name
4868 ].startswith("!!yaml "):
4869 calculated_params[param_name] = calculated_params[param_name][7:]
4870 if parameter.get("data-type") == "INTEGER":
4871 try:
4872 calculated_params[param_name] = int(calculated_params[param_name])
4873 except ValueError: # error converting string to int
4874 raise LcmException(
4875 "Parameter {} of primitive {} must be integer".format(
4876 param_name, primitive_desc["name"]
4877 )
4878 )
4879 elif parameter.get("data-type") == "BOOLEAN":
4880 calculated_params[param_name] = not (
4881 (str(calculated_params[param_name])).lower() == "false"
4882 )
4883
4884 # add always ns_config_info if primitive name is config
4885 if primitive_desc["name"] == "config":
4886 if "ns_config_info" in instantiation_params:
4887 calculated_params["ns_config_info"] = instantiation_params[
4888 "ns_config_info"
4889 ]
4890 return calculated_params
4891
4892 def _look_for_deployed_vca(
4893 self,
4894 deployed_vca,
4895 member_vnf_index,
4896 vdu_id,
4897 vdu_count_index,
4898 kdu_name=None,
4899 ee_descriptor_id=None,
4900 ):
4901 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
4902 for vca in deployed_vca:
4903 if not vca:
4904 continue
4905 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
4906 continue
4907 if (
4908 vdu_count_index is not None
4909 and vdu_count_index != vca["vdu_count_index"]
4910 ):
4911 continue
4912 if kdu_name and kdu_name != vca["kdu_name"]:
4913 continue
4914 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
4915 continue
4916 break
4917 else:
4918 # vca_deployed not found
4919 raise LcmException(
4920 "charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
4921 " is not deployed".format(
4922 member_vnf_index,
4923 vdu_id,
4924 vdu_count_index,
4925 kdu_name,
4926 ee_descriptor_id,
4927 )
4928 )
4929 # get ee_id
4930 ee_id = vca.get("ee_id")
4931 vca_type = vca.get(
4932 "type", "lxc_proxy_charm"
4933 ) # default value for backward compatibility - proxy charm
4934 if not ee_id:
4935 raise LcmException(
4936 "charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
4937 "execution environment".format(
4938 member_vnf_index, vdu_id, kdu_name, vdu_count_index
4939 )
4940 )
4941 return ee_id, vca_type
4942
4943 async def _ns_execute_primitive(
4944 self,
4945 ee_id,
4946 primitive,
4947 primitive_params,
4948 retries=0,
4949 retries_interval=30,
4950 timeout=None,
4951 vca_type=None,
4952 db_dict=None,
4953 vca_id: str = None,
4954 ) -> (str, str):
4955 try:
4956 if primitive == "config":
4957 primitive_params = {"params": primitive_params}
4958
4959 vca_type = vca_type or "lxc_proxy_charm"
4960
4961 while retries >= 0:
4962 try:
4963 output = await asyncio.wait_for(
4964 self.vca_map[vca_type].exec_primitive(
4965 ee_id=ee_id,
4966 primitive_name=primitive,
4967 params_dict=primitive_params,
4968 progress_timeout=self.timeout_progress_primitive,
4969 total_timeout=self.timeout_primitive,
4970 db_dict=db_dict,
4971 vca_id=vca_id,
4972 vca_type=vca_type,
4973 ),
4974 timeout=timeout or self.timeout_primitive,
4975 )
4976 # execution was OK
4977 break
4978 except asyncio.CancelledError:
4979 raise
4980 except Exception as e: # asyncio.TimeoutError
4981 if isinstance(e, asyncio.TimeoutError):
4982 e = "Timeout"
4983 retries -= 1
4984 if retries >= 0:
4985 self.logger.debug(
4986 "Error executing action {} on {} -> {}".format(
4987 primitive, ee_id, e
4988 )
4989 )
4990 # wait and retry
4991 await asyncio.sleep(retries_interval, loop=self.loop)
4992 else:
4993 return "FAILED", str(e)
4994
4995 return "COMPLETED", output
4996
4997 except (LcmException, asyncio.CancelledError):
4998 raise
4999 except Exception as e:
5000 return "FAIL", "Error executing action {}: {}".format(primitive, e)
5001
5002 async def vca_status_refresh(self, nsr_id, nslcmop_id):
5003 """
5004 Updating the vca_status with latest juju information in nsrs record
5005 :param: nsr_id: Id of the nsr
5006 :param: nslcmop_id: Id of the nslcmop
5007 :return: None
5008 """
5009
5010 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
5011 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5012 vca_id = self.get_vca_id({}, db_nsr)
5013 if db_nsr["_admin"]["deployed"]["K8s"]:
5014 for _, k8s in enumerate(db_nsr["_admin"]["deployed"]["K8s"]):
5015 cluster_uuid, kdu_instance, cluster_type = (
5016 k8s["k8scluster-uuid"],
5017 k8s["kdu-instance"],
5018 k8s["k8scluster-type"],
5019 )
5020 await self._on_update_k8s_db(
5021 cluster_uuid=cluster_uuid,
5022 kdu_instance=kdu_instance,
5023 filter={"_id": nsr_id},
5024 vca_id=vca_id,
5025 cluster_type=cluster_type,
5026 )
5027 else:
5028 for vca_index, _ in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
5029 table, filter = "nsrs", {"_id": nsr_id}
5030 path = "_admin.deployed.VCA.{}.".format(vca_index)
5031 await self._on_update_n2vc_db(table, filter, path, {})
5032
5033 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
5034 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
5035
    async def action(self, nsr_id, nslcmop_id):
        """Run a primitive (action) on a deployed NS, VNF, VDU or KDU.

        The target (member_vnf_index / vdu_id / kdu_name) and the primitive
        name and parameters are read from the nslcmop record. KDU primitives
        are dispatched to the matching K8s cluster connector; everything else
        is executed through the deployed VCA execution environment.

        :param nsr_id: id of the NS record in the "nsrs" collection
        :param nslcmop_id: id of the operation record in the "nslcmops" collection
        :return: (nslcmop_operation_state, detailed_status); the same result is
                 persisted to the database in the finally block
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop = None
        db_nsr_update = {}
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        error_description_nslcmop = None
        exc = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="RUNNING ACTION",
                current_operation_id=nslcmop_id,
            )

            step = "Getting information from database"
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            # primitive_params arrive JSON-encoded in the operation record
            if db_nslcmop["operationParams"].get("primitive_params"):
                db_nslcmop["operationParams"]["primitive_params"] = json.loads(
                    db_nslcmop["operationParams"]["primitive_params"]
                )

            nsr_deployed = db_nsr["_admin"].get("deployed")
            vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
            vdu_id = db_nslcmop["operationParams"].get("vdu_id")
            kdu_name = db_nslcmop["operationParams"].get("kdu_name")
            vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
            primitive = db_nslcmop["operationParams"]["primitive"]
            primitive_params = db_nslcmop["operationParams"]["primitive_params"]
            timeout_ns_action = db_nslcmop["operationParams"].get(
                "timeout_ns_action", self.timeout_primitive
            )

            if vnf_index:
                step = "Getting vnfr from database"
                db_vnfr = self.db.get_one(
                    "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
                )
                # KDU additionalParams are also stored JSON-encoded
                if db_vnfr.get("kdur"):
                    kdur_list = []
                    for kdur in db_vnfr["kdur"]:
                        if kdur.get("additionalParams"):
                            kdur["additionalParams"] = json.loads(
                                kdur["additionalParams"]
                            )
                        kdur_list.append(kdur)
                    db_vnfr["kdur"] = kdur_list
                step = "Getting vnfd from database"
                db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})

                # Sync filesystem before running a primitive
                self.fs.sync(db_vnfr["vnfd-id"])
            else:
                step = "Getting nsd from database"
                db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

            # NOTE(review): db_vnfr is only assigned inside the vnf_index
            # branch above, so an NS-level action (no member_vnf_index) would
            # hit a NameError here — TODO confirm intended callers always
            # provide member_vnf_index
            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # for backward compatibility
            if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
                nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
                db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
                self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # look for primitive
            config_primitive_desc = descriptor_configuration = None
            if vdu_id:
                descriptor_configuration = get_configuration(db_vnfd, vdu_id)
            elif kdu_name:
                descriptor_configuration = get_configuration(db_vnfd, kdu_name)
            elif vnf_index:
                descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
            else:
                descriptor_configuration = db_nsd.get("ns-configuration")

            if descriptor_configuration and descriptor_configuration.get(
                "config-primitive"
            ):
                for config_primitive in descriptor_configuration["config-primitive"]:
                    if config_primitive["name"] == primitive:
                        config_primitive_desc = config_primitive
                        break

            # KDU built-in primitives (upgrade/rollback/status) need no
            # descriptor entry; anything else must be declared
            if not config_primitive_desc:
                if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
                    raise LcmException(
                        "Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".format(
                            primitive
                        )
                    )
                primitive_name = primitive
                ee_descriptor_id = None
            else:
                primitive_name = config_primitive_desc.get(
                    "execution-environment-primitive", primitive
                )
                ee_descriptor_id = config_primitive_desc.get(
                    "execution-environment-ref"
                )

            # collect the additionalParams matching the action target
            if vnf_index:
                if vdu_id:
                    vdur = next(
                        (x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None
                    )
                    desc_params = parse_yaml_strings(vdur.get("additionalParams"))
                elif kdu_name:
                    kdur = next(
                        (x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None
                    )
                    desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                else:
                    desc_params = parse_yaml_strings(
                        db_vnfr.get("additionalParamsForVnf")
                    )
            else:
                desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
            if kdu_name and get_configuration(db_vnfd, kdu_name):
                kdu_configuration = get_configuration(db_vnfd, kdu_name)
                actions = set()
                # NOTE(review): these loops rebind the outer "primitive"
                # variable; it is not read again afterwards, but any future
                # use below this point would see a dict, not the name
                for primitive in kdu_configuration.get("initial-config-primitive", []):
                    actions.add(primitive["name"])
                for primitive in kdu_configuration.get("config-primitive", []):
                    actions.add(primitive["name"])
                kdu = find_in_list(
                    nsr_deployed["K8s"],
                    lambda kdu: kdu_name == kdu["kdu-name"]
                    and kdu["member-vnf-index"] == vnf_index,
                )
                # kdu_action: primitive declared in the KDU configuration and
                # the cluster type is not helm-chart / helm-chart-v3
                kdu_action = (
                    True
                    if primitive_name in actions
                    and kdu["k8scluster-type"] not in ("helm-chart", "helm-chart-v3")
                    else False
                )

            # TODO check if ns is in a proper status
            if kdu_name and (
                primitive_name in ("upgrade", "rollback", "status") or kdu_action
            ):
                # kdur and desc_params already set from before
                if primitive_params:
                    desc_params.update(primitive_params)
                # TODO Check if we will need something at vnf level
                for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
                    if (
                        kdu_name == kdu["kdu-name"]
                        and kdu["member-vnf-index"] == vnf_index
                    ):
                        break
                else:
                    raise LcmException(
                        "KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index)
                    )

                if kdu.get("k8scluster-type") not in self.k8scluster_map:
                    msg = "unknown k8scluster-type '{}'".format(
                        kdu.get("k8scluster-type")
                    )
                    raise LcmException(msg)

                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }
                self.logger.debug(
                    logging_text
                    + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name)
                )
                step = "Executing kdu {}".format(primitive_name)
                if primitive_name == "upgrade":
                    # kdu_model from the params wins over the deployed one;
                    # a "model:version" string is trimmed to the model part
                    if desc_params.get("kdu_model"):
                        kdu_model = desc_params.get("kdu_model")
                        del desc_params["kdu_model"]
                    else:
                        kdu_model = kdu.get("kdu-model")
                        parts = kdu_model.split(sep=":")
                        if len(parts) == 2:
                            kdu_model = parts[0]

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            atomic=True,
                            kdu_model=kdu_model,
                            params=desc_params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                        ),
                        timeout=timeout_ns_action + 10,
                    )
                    self.logger.debug(
                        logging_text + " Upgrade of kdu {} done".format(detailed_status)
                    )
                elif primitive_name == "rollback":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].rollback(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            db_dict=db_dict,
                        ),
                        timeout=timeout_ns_action,
                    )
                elif primitive_name == "status":
                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu.get("kdu-instance"),
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )
                else:
                    # custom KDU primitive executed through the K8s connector
                    kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(
                        kdu["kdu-name"], nsr_id
                    )
                    params = self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    )

                    detailed_status = await asyncio.wait_for(
                        self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance,
                            primitive_name=primitive_name,
                            params=params,
                            db_dict=db_dict,
                            timeout=timeout_ns_action,
                            vca_id=vca_id,
                        ),
                        timeout=timeout_ns_action,
                    )

                if detailed_status:
                    nslcmop_operation_state = "COMPLETED"
                else:
                    detailed_status = ""
                    nslcmop_operation_state = "FAILED"
            else:
                # VCA (charm) path: find the execution environment and run
                # the primitive there
                ee_id, vca_type = self._look_for_deployed_vca(
                    nsr_deployed["VCA"],
                    member_vnf_index=vnf_index,
                    vdu_id=vdu_id,
                    vdu_count_index=vdu_count_index,
                    ee_descriptor_id=ee_descriptor_id,
                )
                for vca_index, vca_deployed in enumerate(
                    db_nsr["_admin"]["deployed"]["VCA"]
                ):
                    if vca_deployed.get("member-vnf-index") == vnf_index:
                        db_dict = {
                            "collection": "nsrs",
                            "filter": {"_id": nsr_id},
                            "path": "_admin.deployed.VCA.{}.".format(vca_index),
                        }
                        break
                (
                    nslcmop_operation_state,
                    detailed_status,
                ) = await self._ns_execute_primitive(
                    ee_id,
                    primitive=primitive_name,
                    primitive_params=self._map_primitive_params(
                        config_primitive_desc, primitive_params, desc_params
                    ),
                    timeout=timeout_ns_action,
                    vca_type=vca_type,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

            db_nslcmop_update["detailed-status"] = detailed_status
            error_description_nslcmop = (
                detailed_status if nslcmop_operation_state == "FAILED" else ""
            )
            self.logger.debug(
                logging_text
                + " task Done with result {} {}".format(
                    nslcmop_operation_state, detailed_status
                )
            )
            return  # database update is called inside finally

        except (DbException, LcmException, N2VCException, K8sException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except asyncio.TimeoutError:
            self.logger.error(logging_text + "Timeout while '{}'".format(step))
            exc = "Timeout"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # persist operation result, release status and notify via kafka
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = (
                    detailed_status
                ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=db_nsr[
                        "nsState"
                    ],  # TODO check if degraded. For the moment use previous status
                    current_operation="IDLE",
                    current_operation_id=None,
                    # error_description=error_description_nsr,
                    # error_detail=error_detail,
                    other_update=db_nsr_update,
                )

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )

            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite(
                        "ns",
                        "actioned",
                        {
                            "nsr_id": nsr_id,
                            "nslcmop_id": nslcmop_id,
                            "operationState": nslcmop_operation_state,
                        },
                        loop=self.loop,
                    )
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
            return nslcmop_operation_state, detailed_status
5399
5400 async def terminate_vdus(
5401 self, db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text
5402 ):
5403 """This method terminates VDUs
5404
5405 Args:
5406 db_vnfr: VNF instance record
5407 member_vnf_index: VNF index to identify the VDUs to be removed
5408 db_nsr: NS instance record
5409 update_db_nslcmops: Nslcmop update record
5410 """
5411 vca_scaling_info = []
5412 scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
5413 scaling_info["scaling_direction"] = "IN"
5414 scaling_info["vdu-delete"] = {}
5415 scaling_info["kdu-delete"] = {}
5416 db_vdur = db_vnfr.get("vdur")
5417 vdur_list = copy(db_vdur)
5418 count_index = 0
5419 for index, vdu in enumerate(vdur_list):
5420 vca_scaling_info.append(
5421 {
5422 "osm_vdu_id": vdu["vdu-id-ref"],
5423 "member-vnf-index": member_vnf_index,
5424 "type": "delete",
5425 "vdu_index": count_index,
5426 })
5427 scaling_info["vdu-delete"][vdu["vdu-id-ref"]] = count_index
5428 scaling_info["vdu"].append(
5429 {
5430 "name": vdu.get("name") or vdu.get("vdu-name"),
5431 "vdu_id": vdu["vdu-id-ref"],
5432 "interface": [],
5433 })
5434 for interface in vdu["interfaces"]:
5435 scaling_info["vdu"][index]["interface"].append(
5436 {
5437 "name": interface["name"],
5438 "ip_address": interface["ip-address"],
5439 "mac_address": interface.get("mac-address"),
5440 })
5441 self.logger.info("NS update scaling info{}".format(scaling_info))
5442 stage[2] = "Terminating VDUs"
5443 if scaling_info.get("vdu-delete"):
5444 # scale_process = "RO"
5445 if self.ro_config.get("ng"):
5446 await self._scale_ng_ro(
5447 logging_text, db_nsr, update_db_nslcmops, db_vnfr, scaling_info, stage
5448 )
5449
5450 async def remove_vnf(
5451 self, nsr_id, nslcmop_id, vnf_instance_id
5452 ):
5453 """This method is to Remove VNF instances from NS.
5454
5455 Args:
5456 nsr_id: NS instance id
5457 nslcmop_id: nslcmop id of update
5458 vnf_instance_id: id of the VNF instance to be removed
5459
5460 Returns:
5461 result: (str, str) COMPLETED/FAILED, details
5462 """
5463 try:
5464 db_nsr_update = {}
5465 logging_text = "Task ns={} update ".format(nsr_id)
5466 check_vnfr_count = len(self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}))
5467 self.logger.info("check_vnfr_count {}".format(check_vnfr_count))
5468 if check_vnfr_count > 1:
5469 stage = ["", "", ""]
5470 step = "Getting nslcmop from database"
5471 self.logger.debug(step + " after having waited for previous tasks to be completed")
5472 # db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5473 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5474 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5475 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5476 """ db_vnfr = self.db.get_one(
5477 "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}) """
5478
5479 update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
5480 await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)
5481
5482 constituent_vnfr = db_nsr.get("constituent-vnfr-ref")
5483 constituent_vnfr.remove(db_vnfr.get("_id"))
5484 db_nsr_update["constituent-vnfr-ref"] = db_nsr.get("constituent-vnfr-ref")
5485 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5486 self.db.del_one("vnfrs", {"_id": db_vnfr.get("_id")})
5487 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5488 return "COMPLETED", "Done"
5489 else:
5490 step = "Terminate VNF Failed with"
5491 raise LcmException("{} Cannot terminate the last VNF in this NS.".format(
5492 vnf_instance_id))
5493 except (LcmException, asyncio.CancelledError):
5494 raise
5495 except Exception as e:
5496 self.logger.debug("Error removing VNF {}".format(e))
5497 return "FAILED", "Error removing VNF {}".format(e)
5498
    async def _ns_redeploy_vnf(
        self, nsr_id, nslcmop_id, db_vnfd, db_vnfr, db_nsr,
    ):
        """This method updates and redeploys VNF instances

        Terminates the currently deployed VDUs, rewrites the vnfr record
        (connection points, vdur, revision) from the latest descriptor, and
        then instantiates new VDU resources through NG-RO.

        Args:
            nsr_id: NS instance id
            nslcmop_id: nslcmop id
            db_vnfd: VNF descriptor (latest revision)
            db_vnfr: VNF instance record
            db_nsr: NS instance record

        Returns:
            result: (str, str) COMPLETED/FAILED, details
        """
        try:
            count_index = 0
            stage = ["", "", ""]
            logging_text = "Task ns={} update ".format(nsr_id)
            latest_vnfd_revision = db_vnfd["_admin"].get("revision")
            member_vnf_index = db_vnfr["member-vnf-index-ref"]

            # Terminate old VNF resources
            update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            await self.terminate_vdus(db_vnfr, member_vnf_index, db_nsr, update_db_nslcmops, stage, logging_text)

            # old_vnfd_id = db_vnfr["vnfd-id"]
            # new_db_vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
            new_db_vnfd = db_vnfd
            # new_vnfd_ref = new_db_vnfd["id"]
            # new_vnfd_id = vnfd_id

            # Create VDUR
            # rebuild the vnfr connection points from the descriptor ext-cpd
            new_vnfr_cp = []
            for cp in new_db_vnfd.get("ext-cpd", ()):
                vnf_cp = {
                    "name": cp.get("id"),
                    "connection-point-id": cp.get("int-cpd", {}).get("cpd"),
                    "connection-point-vdu-id": cp.get("int-cpd", {}).get("vdu-id"),
                    "id": cp.get("id"),
                }
                new_vnfr_cp.append(vnf_cp)
            # the new vdur was pre-computed by the caller and stored in the
            # operation parameters
            new_vdur = update_db_nslcmops["operationParams"]["newVdur"]
            # new_vdur = self._create_vdur_descriptor_from_vnfd(db_nsd, db_vnfd, old_db_vnfd, vnfd_id, db_nsr, member_vnf_index)
            # new_vnfr_update = {"vnfd-ref": new_vnfd_ref, "vnfd-id": new_vnfd_id, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            new_vnfr_update = {"revision": latest_vnfd_revision, "connection-point": new_vnfr_cp, "vdur": new_vdur, "ip-address": ""}
            self.update_db_2("vnfrs", db_vnfr["_id"], new_vnfr_update)
            updated_db_vnfr = self.db.get_one(
                "vnfrs", {"member-vnf-index-ref": member_vnf_index, "nsr-id-ref": nsr_id}
            )

            # Instantiate new VNF resources
            # update_db_nslcmops = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            # NOTE(review): vca_scaling_info and cloud_init_list are filled
            # below but never consumed afterwards — TODO confirm whether this
            # is leftover from the original scaling code
            vca_scaling_info = []
            scaling_info = {"scaling_group_name": "vdu_autoscale", "vdu": [], "kdu": []}
            scaling_info["scaling_direction"] = "OUT"
            scaling_info["vdu-create"] = {}
            scaling_info["kdu-create"] = {}
            vdud_instantiate_list = db_vnfd["vdu"]
            for index, vdud in enumerate(vdud_instantiate_list):
                cloud_init_text = self._get_vdu_cloud_init_content(
                    vdud, db_vnfd
                )
                if cloud_init_text:
                    additional_params = (
                        self._get_vdu_additional_params(updated_db_vnfr, vdud["id"])
                        or {}
                    )
                cloud_init_list = []
                if cloud_init_text:
                    # TODO Information of its own ip is not available because db_vnfr is not updated.
                    additional_params["OSM"] = get_osm_params(
                        updated_db_vnfr, vdud["id"], 1
                    )
                    cloud_init_list.append(
                        self._parse_cloud_init(
                            cloud_init_text,
                            additional_params,
                            db_vnfd["id"],
                            vdud["id"],
                        )
                    )
                vca_scaling_info.append(
                    {
                        "osm_vdu_id": vdud["id"],
                        "member-vnf-index": member_vnf_index,
                        "type": "create",
                        "vdu_index": count_index,
                    }
                )
                scaling_info["vdu-create"][vdud["id"]] = count_index
            if self.ro_config.get("ng"):
                self.logger.debug(
                    "New Resources to be deployed: {}".format(scaling_info))
                await self._scale_ng_ro(
                    logging_text, db_nsr, update_db_nslcmops, updated_db_vnfr, scaling_info, stage
                )
            return "COMPLETED", "Done"
        except (LcmException, asyncio.CancelledError):
            raise
        except Exception as e:
            self.logger.debug("Error updating VNF {}".format(e))
            return "FAILED", "Error updating VNF {}".format(e)
5602
5603 async def _ns_charm_upgrade(
5604 self,
5605 ee_id,
5606 charm_id,
5607 charm_type,
5608 path,
5609 timeout: float = None,
5610 ) -> (str, str):
5611 """This method upgrade charms in VNF instances
5612
5613 Args:
5614 ee_id: Execution environment id
5615 path: Local path to the charm
5616 charm_id: charm-id
5617 charm_type: Charm type can be lxc-proxy-charm, native-charm or k8s-proxy-charm
5618 timeout: (Float) Timeout for the ns update operation
5619
5620 Returns:
5621 result: (str, str) COMPLETED/FAILED, details
5622 """
5623 try:
5624 charm_type = charm_type or "lxc_proxy_charm"
5625 output = await self.vca_map[charm_type].upgrade_charm(
5626 ee_id=ee_id,
5627 path=path,
5628 charm_id=charm_id,
5629 charm_type=charm_type,
5630 timeout=timeout or self.timeout_ns_update,
5631 )
5632
5633 if output:
5634 return "COMPLETED", output
5635
5636 except (LcmException, asyncio.CancelledError):
5637 raise
5638
5639 except Exception as e:
5640
5641 self.logger.debug("Error upgrading charm {}".format(path))
5642
5643 return "FAILED", "Error upgrading charm {}: {}".format(path, e)
5644
5645 async def update(self, nsr_id, nslcmop_id):
5646 """Update NS according to different update types
5647
5648 This method performs upgrade of VNF instances then updates the revision
5649 number in VNF record
5650
5651 Args:
5652 nsr_id: Network service will be updated
5653 nslcmop_id: ns lcm operation id
5654
5655 Returns:
5656 It may raise DbException, LcmException, N2VCException, K8sException
5657
5658 """
5659 # Try to lock HA task here
5660 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
5661 if not task_is_locked_by_me:
5662 return
5663
5664 logging_text = "Task ns={} update={} ".format(nsr_id, nslcmop_id)
5665 self.logger.debug(logging_text + "Enter")
5666
5667 # Set the required variables to be filled up later
5668 db_nsr = None
5669 db_nslcmop_update = {}
5670 vnfr_update = {}
5671 nslcmop_operation_state = None
5672 db_nsr_update = {}
5673 error_description_nslcmop = ""
5674 exc = None
5675 change_type = "updated"
5676 detailed_status = ""
5677
5678 try:
5679 # wait for any previous tasks in process
5680 step = "Waiting for previous operations to terminate"
5681 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
5682 self._write_ns_status(
5683 nsr_id=nsr_id,
5684 ns_state=None,
5685 current_operation="UPDATING",
5686 current_operation_id=nslcmop_id,
5687 )
5688
5689 step = "Getting nslcmop from database"
5690 db_nslcmop = self.db.get_one(
5691 "nslcmops", {"_id": nslcmop_id}, fail_on_empty=False
5692 )
5693 update_type = db_nslcmop["operationParams"]["updateType"]
5694
5695 step = "Getting nsr from database"
5696 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
5697 old_operational_status = db_nsr["operational-status"]
5698 db_nsr_update["operational-status"] = "updating"
5699 self.update_db_2("nsrs", nsr_id, db_nsr_update)
5700 nsr_deployed = db_nsr["_admin"].get("deployed")
5701
5702 if update_type == "CHANGE_VNFPKG":
5703
5704 # Get the input parameters given through update request
5705 vnf_instance_id = db_nslcmop["operationParams"][
5706 "changeVnfPackageData"
5707 ].get("vnfInstanceId")
5708
5709 vnfd_id = db_nslcmop["operationParams"]["changeVnfPackageData"].get(
5710 "vnfdId"
5711 )
5712 timeout_seconds = db_nslcmop["operationParams"].get("timeout_ns_update")
5713
5714 step = "Getting vnfr from database"
5715 db_vnfr = self.db.get_one(
5716 "vnfrs", {"_id": vnf_instance_id}, fail_on_empty=False
5717 )
5718
5719 step = "Getting vnfds from database"
5720 # Latest VNFD
5721 latest_vnfd = self.db.get_one(
5722 "vnfds", {"_id": vnfd_id}, fail_on_empty=False
5723 )
5724 latest_vnfd_revision = latest_vnfd["_admin"].get("revision")
5725
5726 # Current VNFD
5727 current_vnf_revision = db_vnfr.get("revision", 1)
5728 current_vnfd = self.db.get_one(
5729 "vnfds_revisions",
5730 {"_id": vnfd_id + ":" + str(current_vnf_revision)},
5731 fail_on_empty=False,
5732 )
5733 # Charm artifact paths will be filled up later
5734 (
5735 current_charm_artifact_path,
5736 target_charm_artifact_path,
5737 charm_artifact_paths,
5738 ) = ([], [], [])
5739
5740 step = "Checking if revision has changed in VNFD"
5741 if current_vnf_revision != latest_vnfd_revision:
5742
5743 change_type = "policy_updated"
5744
5745 # There is new revision of VNFD, update operation is required
5746 current_vnfd_path = vnfd_id + ":" + str(current_vnf_revision)
5747 latest_vnfd_path = vnfd_id + ":" + str(latest_vnfd_revision)
5748
5749 step = "Removing the VNFD packages if they exist in the local path"
5750 shutil.rmtree(self.fs.path + current_vnfd_path, ignore_errors=True)
5751 shutil.rmtree(self.fs.path + latest_vnfd_path, ignore_errors=True)
5752
5753 step = "Get the VNFD packages from FSMongo"
5754 self.fs.sync(from_path=latest_vnfd_path)
5755 self.fs.sync(from_path=current_vnfd_path)
5756
5757 step = (
5758 "Get the charm-type, charm-id, ee-id if there is deployed VCA"
5759 )
5760 base_folder = latest_vnfd["_admin"]["storage"]
5761
5762 for charm_index, charm_deployed in enumerate(
5763 get_iterable(nsr_deployed, "VCA")
5764 ):
5765 vnf_index = db_vnfr.get("member-vnf-index-ref")
5766
5767 # Getting charm-id and charm-type
5768 if charm_deployed.get("member-vnf-index") == vnf_index:
5769 charm_id = self.get_vca_id(db_vnfr, db_nsr)
5770 charm_type = charm_deployed.get("type")
5771
5772 # Getting ee-id
5773 ee_id = charm_deployed.get("ee_id")
5774
5775 step = "Getting descriptor config"
5776 descriptor_config = get_configuration(
5777 current_vnfd, current_vnfd["id"]
5778 )
5779
5780 if "execution-environment-list" in descriptor_config:
5781 ee_list = descriptor_config.get(
5782 "execution-environment-list", []
5783 )
5784 else:
5785 ee_list = []
5786
5787 # There could be several charm used in the same VNF
5788 for ee_item in ee_list:
5789 if ee_item.get("juju"):
5790
5791 step = "Getting charm name"
5792 charm_name = ee_item["juju"].get("charm")
5793
5794 step = "Setting Charm artifact paths"
5795 current_charm_artifact_path.append(
5796 get_charm_artifact_path(
5797 base_folder,
5798 charm_name,
5799 charm_type,
5800 current_vnf_revision,
5801 )
5802 )
5803 target_charm_artifact_path.append(
5804 get_charm_artifact_path(
5805 base_folder,
5806 charm_name,
5807 charm_type,
5808 latest_vnfd_revision,
5809 )
5810 )
5811
5812 charm_artifact_paths = zip(
5813 current_charm_artifact_path, target_charm_artifact_path
5814 )
5815
5816 step = "Checking if software version has changed in VNFD"
5817 if find_software_version(current_vnfd) != find_software_version(
5818 latest_vnfd
5819 ):
5820
5821 step = "Checking if existing VNF has charm"
5822 for current_charm_path, target_charm_path in list(
5823 charm_artifact_paths
5824 ):
5825 if current_charm_path:
5826 raise LcmException(
5827 "Software version change is not supported as VNF instance {} has charm.".format(
5828 vnf_instance_id
5829 )
5830 )
5831
5832 # There is no change in the charm package, then redeploy the VNF
5833 # based on new descriptor
5834 step = "Redeploying VNF"
5835 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5836 (
5837 result,
5838 detailed_status
5839 ) = await self._ns_redeploy_vnf(
5840 nsr_id,
5841 nslcmop_id,
5842 latest_vnfd,
5843 db_vnfr,
5844 db_nsr
5845 )
5846 if result == "FAILED":
5847 nslcmop_operation_state = result
5848 error_description_nslcmop = detailed_status
5849 db_nslcmop_update["detailed-status"] = detailed_status
5850 self.logger.debug(
5851 logging_text
5852 + " step {} Done with result {} {}".format(
5853 step, nslcmop_operation_state, detailed_status
5854 )
5855 )
5856
5857 else:
5858 step = "Checking if any charm package has changed or not"
5859 for current_charm_path, target_charm_path in list(
5860 charm_artifact_paths
5861 ):
5862 if (
5863 current_charm_path
5864 and target_charm_path
5865 and self.check_charm_hash_changed(
5866 current_charm_path, target_charm_path
5867 )
5868 ):
5869
5870 step = "Checking whether VNF uses juju bundle"
5871 if check_juju_bundle_existence(current_vnfd):
5872
5873 raise LcmException(
5874 "Charm upgrade is not supported for the instance which"
5875 " uses juju-bundle: {}".format(
5876 check_juju_bundle_existence(current_vnfd)
5877 )
5878 )
5879
5880 step = "Upgrading Charm"
5881 (
5882 result,
5883 detailed_status,
5884 ) = await self._ns_charm_upgrade(
5885 ee_id=ee_id,
5886 charm_id=charm_id,
5887 charm_type=charm_type,
5888 path=self.fs.path + target_charm_path,
5889 timeout=timeout_seconds,
5890 )
5891
5892 if result == "FAILED":
5893 nslcmop_operation_state = result
5894 error_description_nslcmop = detailed_status
5895
5896 db_nslcmop_update["detailed-status"] = detailed_status
5897 self.logger.debug(
5898 logging_text
5899 + " step {} Done with result {} {}".format(
5900 step, nslcmop_operation_state, detailed_status
5901 )
5902 )
5903
5904 step = "Updating policies"
5905 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5906 result = "COMPLETED"
5907 detailed_status = "Done"
5908 db_nslcmop_update["detailed-status"] = "Done"
5909
5910 # If nslcmop_operation_state is None, so any operation is not failed.
5911 if not nslcmop_operation_state:
5912 nslcmop_operation_state = "COMPLETED"
5913
5914 # If update CHANGE_VNFPKG nslcmop_operation is successful
5915 # vnf revision need to be updated
5916 vnfr_update["revision"] = latest_vnfd_revision
5917 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
5918
5919 self.logger.debug(
5920 logging_text
5921 + " task Done with result {} {}".format(
5922 nslcmop_operation_state, detailed_status
5923 )
5924 )
5925 elif update_type == "REMOVE_VNF":
5926 # This part is included in https://osm.etsi.org/gerrit/11876
5927 vnf_instance_id = db_nslcmop["operationParams"]["removeVnfInstanceId"]
5928 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
5929 member_vnf_index = db_vnfr["member-vnf-index-ref"]
5930 step = "Removing VNF"
5931 (result, detailed_status) = await self.remove_vnf(nsr_id, nslcmop_id, vnf_instance_id)
5932 if result == "FAILED":
5933 nslcmop_operation_state = result
5934 error_description_nslcmop = detailed_status
5935 db_nslcmop_update["detailed-status"] = detailed_status
5936 change_type = "vnf_terminated"
5937 if not nslcmop_operation_state:
5938 nslcmop_operation_state = "COMPLETED"
5939 self.logger.debug(
5940 logging_text
5941 + " task Done with result {} {}".format(
5942 nslcmop_operation_state, detailed_status
5943 )
5944 )
5945
5946 elif update_type == "OPERATE_VNF":
5947 vnf_id = db_nslcmop["operationParams"]["operateVnfData"]["vnfInstanceId"]
5948 operation_type = db_nslcmop["operationParams"]["operateVnfData"]["changeStateTo"]
5949 additional_param = db_nslcmop["operationParams"]["operateVnfData"]["additionalParam"]
5950 (result, detailed_status) = await self.rebuild_start_stop(
5951 nsr_id, nslcmop_id, vnf_id, additional_param, operation_type
5952 )
5953 if result == "FAILED":
5954 nslcmop_operation_state = result
5955 error_description_nslcmop = detailed_status
5956 db_nslcmop_update["detailed-status"] = detailed_status
5957 if not nslcmop_operation_state:
5958 nslcmop_operation_state = "COMPLETED"
5959 self.logger.debug(
5960 logging_text
5961 + " task Done with result {} {}".format(
5962 nslcmop_operation_state, detailed_status
5963 )
5964 )
5965
5966 # If nslcmop_operation_state is None, so any operation is not failed.
5967 # All operations are executed in overall.
5968 if not nslcmop_operation_state:
5969 nslcmop_operation_state = "COMPLETED"
5970 db_nsr_update["operational-status"] = old_operational_status
5971
5972 except (DbException, LcmException, N2VCException, K8sException) as e:
5973 self.logger.error(logging_text + "Exit Exception {}".format(e))
5974 exc = e
5975 except asyncio.CancelledError:
5976 self.logger.error(
5977 logging_text + "Cancelled Exception while '{}'".format(step)
5978 )
5979 exc = "Operation was cancelled"
5980 except asyncio.TimeoutError:
5981 self.logger.error(logging_text + "Timeout while '{}'".format(step))
5982 exc = "Timeout"
5983 except Exception as e:
5984 exc = traceback.format_exc()
5985 self.logger.critical(
5986 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
5987 exc_info=True,
5988 )
5989 finally:
5990 if exc:
5991 db_nslcmop_update[
5992 "detailed-status"
5993 ] = (
5994 detailed_status
5995 ) = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
5996 nslcmop_operation_state = "FAILED"
5997 db_nsr_update["operational-status"] = old_operational_status
5998 if db_nsr:
5999 self._write_ns_status(
6000 nsr_id=nsr_id,
6001 ns_state=db_nsr["nsState"],
6002 current_operation="IDLE",
6003 current_operation_id=None,
6004 other_update=db_nsr_update,
6005 )
6006
6007 self._write_op_status(
6008 op_id=nslcmop_id,
6009 stage="",
6010 error_message=error_description_nslcmop,
6011 operation_state=nslcmop_operation_state,
6012 other_update=db_nslcmop_update,
6013 )
6014
6015 if nslcmop_operation_state:
6016 try:
6017 msg = {
6018 "nsr_id": nsr_id,
6019 "nslcmop_id": nslcmop_id,
6020 "operationState": nslcmop_operation_state,
6021 }
6022 if change_type in ("vnf_terminated", "policy_updated"):
6023 msg.update({"vnf_member_index": member_vnf_index})
6024 await self.msg.aiowrite("ns", change_type, msg, loop=self.loop)
6025 except Exception as e:
6026 self.logger.error(
6027 logging_text + "kafka_write notification Exception {}".format(e)
6028 )
6029 self.logger.debug(logging_text + "Exit")
6030 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_update")
6031 return nslcmop_operation_state, detailed_status
6032
6033 async def scale(self, nsr_id, nslcmop_id):
6034 # Try to lock HA task here
6035 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
6036 if not task_is_locked_by_me:
6037 return
6038
6039 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
6040 stage = ["", "", ""]
6041 tasks_dict_info = {}
6042 # ^ stage, step, VIM progress
6043 self.logger.debug(logging_text + "Enter")
6044 # get all needed from database
6045 db_nsr = None
6046 db_nslcmop_update = {}
6047 db_nsr_update = {}
6048 exc = None
6049 # in case of error, indicates what part of scale was failed to put nsr at error status
6050 scale_process = None
6051 old_operational_status = ""
6052 old_config_status = ""
6053 nsi_id = None
6054 try:
6055 # wait for any previous tasks in process
6056 step = "Waiting for previous operations to terminate"
6057 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
6058 self._write_ns_status(
6059 nsr_id=nsr_id,
6060 ns_state=None,
6061 current_operation="SCALING",
6062 current_operation_id=nslcmop_id,
6063 )
6064
6065 step = "Getting nslcmop from database"
6066 self.logger.debug(
6067 step + " after having waited for previous tasks to be completed"
6068 )
6069 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
6070
6071 step = "Getting nsr from database"
6072 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
6073 old_operational_status = db_nsr["operational-status"]
6074 old_config_status = db_nsr["config-status"]
6075
6076 step = "Parsing scaling parameters"
6077 db_nsr_update["operational-status"] = "scaling"
6078 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6079 nsr_deployed = db_nsr["_admin"].get("deployed")
6080
6081 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"][
6082 "scaleByStepData"
6083 ]["member-vnf-index"]
6084 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"][
6085 "scaleByStepData"
6086 ]["scaling-group-descriptor"]
6087 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
6088 # for backward compatibility
6089 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
6090 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
6091 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
6092 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6093
6094 step = "Getting vnfr from database"
6095 db_vnfr = self.db.get_one(
6096 "vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id}
6097 )
6098
6099 vca_id = self.get_vca_id(db_vnfr, db_nsr)
6100
6101 step = "Getting vnfd from database"
6102 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
6103
6104 base_folder = db_vnfd["_admin"]["storage"]
6105
6106 step = "Getting scaling-group-descriptor"
6107 scaling_descriptor = find_in_list(
6108 get_scaling_aspect(db_vnfd),
6109 lambda scale_desc: scale_desc["name"] == scaling_group,
6110 )
6111 if not scaling_descriptor:
6112 raise LcmException(
6113 "input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
6114 "at vnfd:scaling-group-descriptor".format(scaling_group)
6115 )
6116
6117 step = "Sending scale order to VIM"
6118 # TODO check if ns is in a proper status
6119 nb_scale_op = 0
6120 if not db_nsr["_admin"].get("scaling-group"):
6121 self.update_db_2(
6122 "nsrs",
6123 nsr_id,
6124 {
6125 "_admin.scaling-group": [
6126 {"name": scaling_group, "nb-scale-op": 0}
6127 ]
6128 },
6129 )
6130 admin_scale_index = 0
6131 else:
6132 for admin_scale_index, admin_scale_info in enumerate(
6133 db_nsr["_admin"]["scaling-group"]
6134 ):
6135 if admin_scale_info["name"] == scaling_group:
6136 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
6137 break
6138 else: # not found, set index one plus last element and add new entry with the name
6139 admin_scale_index += 1
6140 db_nsr_update[
6141 "_admin.scaling-group.{}.name".format(admin_scale_index)
6142 ] = scaling_group
6143
6144 vca_scaling_info = []
6145 scaling_info = {"scaling_group_name": scaling_group, "vdu": [], "kdu": []}
6146 if scaling_type == "SCALE_OUT":
6147 if "aspect-delta-details" not in scaling_descriptor:
6148 raise LcmException(
6149 "Aspect delta details not fount in scaling descriptor {}".format(
6150 scaling_descriptor["name"]
6151 )
6152 )
6153 # count if max-instance-count is reached
6154 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6155
6156 scaling_info["scaling_direction"] = "OUT"
6157 scaling_info["vdu-create"] = {}
6158 scaling_info["kdu-create"] = {}
6159 for delta in deltas:
6160 for vdu_delta in delta.get("vdu-delta", {}):
6161 vdud = get_vdu(db_vnfd, vdu_delta["id"])
6162 # vdu_index also provides the number of instance of the targeted vdu
6163 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6164 cloud_init_text = self._get_vdu_cloud_init_content(
6165 vdud, db_vnfd
6166 )
6167 if cloud_init_text:
6168 additional_params = (
6169 self._get_vdu_additional_params(db_vnfr, vdud["id"])
6170 or {}
6171 )
6172 cloud_init_list = []
6173
6174 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6175 max_instance_count = 10
6176 if vdu_profile and "max-number-of-instances" in vdu_profile:
6177 max_instance_count = vdu_profile.get(
6178 "max-number-of-instances", 10
6179 )
6180
6181 default_instance_num = get_number_of_instances(
6182 db_vnfd, vdud["id"]
6183 )
6184 instances_number = vdu_delta.get("number-of-instances", 1)
6185 nb_scale_op += instances_number
6186
6187 new_instance_count = nb_scale_op + default_instance_num
6188 # Control if new count is over max and vdu count is less than max.
6189 # Then assign new instance count
6190 if new_instance_count > max_instance_count > vdu_count:
6191 instances_number = new_instance_count - max_instance_count
6192 else:
6193 instances_number = instances_number
6194
6195 if new_instance_count > max_instance_count:
6196 raise LcmException(
6197 "reached the limit of {} (max-instance-count) "
6198 "scaling-out operations for the "
6199 "scaling-group-descriptor '{}'".format(
6200 nb_scale_op, scaling_group
6201 )
6202 )
6203 for x in range(vdu_delta.get("number-of-instances", 1)):
6204 if cloud_init_text:
6205 # TODO Information of its own ip is not available because db_vnfr is not updated.
6206 additional_params["OSM"] = get_osm_params(
6207 db_vnfr, vdu_delta["id"], vdu_index + x
6208 )
6209 cloud_init_list.append(
6210 self._parse_cloud_init(
6211 cloud_init_text,
6212 additional_params,
6213 db_vnfd["id"],
6214 vdud["id"],
6215 )
6216 )
6217 vca_scaling_info.append(
6218 {
6219 "osm_vdu_id": vdu_delta["id"],
6220 "member-vnf-index": vnf_index,
6221 "type": "create",
6222 "vdu_index": vdu_index + x,
6223 }
6224 )
6225 scaling_info["vdu-create"][vdu_delta["id"]] = instances_number
6226 for kdu_delta in delta.get("kdu-resource-delta", {}):
6227 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6228 kdu_name = kdu_profile["kdu-name"]
6229 resource_name = kdu_profile.get("resource-name", "")
6230
6231 # Might have different kdus in the same delta
6232 # Should have list for each kdu
6233 if not scaling_info["kdu-create"].get(kdu_name, None):
6234 scaling_info["kdu-create"][kdu_name] = []
6235
6236 kdur = get_kdur(db_vnfr, kdu_name)
6237 if kdur.get("helm-chart"):
6238 k8s_cluster_type = "helm-chart-v3"
6239 self.logger.debug("kdur: {}".format(kdur))
6240 if (
6241 kdur.get("helm-version")
6242 and kdur.get("helm-version") == "v2"
6243 ):
6244 k8s_cluster_type = "helm-chart"
6245 elif kdur.get("juju-bundle"):
6246 k8s_cluster_type = "juju-bundle"
6247 else:
6248 raise LcmException(
6249 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6250 "juju-bundle. Maybe an old NBI version is running".format(
6251 db_vnfr["member-vnf-index-ref"], kdu_name
6252 )
6253 )
6254
6255 max_instance_count = 10
6256 if kdu_profile and "max-number-of-instances" in kdu_profile:
6257 max_instance_count = kdu_profile.get(
6258 "max-number-of-instances", 10
6259 )
6260
6261 nb_scale_op += kdu_delta.get("number-of-instances", 1)
6262 deployed_kdu, _ = get_deployed_kdu(
6263 nsr_deployed, kdu_name, vnf_index
6264 )
6265 if deployed_kdu is None:
6266 raise LcmException(
6267 "KDU '{}' for vnf '{}' not deployed".format(
6268 kdu_name, vnf_index
6269 )
6270 )
6271 kdu_instance = deployed_kdu.get("kdu-instance")
6272 instance_num = await self.k8scluster_map[
6273 k8s_cluster_type
6274 ].get_scale_count(
6275 resource_name,
6276 kdu_instance,
6277 vca_id=vca_id,
6278 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6279 kdu_model=deployed_kdu.get("kdu-model"),
6280 )
6281 kdu_replica_count = instance_num + kdu_delta.get(
6282 "number-of-instances", 1
6283 )
6284
6285 # Control if new count is over max and instance_num is less than max.
6286 # Then assign max instance number to kdu replica count
6287 if kdu_replica_count > max_instance_count > instance_num:
6288 kdu_replica_count = max_instance_count
6289 if kdu_replica_count > max_instance_count:
6290 raise LcmException(
6291 "reached the limit of {} (max-instance-count) "
6292 "scaling-out operations for the "
6293 "scaling-group-descriptor '{}'".format(
6294 instance_num, scaling_group
6295 )
6296 )
6297
6298 for x in range(kdu_delta.get("number-of-instances", 1)):
6299 vca_scaling_info.append(
6300 {
6301 "osm_kdu_id": kdu_name,
6302 "member-vnf-index": vnf_index,
6303 "type": "create",
6304 "kdu_index": instance_num + x - 1,
6305 }
6306 )
6307 scaling_info["kdu-create"][kdu_name].append(
6308 {
6309 "member-vnf-index": vnf_index,
6310 "type": "create",
6311 "k8s-cluster-type": k8s_cluster_type,
6312 "resource-name": resource_name,
6313 "scale": kdu_replica_count,
6314 }
6315 )
6316 elif scaling_type == "SCALE_IN":
6317 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
6318
6319 scaling_info["scaling_direction"] = "IN"
6320 scaling_info["vdu-delete"] = {}
6321 scaling_info["kdu-delete"] = {}
6322
6323 for delta in deltas:
6324 for vdu_delta in delta.get("vdu-delta", {}):
6325 vdu_count = vdu_index = get_vdur_index(db_vnfr, vdu_delta)
6326 min_instance_count = 0
6327 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
6328 if vdu_profile and "min-number-of-instances" in vdu_profile:
6329 min_instance_count = vdu_profile["min-number-of-instances"]
6330
6331 default_instance_num = get_number_of_instances(
6332 db_vnfd, vdu_delta["id"]
6333 )
6334 instance_num = vdu_delta.get("number-of-instances", 1)
6335 nb_scale_op -= instance_num
6336
6337 new_instance_count = nb_scale_op + default_instance_num
6338
6339 if new_instance_count < min_instance_count < vdu_count:
6340 instances_number = min_instance_count - new_instance_count
6341 else:
6342 instances_number = instance_num
6343
6344 if new_instance_count < min_instance_count:
6345 raise LcmException(
6346 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6347 "scaling-group-descriptor '{}'".format(
6348 nb_scale_op, scaling_group
6349 )
6350 )
6351 for x in range(vdu_delta.get("number-of-instances", 1)):
6352 vca_scaling_info.append(
6353 {
6354 "osm_vdu_id": vdu_delta["id"],
6355 "member-vnf-index": vnf_index,
6356 "type": "delete",
6357 "vdu_index": vdu_index - 1 - x,
6358 }
6359 )
6360 scaling_info["vdu-delete"][vdu_delta["id"]] = instances_number
6361 for kdu_delta in delta.get("kdu-resource-delta", {}):
6362 kdu_profile = get_kdu_resource_profile(db_vnfd, kdu_delta["id"])
6363 kdu_name = kdu_profile["kdu-name"]
6364 resource_name = kdu_profile.get("resource-name", "")
6365
6366 if not scaling_info["kdu-delete"].get(kdu_name, None):
6367 scaling_info["kdu-delete"][kdu_name] = []
6368
6369 kdur = get_kdur(db_vnfr, kdu_name)
6370 if kdur.get("helm-chart"):
6371 k8s_cluster_type = "helm-chart-v3"
6372 self.logger.debug("kdur: {}".format(kdur))
6373 if (
6374 kdur.get("helm-version")
6375 and kdur.get("helm-version") == "v2"
6376 ):
6377 k8s_cluster_type = "helm-chart"
6378 elif kdur.get("juju-bundle"):
6379 k8s_cluster_type = "juju-bundle"
6380 else:
6381 raise LcmException(
6382 "kdu type for kdu='{}.{}' is neither helm-chart nor "
6383 "juju-bundle. Maybe an old NBI version is running".format(
6384 db_vnfr["member-vnf-index-ref"], kdur["kdu-name"]
6385 )
6386 )
6387
6388 min_instance_count = 0
6389 if kdu_profile and "min-number-of-instances" in kdu_profile:
6390 min_instance_count = kdu_profile["min-number-of-instances"]
6391
6392 nb_scale_op -= kdu_delta.get("number-of-instances", 1)
6393 deployed_kdu, _ = get_deployed_kdu(
6394 nsr_deployed, kdu_name, vnf_index
6395 )
6396 if deployed_kdu is None:
6397 raise LcmException(
6398 "KDU '{}' for vnf '{}' not deployed".format(
6399 kdu_name, vnf_index
6400 )
6401 )
6402 kdu_instance = deployed_kdu.get("kdu-instance")
6403 instance_num = await self.k8scluster_map[
6404 k8s_cluster_type
6405 ].get_scale_count(
6406 resource_name,
6407 kdu_instance,
6408 vca_id=vca_id,
6409 cluster_uuid=deployed_kdu.get("k8scluster-uuid"),
6410 kdu_model=deployed_kdu.get("kdu-model"),
6411 )
6412 kdu_replica_count = instance_num - kdu_delta.get(
6413 "number-of-instances", 1
6414 )
6415
6416 if kdu_replica_count < min_instance_count < instance_num:
6417 kdu_replica_count = min_instance_count
6418 if kdu_replica_count < min_instance_count:
6419 raise LcmException(
6420 "reached the limit of {} (min-instance-count) scaling-in operations for the "
6421 "scaling-group-descriptor '{}'".format(
6422 instance_num, scaling_group
6423 )
6424 )
6425
6426 for x in range(kdu_delta.get("number-of-instances", 1)):
6427 vca_scaling_info.append(
6428 {
6429 "osm_kdu_id": kdu_name,
6430 "member-vnf-index": vnf_index,
6431 "type": "delete",
6432 "kdu_index": instance_num - x - 1,
6433 }
6434 )
6435 scaling_info["kdu-delete"][kdu_name].append(
6436 {
6437 "member-vnf-index": vnf_index,
6438 "type": "delete",
6439 "k8s-cluster-type": k8s_cluster_type,
6440 "resource-name": resource_name,
6441 "scale": kdu_replica_count,
6442 }
6443 )
6444
6445 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
6446 vdu_delete = copy(scaling_info.get("vdu-delete"))
6447 if scaling_info["scaling_direction"] == "IN":
6448 for vdur in reversed(db_vnfr["vdur"]):
6449 if vdu_delete.get(vdur["vdu-id-ref"]):
6450 vdu_delete[vdur["vdu-id-ref"]] -= 1
6451 scaling_info["vdu"].append(
6452 {
6453 "name": vdur.get("name") or vdur.get("vdu-name"),
6454 "vdu_id": vdur["vdu-id-ref"],
6455 "interface": [],
6456 }
6457 )
6458 for interface in vdur["interfaces"]:
6459 scaling_info["vdu"][-1]["interface"].append(
6460 {
6461 "name": interface["name"],
6462 "ip_address": interface["ip-address"],
6463 "mac_address": interface.get("mac-address"),
6464 }
6465 )
6466 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
6467
6468 # PRE-SCALE BEGIN
6469 step = "Executing pre-scale vnf-config-primitive"
6470 if scaling_descriptor.get("scaling-config-action"):
6471 for scaling_config_action in scaling_descriptor[
6472 "scaling-config-action"
6473 ]:
6474 if (
6475 scaling_config_action.get("trigger") == "pre-scale-in"
6476 and scaling_type == "SCALE_IN"
6477 ) or (
6478 scaling_config_action.get("trigger") == "pre-scale-out"
6479 and scaling_type == "SCALE_OUT"
6480 ):
6481 vnf_config_primitive = scaling_config_action[
6482 "vnf-config-primitive-name-ref"
6483 ]
6484 step = db_nslcmop_update[
6485 "detailed-status"
6486 ] = "executing pre-scale scaling-config-action '{}'".format(
6487 vnf_config_primitive
6488 )
6489
6490 # look for primitive
6491 for config_primitive in (
6492 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6493 ).get("config-primitive", ()):
6494 if config_primitive["name"] == vnf_config_primitive:
6495 break
6496 else:
6497 raise LcmException(
6498 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
6499 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
6500 "primitive".format(scaling_group, vnf_config_primitive)
6501 )
6502
6503 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6504 if db_vnfr.get("additionalParamsForVnf"):
6505 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6506
6507 scale_process = "VCA"
6508 db_nsr_update["config-status"] = "configuring pre-scaling"
6509 primitive_params = self._map_primitive_params(
6510 config_primitive, {}, vnfr_params
6511 )
6512
6513 # Pre-scale retry check: Check if this sub-operation has been executed before
6514 op_index = self._check_or_add_scale_suboperation(
6515 db_nslcmop,
6516 vnf_index,
6517 vnf_config_primitive,
6518 primitive_params,
6519 "PRE-SCALE",
6520 )
6521 if op_index == self.SUBOPERATION_STATUS_SKIP:
6522 # Skip sub-operation
6523 result = "COMPLETED"
6524 result_detail = "Done"
6525 self.logger.debug(
6526 logging_text
6527 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6528 vnf_config_primitive, result, result_detail
6529 )
6530 )
6531 else:
6532 if op_index == self.SUBOPERATION_STATUS_NEW:
6533 # New sub-operation: Get index of this sub-operation
6534 op_index = (
6535 len(db_nslcmop.get("_admin", {}).get("operations"))
6536 - 1
6537 )
6538 self.logger.debug(
6539 logging_text
6540 + "vnf_config_primitive={} New sub-operation".format(
6541 vnf_config_primitive
6542 )
6543 )
6544 else:
6545 # retry: Get registered params for this existing sub-operation
6546 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6547 op_index
6548 ]
6549 vnf_index = op.get("member_vnf_index")
6550 vnf_config_primitive = op.get("primitive")
6551 primitive_params = op.get("primitive_params")
6552 self.logger.debug(
6553 logging_text
6554 + "vnf_config_primitive={} Sub-operation retry".format(
6555 vnf_config_primitive
6556 )
6557 )
6558 # Execute the primitive, either with new (first-time) or registered (reintent) args
6559 ee_descriptor_id = config_primitive.get(
6560 "execution-environment-ref"
6561 )
6562 primitive_name = config_primitive.get(
6563 "execution-environment-primitive", vnf_config_primitive
6564 )
6565 ee_id, vca_type = self._look_for_deployed_vca(
6566 nsr_deployed["VCA"],
6567 member_vnf_index=vnf_index,
6568 vdu_id=None,
6569 vdu_count_index=None,
6570 ee_descriptor_id=ee_descriptor_id,
6571 )
6572 result, result_detail = await self._ns_execute_primitive(
6573 ee_id,
6574 primitive_name,
6575 primitive_params,
6576 vca_type=vca_type,
6577 vca_id=vca_id,
6578 )
6579 self.logger.debug(
6580 logging_text
6581 + "vnf_config_primitive={} Done with result {} {}".format(
6582 vnf_config_primitive, result, result_detail
6583 )
6584 )
6585 # Update operationState = COMPLETED | FAILED
6586 self._update_suboperation_status(
6587 db_nslcmop, op_index, result, result_detail
6588 )
6589
6590 if result == "FAILED":
6591 raise LcmException(result_detail)
6592 db_nsr_update["config-status"] = old_config_status
6593 scale_process = None
6594 # PRE-SCALE END
6595
6596 db_nsr_update[
6597 "_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)
6598 ] = nb_scale_op
6599 db_nsr_update[
6600 "_admin.scaling-group.{}.time".format(admin_scale_index)
6601 ] = time()
6602
6603 # SCALE-IN VCA - BEGIN
6604 if vca_scaling_info:
6605 step = db_nslcmop_update[
6606 "detailed-status"
6607 ] = "Deleting the execution environments"
6608 scale_process = "VCA"
6609 for vca_info in vca_scaling_info:
6610 if vca_info["type"] == "delete" and not vca_info.get("osm_kdu_id"):
6611 member_vnf_index = str(vca_info["member-vnf-index"])
6612 self.logger.debug(
6613 logging_text + "vdu info: {}".format(vca_info)
6614 )
6615 if vca_info.get("osm_vdu_id"):
6616 vdu_id = vca_info["osm_vdu_id"]
6617 vdu_index = int(vca_info["vdu_index"])
6618 stage[
6619 1
6620 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6621 member_vnf_index, vdu_id, vdu_index
6622 )
6623 stage[2] = step = "Scaling in VCA"
6624 self._write_op_status(op_id=nslcmop_id, stage=stage)
6625 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
6626 config_update = db_nsr["configurationStatus"]
6627 for vca_index, vca in enumerate(vca_update):
6628 if (
6629 (vca or vca.get("ee_id"))
6630 and vca["member-vnf-index"] == member_vnf_index
6631 and vca["vdu_count_index"] == vdu_index
6632 ):
6633 if vca.get("vdu_id"):
6634 config_descriptor = get_configuration(
6635 db_vnfd, vca.get("vdu_id")
6636 )
6637 elif vca.get("kdu_name"):
6638 config_descriptor = get_configuration(
6639 db_vnfd, vca.get("kdu_name")
6640 )
6641 else:
6642 config_descriptor = get_configuration(
6643 db_vnfd, db_vnfd["id"]
6644 )
6645 operation_params = (
6646 db_nslcmop.get("operationParams") or {}
6647 )
6648 exec_terminate_primitives = not operation_params.get(
6649 "skip_terminate_primitives"
6650 ) and vca.get("needed_terminate")
6651 task = asyncio.ensure_future(
6652 asyncio.wait_for(
6653 self.destroy_N2VC(
6654 logging_text,
6655 db_nslcmop,
6656 vca,
6657 config_descriptor,
6658 vca_index,
6659 destroy_ee=True,
6660 exec_primitives=exec_terminate_primitives,
6661 scaling_in=True,
6662 vca_id=vca_id,
6663 ),
6664 timeout=self.timeout_charm_delete,
6665 )
6666 )
6667 tasks_dict_info[task] = "Terminating VCA {}".format(
6668 vca.get("ee_id")
6669 )
6670 del vca_update[vca_index]
6671 del config_update[vca_index]
6672 # wait for pending tasks of terminate primitives
6673 if tasks_dict_info:
6674 self.logger.debug(
6675 logging_text
6676 + "Waiting for tasks {}".format(
6677 list(tasks_dict_info.keys())
6678 )
6679 )
6680 error_list = await self._wait_for_tasks(
6681 logging_text,
6682 tasks_dict_info,
6683 min(
6684 self.timeout_charm_delete, self.timeout_ns_terminate
6685 ),
6686 stage,
6687 nslcmop_id,
6688 )
6689 tasks_dict_info.clear()
6690 if error_list:
6691 raise LcmException("; ".join(error_list))
6692
6693 db_vca_and_config_update = {
6694 "_admin.deployed.VCA": vca_update,
6695 "configurationStatus": config_update,
6696 }
6697 self.update_db_2(
6698 "nsrs", db_nsr["_id"], db_vca_and_config_update
6699 )
6700 scale_process = None
6701 # SCALE-IN VCA - END
6702
6703 # SCALE RO - BEGIN
6704 if scaling_info.get("vdu-create") or scaling_info.get("vdu-delete"):
6705 scale_process = "RO"
6706 if self.ro_config.get("ng"):
6707 await self._scale_ng_ro(
6708 logging_text, db_nsr, db_nslcmop, db_vnfr, scaling_info, stage
6709 )
6710 scaling_info.pop("vdu-create", None)
6711 scaling_info.pop("vdu-delete", None)
6712
6713 scale_process = None
6714 # SCALE RO - END
6715
6716 # SCALE KDU - BEGIN
6717 if scaling_info.get("kdu-create") or scaling_info.get("kdu-delete"):
6718 scale_process = "KDU"
6719 await self._scale_kdu(
6720 logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
6721 )
6722 scaling_info.pop("kdu-create", None)
6723 scaling_info.pop("kdu-delete", None)
6724
6725 scale_process = None
6726 # SCALE KDU - END
6727
6728 if db_nsr_update:
6729 self.update_db_2("nsrs", nsr_id, db_nsr_update)
6730
6731 # SCALE-UP VCA - BEGIN
6732 if vca_scaling_info:
6733 step = db_nslcmop_update[
6734 "detailed-status"
6735 ] = "Creating new execution environments"
6736 scale_process = "VCA"
6737 for vca_info in vca_scaling_info:
6738 if vca_info["type"] == "create" and not vca_info.get("osm_kdu_id"):
6739 member_vnf_index = str(vca_info["member-vnf-index"])
6740 self.logger.debug(
6741 logging_text + "vdu info: {}".format(vca_info)
6742 )
6743 vnfd_id = db_vnfr["vnfd-ref"]
6744 if vca_info.get("osm_vdu_id"):
6745 vdu_index = int(vca_info["vdu_index"])
6746 deploy_params = {"OSM": get_osm_params(db_vnfr)}
6747 if db_vnfr.get("additionalParamsForVnf"):
6748 deploy_params.update(
6749 parse_yaml_strings(
6750 db_vnfr["additionalParamsForVnf"].copy()
6751 )
6752 )
6753 descriptor_config = get_configuration(
6754 db_vnfd, db_vnfd["id"]
6755 )
6756 if descriptor_config:
6757 vdu_id = None
6758 vdu_name = None
6759 kdu_name = None
6760 self._deploy_n2vc(
6761 logging_text=logging_text
6762 + "member_vnf_index={} ".format(member_vnf_index),
6763 db_nsr=db_nsr,
6764 db_vnfr=db_vnfr,
6765 nslcmop_id=nslcmop_id,
6766 nsr_id=nsr_id,
6767 nsi_id=nsi_id,
6768 vnfd_id=vnfd_id,
6769 vdu_id=vdu_id,
6770 kdu_name=kdu_name,
6771 member_vnf_index=member_vnf_index,
6772 vdu_index=vdu_index,
6773 vdu_name=vdu_name,
6774 deploy_params=deploy_params,
6775 descriptor_config=descriptor_config,
6776 base_folder=base_folder,
6777 task_instantiation_info=tasks_dict_info,
6778 stage=stage,
6779 )
6780 vdu_id = vca_info["osm_vdu_id"]
6781 vdur = find_in_list(
6782 db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id
6783 )
6784 descriptor_config = get_configuration(db_vnfd, vdu_id)
6785 if vdur.get("additionalParams"):
6786 deploy_params_vdu = parse_yaml_strings(
6787 vdur["additionalParams"]
6788 )
6789 else:
6790 deploy_params_vdu = deploy_params
6791 deploy_params_vdu["OSM"] = get_osm_params(
6792 db_vnfr, vdu_id, vdu_count_index=vdu_index
6793 )
6794 if descriptor_config:
6795 vdu_name = None
6796 kdu_name = None
6797 stage[
6798 1
6799 ] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6800 member_vnf_index, vdu_id, vdu_index
6801 )
6802 stage[2] = step = "Scaling out VCA"
6803 self._write_op_status(op_id=nslcmop_id, stage=stage)
6804 self._deploy_n2vc(
6805 logging_text=logging_text
6806 + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
6807 member_vnf_index, vdu_id, vdu_index
6808 ),
6809 db_nsr=db_nsr,
6810 db_vnfr=db_vnfr,
6811 nslcmop_id=nslcmop_id,
6812 nsr_id=nsr_id,
6813 nsi_id=nsi_id,
6814 vnfd_id=vnfd_id,
6815 vdu_id=vdu_id,
6816 kdu_name=kdu_name,
6817 member_vnf_index=member_vnf_index,
6818 vdu_index=vdu_index,
6819 vdu_name=vdu_name,
6820 deploy_params=deploy_params_vdu,
6821 descriptor_config=descriptor_config,
6822 base_folder=base_folder,
6823 task_instantiation_info=tasks_dict_info,
6824 stage=stage,
6825 )
6826 # SCALE-UP VCA - END
6827 scale_process = None
6828
6829 # POST-SCALE BEGIN
6830 # execute primitive service POST-SCALING
6831 step = "Executing post-scale vnf-config-primitive"
6832 if scaling_descriptor.get("scaling-config-action"):
6833 for scaling_config_action in scaling_descriptor[
6834 "scaling-config-action"
6835 ]:
6836 if (
6837 scaling_config_action.get("trigger") == "post-scale-in"
6838 and scaling_type == "SCALE_IN"
6839 ) or (
6840 scaling_config_action.get("trigger") == "post-scale-out"
6841 and scaling_type == "SCALE_OUT"
6842 ):
6843 vnf_config_primitive = scaling_config_action[
6844 "vnf-config-primitive-name-ref"
6845 ]
6846 step = db_nslcmop_update[
6847 "detailed-status"
6848 ] = "executing post-scale scaling-config-action '{}'".format(
6849 vnf_config_primitive
6850 )
6851
6852 vnfr_params = {"VDU_SCALE_INFO": scaling_info}
6853 if db_vnfr.get("additionalParamsForVnf"):
6854 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
6855
6856 # look for primitive
6857 for config_primitive in (
6858 get_configuration(db_vnfd, db_vnfd["id"]) or {}
6859 ).get("config-primitive", ()):
6860 if config_primitive["name"] == vnf_config_primitive:
6861 break
6862 else:
6863 raise LcmException(
6864 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
6865 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
6866 "config-primitive".format(
6867 scaling_group, vnf_config_primitive
6868 )
6869 )
6870 scale_process = "VCA"
6871 db_nsr_update["config-status"] = "configuring post-scaling"
6872 primitive_params = self._map_primitive_params(
6873 config_primitive, {}, vnfr_params
6874 )
6875
6876 # Post-scale retry check: Check if this sub-operation has been executed before
6877 op_index = self._check_or_add_scale_suboperation(
6878 db_nslcmop,
6879 vnf_index,
6880 vnf_config_primitive,
6881 primitive_params,
6882 "POST-SCALE",
6883 )
6884 if op_index == self.SUBOPERATION_STATUS_SKIP:
6885 # Skip sub-operation
6886 result = "COMPLETED"
6887 result_detail = "Done"
6888 self.logger.debug(
6889 logging_text
6890 + "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
6891 vnf_config_primitive, result, result_detail
6892 )
6893 )
6894 else:
6895 if op_index == self.SUBOPERATION_STATUS_NEW:
6896 # New sub-operation: Get index of this sub-operation
6897 op_index = (
6898 len(db_nslcmop.get("_admin", {}).get("operations"))
6899 - 1
6900 )
6901 self.logger.debug(
6902 logging_text
6903 + "vnf_config_primitive={} New sub-operation".format(
6904 vnf_config_primitive
6905 )
6906 )
6907 else:
6908 # retry: Get registered params for this existing sub-operation
6909 op = db_nslcmop.get("_admin", {}).get("operations", [])[
6910 op_index
6911 ]
6912 vnf_index = op.get("member_vnf_index")
6913 vnf_config_primitive = op.get("primitive")
6914 primitive_params = op.get("primitive_params")
6915 self.logger.debug(
6916 logging_text
6917 + "vnf_config_primitive={} Sub-operation retry".format(
6918 vnf_config_primitive
6919 )
6920 )
6921 # Execute the primitive, either with new (first-time) or registered (reintent) args
6922 ee_descriptor_id = config_primitive.get(
6923 "execution-environment-ref"
6924 )
6925 primitive_name = config_primitive.get(
6926 "execution-environment-primitive", vnf_config_primitive
6927 )
6928 ee_id, vca_type = self._look_for_deployed_vca(
6929 nsr_deployed["VCA"],
6930 member_vnf_index=vnf_index,
6931 vdu_id=None,
6932 vdu_count_index=None,
6933 ee_descriptor_id=ee_descriptor_id,
6934 )
6935 result, result_detail = await self._ns_execute_primitive(
6936 ee_id,
6937 primitive_name,
6938 primitive_params,
6939 vca_type=vca_type,
6940 vca_id=vca_id,
6941 )
6942 self.logger.debug(
6943 logging_text
6944 + "vnf_config_primitive={} Done with result {} {}".format(
6945 vnf_config_primitive, result, result_detail
6946 )
6947 )
6948 # Update operationState = COMPLETED | FAILED
6949 self._update_suboperation_status(
6950 db_nslcmop, op_index, result, result_detail
6951 )
6952
6953 if result == "FAILED":
6954 raise LcmException(result_detail)
6955 db_nsr_update["config-status"] = old_config_status
6956 scale_process = None
6957 # POST-SCALE END
6958
6959 db_nsr_update[
6960 "detailed-status"
6961 ] = "" # "scaled {} {}".format(scaling_group, scaling_type)
6962 db_nsr_update["operational-status"] = (
6963 "running"
6964 if old_operational_status == "failed"
6965 else old_operational_status
6966 )
6967 db_nsr_update["config-status"] = old_config_status
6968 return
6969 except (
6970 ROclient.ROClientException,
6971 DbException,
6972 LcmException,
6973 NgRoException,
6974 ) as e:
6975 self.logger.error(logging_text + "Exit Exception {}".format(e))
6976 exc = e
6977 except asyncio.CancelledError:
6978 self.logger.error(
6979 logging_text + "Cancelled Exception while '{}'".format(step)
6980 )
6981 exc = "Operation was cancelled"
6982 except Exception as e:
6983 exc = traceback.format_exc()
6984 self.logger.critical(
6985 logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
6986 exc_info=True,
6987 )
6988 finally:
6989 self._write_ns_status(
6990 nsr_id=nsr_id,
6991 ns_state=None,
6992 current_operation="IDLE",
6993 current_operation_id=None,
6994 )
6995 if tasks_dict_info:
6996 stage[1] = "Waiting for instantiate pending tasks."
6997 self.logger.debug(logging_text + stage[1])
6998 exc = await self._wait_for_tasks(
6999 logging_text,
7000 tasks_dict_info,
7001 self.timeout_ns_deploy,
7002 stage,
7003 nslcmop_id,
7004 nsr_id=nsr_id,
7005 )
7006 if exc:
7007 db_nslcmop_update[
7008 "detailed-status"
7009 ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
7010 nslcmop_operation_state = "FAILED"
7011 if db_nsr:
7012 db_nsr_update["operational-status"] = old_operational_status
7013 db_nsr_update["config-status"] = old_config_status
7014 db_nsr_update["detailed-status"] = ""
7015 if scale_process:
7016 if "VCA" in scale_process:
7017 db_nsr_update["config-status"] = "failed"
7018 if "RO" in scale_process:
7019 db_nsr_update["operational-status"] = "failed"
7020 db_nsr_update[
7021 "detailed-status"
7022 ] = "FAILED scaling nslcmop={} {}: {}".format(
7023 nslcmop_id, step, exc
7024 )
7025 else:
7026 error_description_nslcmop = None
7027 nslcmop_operation_state = "COMPLETED"
7028 db_nslcmop_update["detailed-status"] = "Done"
7029
7030 self._write_op_status(
7031 op_id=nslcmop_id,
7032 stage="",
7033 error_message=error_description_nslcmop,
7034 operation_state=nslcmop_operation_state,
7035 other_update=db_nslcmop_update,
7036 )
7037 if db_nsr:
7038 self._write_ns_status(
7039 nsr_id=nsr_id,
7040 ns_state=None,
7041 current_operation="IDLE",
7042 current_operation_id=None,
7043 other_update=db_nsr_update,
7044 )
7045
7046 if nslcmop_operation_state:
7047 try:
7048 msg = {
7049 "nsr_id": nsr_id,
7050 "nslcmop_id": nslcmop_id,
7051 "operationState": nslcmop_operation_state,
7052 }
7053 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
7054 except Exception as e:
7055 self.logger.error(
7056 logging_text + "kafka_write notification Exception {}".format(e)
7057 )
7058 self.logger.debug(logging_text + "Exit")
7059 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
7060
    async def _scale_kdu(
        self, logging_text, nsr_id, nsr_deployed, db_vnfd, vca_id, scaling_info
    ):
        """
        Scale (in or out) the KDUs listed in scaling_info through the K8s connectors.

        For each KDU instance: on a "delete" (scale-in) request the KDU's
        terminate-config-primitives are executed first; then the K8s resource is
        scaled; on a "create" (scale-out) request the initial-config-primitives
        are executed afterwards.

        :param logging_text: prefix text to use at logging
        :param nsr_id: nsr identity
        :param nsr_deployed: content of nsr _admin.deployed, used to locate the KDU
        :param db_vnfd: vnfd descriptor of the KDU owner
        :param vca_id: VCA id passed through to the K8s connector calls
        :param scaling_info: dict with "kdu-create" or "kdu-delete" maps of kdu_name -> list
        :return: None. Raises on connector error or timeout.
        """
        # NOTE(review): only one of the two maps is processed; assumes a single
        # request never carries both kdu-create and kdu-delete — TODO confirm.
        _scaling_info = scaling_info.get("kdu-create") or scaling_info.get("kdu-delete")
        for kdu_name in _scaling_info:
            for kdu_scaling_info in _scaling_info[kdu_name]:
                # locate the deployed KDU record and its position in _admin.deployed.K8s
                deployed_kdu, index = get_deployed_kdu(
                    nsr_deployed, kdu_name, kdu_scaling_info["member-vnf-index"]
                )
                cluster_uuid = deployed_kdu["k8scluster-uuid"]
                kdu_instance = deployed_kdu["kdu-instance"]
                kdu_model = deployed_kdu.get("kdu-model")
                scale = int(kdu_scaling_info["scale"])
                k8s_cluster_type = kdu_scaling_info["k8s-cluster-type"]

                # db path the connector uses to report status into the nsr
                db_dict = {
                    "collection": "nsrs",
                    "filter": {"_id": nsr_id},
                    "path": "_admin.deployed.K8s.{}".format(index),
                }

                step = "scaling application {}".format(
                    kdu_scaling_info["resource-name"]
                )
                self.logger.debug(logging_text + step)

                # scale-in: run terminate primitives BEFORE shrinking, but only
                # when there is no juju execution environment for this kdu
                if kdu_scaling_info["type"] == "delete":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("terminate-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        terminate_config_primitive_list = kdu_config.get(
                            "terminate-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        terminate_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for (
                            terminate_config_primitive
                        ) in terminate_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                terminate_config_primitive, {}, {}
                            )
                            step = "execute terminate config primitive"
                            self.logger.debug(logging_text + step)
                            # hard 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=terminate_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )

                # actual scale operation (both directions go through here)
                await asyncio.wait_for(
                    self.k8scluster_map[k8s_cluster_type].scale(
                        kdu_instance,
                        scale,
                        kdu_scaling_info["resource-name"],
                        vca_id=vca_id,
                        cluster_uuid=cluster_uuid,
                        kdu_model=kdu_model,
                        atomic=True,
                        db_dict=db_dict,
                    ),
                    timeout=self.timeout_vca_on_error,
                )

                # scale-out: run initial primitives AFTER growing, same juju guard
                if kdu_scaling_info["type"] == "create":
                    kdu_config = get_configuration(db_vnfd, kdu_name)
                    if (
                        kdu_config
                        and kdu_config.get("initial-config-primitive")
                        and get_juju_ee_ref(db_vnfd, kdu_name) is None
                    ):
                        initial_config_primitive_list = kdu_config.get(
                            "initial-config-primitive"
                        )
                        # primitives must run in ascending "seq" order
                        initial_config_primitive_list.sort(
                            key=lambda val: int(val["seq"])
                        )

                        for initial_config_primitive in initial_config_primitive_list:
                            primitive_params_ = self._map_primitive_params(
                                initial_config_primitive, {}, {}
                            )
                            step = "execute initial config primitive"
                            self.logger.debug(logging_text + step)
                            # hard 10-minute cap per primitive execution
                            await asyncio.wait_for(
                                self.k8scluster_map[k8s_cluster_type].exec_primitive(
                                    cluster_uuid=cluster_uuid,
                                    kdu_instance=kdu_instance,
                                    primitive_name=initial_config_primitive["name"],
                                    params=primitive_params_,
                                    db_dict=db_dict,
                                    vca_id=vca_id,
                                ),
                                timeout=600,
                            )
7166
7167 async def _scale_ng_ro(
7168 self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage
7169 ):
7170 nsr_id = db_nslcmop["nsInstanceId"]
7171 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
7172 db_vnfrs = {}
7173
7174 # read from db: vnfd's for every vnf
7175 db_vnfds = []
7176
7177 # for each vnf in ns, read vnfd
7178 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
7179 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
7180 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
7181 # if we haven't this vnfd, read it from db
7182 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
7183 # read from db
7184 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
7185 db_vnfds.append(vnfd)
7186 n2vc_key = self.n2vc.get_public_key()
7187 n2vc_key_list = [n2vc_key]
7188 self.scale_vnfr(
7189 db_vnfr,
7190 vdu_scaling_info.get("vdu-create"),
7191 vdu_scaling_info.get("vdu-delete"),
7192 mark_delete=True,
7193 )
7194 # db_vnfr has been updated, update db_vnfrs to use it
7195 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
7196 await self._instantiate_ng_ro(
7197 logging_text,
7198 nsr_id,
7199 db_nsd,
7200 db_nsr,
7201 db_nslcmop,
7202 db_vnfrs,
7203 db_vnfds,
7204 n2vc_key_list,
7205 stage=stage,
7206 start_deploy=time(),
7207 timeout_ns_deploy=self.timeout_ns_deploy,
7208 )
7209 if vdu_scaling_info.get("vdu-delete"):
7210 self.scale_vnfr(
7211 db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False
7212 )
7213
7214 async def extract_prometheus_scrape_jobs(
7215 self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip
7216 ):
7217 # look if exist a file called 'prometheus*.j2' and
7218 artifact_content = self.fs.dir_ls(artifact_path)
7219 job_file = next(
7220 (
7221 f
7222 for f in artifact_content
7223 if f.startswith("prometheus") and f.endswith(".j2")
7224 ),
7225 None,
7226 )
7227 if not job_file:
7228 return
7229 with self.fs.file_open((artifact_path, job_file), "r") as f:
7230 job_data = f.read()
7231
7232 # TODO get_service
7233 _, _, service = ee_id.partition(".") # remove prefix "namespace."
7234 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
7235 host_port = "80"
7236 vnfr_id = vnfr_id.replace("-", "")
7237 variables = {
7238 "JOB_NAME": vnfr_id,
7239 "TARGET_IP": target_ip,
7240 "EXPORTER_POD_IP": host_name,
7241 "EXPORTER_POD_PORT": host_port,
7242 }
7243 job_list = parse_job(job_data, variables)
7244 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
7245 for job in job_list:
7246 if (
7247 not isinstance(job.get("job_name"), str)
7248 or vnfr_id not in job["job_name"]
7249 ):
7250 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
7251 job["nsr_id"] = nsr_id
7252 job["vnfr_id"] = vnfr_id
7253 return job_list
7254
7255 async def rebuild_start_stop(self, nsr_id, nslcmop_id, vnf_id, additional_param, operation_type):
7256 logging_text = "Task ns={} {}={} ".format(nsr_id, operation_type, nslcmop_id)
7257 self.logger.info(logging_text + "Enter")
7258 stage = ["Preparing the environment", ""]
7259 # database nsrs record
7260 db_nsr_update = {}
7261 vdu_vim_name = None
7262 vim_vm_id = None
7263 # in case of error, indicates what part of scale was failed to put nsr at error status
7264 start_deploy = time()
7265 try:
7266 db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_id})
7267 vim_account_id = db_vnfr.get("vim-account-id")
7268 vim_info_key = "vim:" + vim_account_id
7269 vdur = find_in_list(
7270 db_vnfr["vdur"], lambda vdu: vdu["count-index"] == additional_param["count-index"]
7271 )
7272 if vdur:
7273 vdu_vim_name = vdur["name"]
7274 vim_vm_id = vdur["vim_info"][vim_info_key]["vim_id"]
7275 target_vim, _ = next(k_v for k_v in vdur["vim_info"].items())
7276 self.logger.info("vdu_vim_name >> {} ".format(vdu_vim_name))
7277 # wait for any previous tasks in process
7278 stage[1] = "Waiting for previous operations to terminate"
7279 self.logger.info(stage[1])
7280 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
7281
7282 stage[1] = "Reading from database."
7283 self.logger.info(stage[1])
7284 self._write_ns_status(
7285 nsr_id=nsr_id,
7286 ns_state=None,
7287 current_operation=operation_type.upper(),
7288 current_operation_id=nslcmop_id
7289 )
7290 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7291
7292 # read from db: ns
7293 stage[1] = "Getting nsr={} from db.".format(nsr_id)
7294 db_nsr_update["operational-status"] = operation_type
7295 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7296 # Payload for RO
7297 desc = {
7298 operation_type: {
7299 "vim_vm_id": vim_vm_id,
7300 "vnf_id": vnf_id,
7301 "vdu_index": additional_param["count-index"],
7302 "vdu_id": vdur["id"],
7303 "target_vim": target_vim,
7304 "vim_account_id": vim_account_id
7305 }
7306 }
7307 stage[1] = "Sending rebuild request to RO... {}".format(desc)
7308 self._write_op_status(op_id=nslcmop_id, stage=stage, queuePosition=0)
7309 self.logger.info("ro nsr id: {}".format(nsr_id))
7310 result_dict = await self.RO.operate(nsr_id, desc, operation_type)
7311 self.logger.info("response from RO: {}".format(result_dict))
7312 action_id = result_dict["action_id"]
7313 await self._wait_ng_ro(
7314 nsr_id, action_id, nslcmop_id, start_deploy,
7315 self.timeout_operate, None, "start_stop_rebuild",
7316 )
7317 return "COMPLETED", "Done"
7318 except (ROclient.ROClientException, DbException, LcmException) as e:
7319 self.logger.error("Exit Exception {}".format(e))
7320 exc = e
7321 except asyncio.CancelledError:
7322 self.logger.error("Cancelled Exception while '{}'".format(stage))
7323 exc = "Operation was cancelled"
7324 except Exception as e:
7325 exc = traceback.format_exc()
7326 self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
7327 return "FAILED", "Error in operate VNF {}".format(exc)
7328
7329 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7330 """
7331 Get VCA Cloud and VCA Cloud Credentials for the VIM account
7332
7333 :param: vim_account_id: VIM Account ID
7334
7335 :return: (cloud_name, cloud_credential)
7336 """
7337 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7338 return config.get("vca_cloud"), config.get("vca_cloud_credential")
7339
7340 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
7341 """
7342 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
7343
7344 :param: vim_account_id: VIM Account ID
7345
7346 :return: (cloud_name, cloud_credential)
7347 """
7348 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
7349 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
7350
7351 async def migrate(self, nsr_id, nslcmop_id):
7352 """
7353 Migrate VNFs and VDUs instances in a NS
7354
7355 :param: nsr_id: NS Instance ID
7356 :param: nslcmop_id: nslcmop ID of migrate
7357
7358 """
7359 # Try to lock HA task here
7360 task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
7361 if not task_is_locked_by_me:
7362 return
7363 logging_text = "Task ns={} migrate ".format(nsr_id)
7364 self.logger.debug(logging_text + "Enter")
7365 # get all needed from database
7366 db_nslcmop = None
7367 db_nslcmop_update = {}
7368 nslcmop_operation_state = None
7369 db_nsr_update = {}
7370 target = {}
7371 exc = None
7372 # in case of error, indicates what part of scale was failed to put nsr at error status
7373 start_deploy = time()
7374
7375 try:
7376 # wait for any previous tasks in process
7377 step = "Waiting for previous operations to terminate"
7378 await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
7379
7380 self._write_ns_status(
7381 nsr_id=nsr_id,
7382 ns_state=None,
7383 current_operation="MIGRATING",
7384 current_operation_id=nslcmop_id,
7385 )
7386 step = "Getting nslcmop from database"
7387 self.logger.debug(
7388 step + " after having waited for previous tasks to be completed"
7389 )
7390 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
7391 migrate_params = db_nslcmop.get("operationParams")
7392
7393 target = {}
7394 target.update(migrate_params)
7395 desc = await self.RO.migrate(nsr_id, target)
7396 self.logger.debug("RO return > {}".format(desc))
7397 action_id = desc["action_id"]
7398 await self._wait_ng_ro(
7399 nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_migrate,
7400 operation="migrate"
7401 )
7402 except (ROclient.ROClientException, DbException, LcmException) as e:
7403 self.logger.error("Exit Exception {}".format(e))
7404 exc = e
7405 except asyncio.CancelledError:
7406 self.logger.error("Cancelled Exception while '{}'".format(step))
7407 exc = "Operation was cancelled"
7408 except Exception as e:
7409 exc = traceback.format_exc()
7410 self.logger.critical(
7411 "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True
7412 )
7413 finally:
7414 self._write_ns_status(
7415 nsr_id=nsr_id,
7416 ns_state=None,
7417 current_operation="IDLE",
7418 current_operation_id=None,
7419 )
7420 if exc:
7421 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
7422 nslcmop_operation_state = "FAILED"
7423 else:
7424 nslcmop_operation_state = "COMPLETED"
7425 db_nslcmop_update["detailed-status"] = "Done"
7426 db_nsr_update["detailed-status"] = "Done"
7427
7428 self._write_op_status(
7429 op_id=nslcmop_id,
7430 stage="",
7431 error_message="",
7432 operation_state=nslcmop_operation_state,
7433 other_update=db_nslcmop_update,
7434 )
7435 if nslcmop_operation_state:
7436 try:
7437 msg = {
7438 "nsr_id": nsr_id,
7439 "nslcmop_id": nslcmop_id,
7440 "operationState": nslcmop_operation_state,
7441 }
7442 await self.msg.aiowrite("ns", "migrated", msg, loop=self.loop)
7443 except Exception as e:
7444 self.logger.error(
7445 logging_text + "kafka_write notification Exception {}".format(e)
7446 )
7447 self.logger.debug(logging_text + "Exit")
7448 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_migrate")
7449
7450
    async def heal(self, nsr_id, nslcmop_id):
        """
        Heal NS: launches the RO healing task and, in parallel, redeploys or
        re-keys the N2VC execution environments of every healed VDU; both are
        joined in the finally block before the operation result is written.

        :param nsr_id: ns instance to heal
        :param nslcmop_id: operation to run
        :return:
        """

        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} heal={} ".format(nsr_id, nslcmop_id)
        stage = ["", "", ""]
        tasks_dict_info = {}
        # ^ stage, step, VIM progress
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nsr = None
        db_nslcmop_update = {}
        db_nsr_update = {}
        db_vnfrs = {}  # vnf's info indexed by _id
        exc = None
        # previous statuses are restored on failure (see finally block)
        old_operational_status = ""
        old_config_status = ""
        nsi_id = None
        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA("ns", "nslcmops", nslcmop_id)
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="HEALING",
                current_operation_id=nslcmop_id,
            )

            step = "Getting nslcmop from database"
            self.logger.debug(
                step + " after having waited for previous tasks to be completed"
            )
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})

            step = "Getting nsr from database"
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            old_operational_status = db_nsr["operational-status"]
            old_config_status = db_nsr["config-status"]

            db_nsr_update = {
                "_admin.deployed.RO.operational-status": "healing",
            }
            self.update_db_2("nsrs", nsr_id, db_nsr_update)

            # RO healing runs concurrently with the VCA work below; it is
            # awaited together with the N2VC tasks in the finally block
            step = "Sending heal order to VIM"
            task_ro = asyncio.ensure_future(
                self.heal_RO(
                    logging_text=logging_text,
                    nsr_id=nsr_id,
                    db_nslcmop=db_nslcmop,
                    stage=stage,
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "heal_RO", task_ro)
            tasks_dict_info[task_ro] = "Healing at VIM"

            # VCA tasks
            # read from db: nsd
            stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
            self.logger.debug(logging_text + stage[1])
            nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
            self.fs.sync(db_nsr["nsd-id"])
            db_nsr["nsd"] = nsd
            # read from db: vnfr's of this ns
            step = "Getting vnfrs from db"
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            for vnfr in db_vnfrs_list:
                db_vnfrs[vnfr["_id"]] = vnfr
            self.logger.debug("ns.heal db_vnfrs={}".format(db_vnfrs))

            # Check for each target VNF
            target_list = db_nslcmop.get("operationParams", {}).get("healVnfData", {})
            for target_vnf in target_list:
                # Find this VNF in the list from DB
                vnfr_id = target_vnf.get("vnfInstanceId", None)
                if vnfr_id:
                    db_vnfr = db_vnfrs[vnfr_id]
                    vnfd_id = db_vnfr.get("vnfd-id")
                    vnfd_ref = db_vnfr.get("vnfd-ref")
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    base_folder = vnfd["_admin"]["storage"]
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdu_name = None
                    nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
                    member_vnf_index = db_vnfr.get("member-vnf-index-ref")

                    # Check each target VDU and deploy N2VC
                    # NOTE(review): additionalParams.get("vdu", None) may be None,
                    # which would raise TypeError when iterated — confirm callers
                    # always provide a "vdu" list
                    for target_vdu in target_vnf["additionalParams"].get("vdu", None):
                        deploy_params_vdu = target_vdu
                        # Set run-day1 vnf level value if not vdu level value exists
                        if not deploy_params_vdu.get("run-day1") and target_vnf["additionalParams"].get("run-day1"):
                            deploy_params_vdu["run-day1"] = target_vnf["additionalParams"].get("run-day1")
                        vdu_name = target_vdu.get("vdu-id", None)
                        # TODO: Get vdu_id from vdud.
                        vdu_id = vdu_name
                        # For multi instance VDU count-index is mandatory
                        # For single session VDU count-indes is 0
                        vdu_index = target_vdu.get("count-index",0)

                        # n2vc_redesign STEP 3 to 6 Deploy N2VC
                        stage[1] = "Deploying Execution Environments."
                        self.logger.debug(logging_text + stage[1])

                        # VNF Level charm. Normal case when proxy charms.
                        # If target instance is management machine continue with actions: recreate EE for native charms or reinject juju key for proxy charms.
                        descriptor_config = get_configuration(vnfd, vnfd_ref)
                        if descriptor_config:
                            # Continue if healed machine is management machine
                            vnf_ip_address = db_vnfr.get("ip-address")
                            target_instance = None
                            for instance in db_vnfr.get("vdur", None):
                                if ( instance["vdu-name"] == vdu_name and instance["count-index"] == vdu_index ):
                                    target_instance = instance
                                    break
                            # NOTE(review): target_instance stays None when no vdur
                            # matches, making .get raise AttributeError — confirm a
                            # match is guaranteed at this point
                            if vnf_ip_address == target_instance.get("ip-address"):
                                self._heal_n2vc(
                                    logging_text=logging_text
                                    + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                        member_vnf_index, vdu_name, vdu_index
                                    ),
                                    db_nsr=db_nsr,
                                    db_vnfr=db_vnfr,
                                    nslcmop_id=nslcmop_id,
                                    nsr_id=nsr_id,
                                    nsi_id=nsi_id,
                                    vnfd_id=vnfd_ref,
                                    vdu_id=None,
                                    kdu_name=None,
                                    member_vnf_index=member_vnf_index,
                                    vdu_index=0,
                                    vdu_name=None,
                                    deploy_params=deploy_params_vdu,
                                    descriptor_config=descriptor_config,
                                    base_folder=base_folder,
                                    task_instantiation_info=tasks_dict_info,
                                    stage=stage,
                                )

                        # VDU Level charm. Normal case with native charms.
                        descriptor_config = get_configuration(vnfd, vdu_name)
                        if descriptor_config:
                            self._heal_n2vc(
                                logging_text=logging_text
                                + "member_vnf_index={}, vdu_name={}, vdu_index={} ".format(
                                    member_vnf_index, vdu_name, vdu_index
                                ),
                                db_nsr=db_nsr,
                                db_vnfr=db_vnfr,
                                nslcmop_id=nslcmop_id,
                                nsr_id=nsr_id,
                                nsi_id=nsi_id,
                                vnfd_id=vnfd_ref,
                                vdu_id=vdu_id,
                                kdu_name=kdu_name,
                                member_vnf_index=member_vnf_index,
                                vdu_index=vdu_index,
                                vdu_name=vdu_name,
                                deploy_params=deploy_params_vdu,
                                descriptor_config=descriptor_config,
                                base_folder=base_folder,
                                task_instantiation_info=tasks_dict_info,
                                stage=stage,
                            )

        except (
            ROclient.ROClientException,
            DbException,
            LcmException,
            NgRoException,
        ) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(
                logging_text + "Cancelled Exception while '{}'".format(step)
            )
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(
                logging_text + "Exit Exception {} {}".format(type(e).__name__, e),
                exc_info=True,
            )
        finally:
            # join the RO task and all N2VC tasks spawned above
            if tasks_dict_info:
                stage[1] = "Waiting for healing pending tasks."
                self.logger.debug(logging_text + stage[1])
                exc = await self._wait_for_tasks(
                    logging_text,
                    tasks_dict_info,
                    self.timeout_ns_deploy,
                    stage,
                    nslcmop_id,
                    nsr_id=nsr_id,
                )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
                if db_nsr:
                    # roll the nsr statuses back, then mark whichever side
                    # (VCA vs RO) still has a pending/failed task as "failed"
                    db_nsr_update["operational-status"] = old_operational_status
                    db_nsr_update["config-status"] = old_config_status
                    db_nsr_update[
                        "detailed-status"
                    ] = "FAILED healing nslcmop={} {}: {}".format(
                        nslcmop_id, step, exc
                    )
                    for task, task_name in tasks_dict_info.items():
                        if not task.done() or task.cancelled() or task.exception():
                            if task_name.startswith(self.task_name_deploy_vca):
                                # A N2VC task is pending
                                db_nsr_update["config-status"] = "failed"
                            else:
                                # RO task is pending
                                db_nsr_update["operational-status"] = "failed"
            else:
                error_description_nslcmop = None
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["operational-status"] = "running"
                db_nsr_update["config-status"] = "configured"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message=error_description_nslcmop,
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=None,
                    current_operation="IDLE",
                    current_operation_id=None,
                    other_update=db_nsr_update,
                )

            # notify operation result through kafka
            if nslcmop_operation_state:
                try:
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "healed", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_heal")
7718
7719 async def heal_RO(
7720 self,
7721 logging_text,
7722 nsr_id,
7723 db_nslcmop,
7724 stage,
7725 ):
7726 """
7727 Heal at RO
7728 :param logging_text: preffix text to use at logging
7729 :param nsr_id: nsr identity
7730 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
7731 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
7732 :return: None or exception
7733 """
7734 def get_vim_account(vim_account_id):
7735 nonlocal db_vims
7736 if vim_account_id in db_vims:
7737 return db_vims[vim_account_id]
7738 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
7739 db_vims[vim_account_id] = db_vim
7740 return db_vim
7741
7742 try:
7743 start_heal = time()
7744 ns_params = db_nslcmop.get("operationParams")
7745 if ns_params and ns_params.get("timeout_ns_heal"):
7746 timeout_ns_heal = ns_params["timeout_ns_heal"]
7747 else:
7748 timeout_ns_heal = self.timeout.get(
7749 "ns_heal", self.timeout_ns_heal
7750 )
7751
7752 db_vims = {}
7753
7754 nslcmop_id = db_nslcmop["_id"]
7755 target = {
7756 "action_id": nslcmop_id,
7757 }
7758 self.logger.warning("db_nslcmop={} and timeout_ns_heal={}".format(db_nslcmop,timeout_ns_heal))
7759 target.update(db_nslcmop.get("operationParams", {}))
7760
7761 self.logger.debug("Send to RO > nsr_id={} target={}".format(nsr_id, target))
7762 desc = await self.RO.recreate(nsr_id, target)
7763 self.logger.debug("RO return > {}".format(desc))
7764 action_id = desc["action_id"]
7765 # waits for RO to complete because Reinjecting juju key at ro can find VM in state Deleted
7766 await self._wait_ng_ro(
7767 nsr_id, action_id, nslcmop_id, start_heal, timeout_ns_heal, stage,
7768 operation="healing"
7769 )
7770
7771 # Updating NSR
7772 db_nsr_update = {
7773 "_admin.deployed.RO.operational-status": "running",
7774 "detailed-status": " ".join(stage),
7775 }
7776 self.update_db_2("nsrs", nsr_id, db_nsr_update)
7777 self._write_op_status(nslcmop_id, stage)
7778 self.logger.debug(
7779 logging_text + "ns healed at RO. RO_id={}".format(action_id)
7780 )
7781
7782 except Exception as e:
7783 stage[2] = "ERROR healing at VIM"
7784 #self.set_vnfr_at_error(db_vnfrs, str(e))
7785 self.logger.error(
7786 "Error healing at VIM {}".format(e),
7787 exc_info=not isinstance(
7788 e,
7789 (
7790 ROclient.ROClientException,
7791 LcmException,
7792 DbException,
7793 NgRoException,
7794 ),
7795 ),
7796 )
7797 raise
7798
7799 def _heal_n2vc(
7800 self,
7801 logging_text,
7802 db_nsr,
7803 db_vnfr,
7804 nslcmop_id,
7805 nsr_id,
7806 nsi_id,
7807 vnfd_id,
7808 vdu_id,
7809 kdu_name,
7810 member_vnf_index,
7811 vdu_index,
7812 vdu_name,
7813 deploy_params,
7814 descriptor_config,
7815 base_folder,
7816 task_instantiation_info,
7817 stage,
7818 ):
7819 # launch instantiate_N2VC in a asyncio task and register task object
7820 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
7821 # if not found, create one entry and update database
7822 # fill db_nsr._admin.deployed.VCA.<index>
7823
7824 self.logger.debug(
7825 logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id)
7826 )
7827 if "execution-environment-list" in descriptor_config:
7828 ee_list = descriptor_config.get("execution-environment-list", [])
7829 elif "juju" in descriptor_config:
7830 ee_list = [descriptor_config] # ns charms
7831 else: # other types as script are not supported
7832 ee_list = []
7833
7834 for ee_item in ee_list:
7835 self.logger.debug(
7836 logging_text
7837 + "_deploy_n2vc ee_item juju={}, helm={}".format(
7838 ee_item.get("juju"), ee_item.get("helm-chart")
7839 )
7840 )
7841 ee_descriptor_id = ee_item.get("id")
7842 if ee_item.get("juju"):
7843 vca_name = ee_item["juju"].get("charm")
7844 vca_type = (
7845 "lxc_proxy_charm"
7846 if ee_item["juju"].get("charm") is not None
7847 else "native_charm"
7848 )
7849 if ee_item["juju"].get("cloud") == "k8s":
7850 vca_type = "k8s_proxy_charm"
7851 elif ee_item["juju"].get("proxy") is False:
7852 vca_type = "native_charm"
7853 elif ee_item.get("helm-chart"):
7854 vca_name = ee_item["helm-chart"]
7855 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
7856 vca_type = "helm"
7857 else:
7858 vca_type = "helm-v3"
7859 else:
7860 self.logger.debug(
7861 logging_text + "skipping non juju neither charm configuration"
7862 )
7863 continue
7864
7865 vca_index = -1
7866 for vca_index, vca_deployed in enumerate(
7867 db_nsr["_admin"]["deployed"]["VCA"]
7868 ):
7869 if not vca_deployed:
7870 continue
7871 if (
7872 vca_deployed.get("member-vnf-index") == member_vnf_index
7873 and vca_deployed.get("vdu_id") == vdu_id
7874 and vca_deployed.get("kdu_name") == kdu_name
7875 and vca_deployed.get("vdu_count_index", 0) == vdu_index
7876 and vca_deployed.get("ee_descriptor_id") == ee_descriptor_id
7877 ):
7878 break
7879 else:
7880 # not found, create one.
7881 target = (
7882 "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
7883 )
7884 if vdu_id:
7885 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
7886 elif kdu_name:
7887 target += "/kdu/{}".format(kdu_name)
7888 vca_deployed = {
7889 "target_element": target,
7890 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
7891 "member-vnf-index": member_vnf_index,
7892 "vdu_id": vdu_id,
7893 "kdu_name": kdu_name,
7894 "vdu_count_index": vdu_index,
7895 "operational-status": "init", # TODO revise
7896 "detailed-status": "", # TODO revise
7897 "step": "initial-deploy", # TODO revise
7898 "vnfd_id": vnfd_id,
7899 "vdu_name": vdu_name,
7900 "type": vca_type,
7901 "ee_descriptor_id": ee_descriptor_id,
7902 }
7903 vca_index += 1
7904
7905 # create VCA and configurationStatus in db
7906 db_dict = {
7907 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
7908 "configurationStatus.{}".format(vca_index): dict(),
7909 }
7910 self.update_db_2("nsrs", nsr_id, db_dict)
7911
7912 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
7913
7914 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
7915 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
7916 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
7917
7918 # Launch task
7919 task_n2vc = asyncio.ensure_future(
7920 self.heal_N2VC(
7921 logging_text=logging_text,
7922 vca_index=vca_index,
7923 nsi_id=nsi_id,
7924 db_nsr=db_nsr,
7925 db_vnfr=db_vnfr,
7926 vdu_id=vdu_id,
7927 kdu_name=kdu_name,
7928 vdu_index=vdu_index,
7929 deploy_params=deploy_params,
7930 config_descriptor=descriptor_config,
7931 base_folder=base_folder,
7932 nslcmop_id=nslcmop_id,
7933 stage=stage,
7934 vca_type=vca_type,
7935 vca_name=vca_name,
7936 ee_config_descriptor=ee_item,
7937 )
7938 )
7939 self.lcm_tasks.register(
7940 "ns",
7941 nsr_id,
7942 nslcmop_id,
7943 "instantiate_N2VC-{}".format(vca_index),
7944 task_n2vc,
7945 )
7946 task_instantiation_info[
7947 task_n2vc
7948 ] = self.task_name_deploy_vca + " {}.{}".format(
7949 member_vnf_index or "", vdu_id or ""
7950 )
7951
    async def heal_N2VC(
        self,
        logging_text,
        vca_index,
        nsi_id,
        db_nsr,
        db_vnfr,
        vdu_id,
        kdu_name,
        vdu_index,
        config_descriptor,
        deploy_params,
        base_folder,
        nslcmop_id,
        stage,
        vca_type,
        vca_name,
        ee_config_descriptor,
    ):
        """Heal-time counterpart of instantiate_N2VC for one VCA.

        After the underlying VM has been healed by RO, this task: for native
        charms, waits for the VM and re-registers the execution environment;
        reinstalls the configuration software; waits for RO to finish the
        healing operation; and, only when the operation parameter "run-day1"
        is set, re-executes the initial (Day-1) config primitives.

        :param logging_text: prefix for every log message of this task
        :param vca_index: index of this VCA inside db_nsr._admin.deployed.VCA
        :param nsi_id: network slice instance id (may be None); part of namespace
        :param db_nsr: NS record already read from database
        :param db_vnfr: VNF record, or None for NS-level charms
        :param vdu_id: VDU id when the charm is VDU-scoped, else None
        :param kdu_name: KDU name when the charm is KDU-scoped, else None
        :param vdu_index: VDU count-index (None/0 when not VDU-scoped)
        :param config_descriptor: descriptor section holding config primitives
            and config-access information
        :param deploy_params: extra params for primitives; may carry "run-day1";
            this dict is mutated (rw_mgmt_ip, ns_config_info are added)
        :param base_folder: package folder info used to locate the charm artifact
        :param nslcmop_id: id of the heal operation (for op status updates)
        :param stage: mutable list of stage texts, updated for op status
        :param vca_type: one of native_charm, lxc_proxy_charm, k8s_proxy_charm,
            helm, helm-v3
        :param vca_name: charm or helm-chart name
        :param ee_config_descriptor: execution-environment item of the descriptor
        :raises LcmException: any failure, wrapped with the failing step text
        """
        nsr_id = db_nsr["_id"]
        # dotted path prefix for partial updates of this VCA entry in the nsr record
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        # db_dict tells the VCA connector where to report its status in the db
        db_dict = {
            "collection": "nsrs",
            "filter": {"_id": nsr_id},
            "path": db_update_entry,
        }
        # "step" is kept up to date so the except clause can report where it failed
        step = ""
        try:

            element_type = "NS"
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # Juju model namespace: "<nsi_id>.<nsr_id>[.<vnfr>-<idx>[.<vdu>-<idx>]]"
            namespace = "{nsi}.{ns}".format(nsi=nsi_id if nsi_id else "", ns=nsr_id)

            if vca_type == "native_charm":
                index_number = 0
            else:
                index_number = vdu_index or 0

            # refine element type/namespace from NS down to VNF/VDU/KDU scope
            if vnfr_id:
                element_type = "VNF"
                element_under_configuration = vnfr_id
                namespace += ".{}-{}".format(vnfr_id, index_number)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, index_number)
                    element_type = "VDU"
                    element_under_configuration = "{}-{}".format(vdu_id, index_number)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = "KDU"
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path (charms vs helm-charts subfolder depends on vca_type)
            if base_folder["pkg-dir"]:
                artifact_path = "{}/{}/{}/{}".format(
                    base_folder["folder"],
                    base_folder["pkg-dir"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )
            else:
                artifact_path = "{}/Scripts/{}/{}/".format(
                    base_folder["folder"],
                    "charms"
                    if vca_type
                    in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm")
                    else "helm-charts",
                    vca_name,
                )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get(
                "initial-config-primitive"
            )

            self.logger.debug(
                "Initial config primitive list > {}".format(
                    initial_config_primitive_list
                )
            )

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(
                initial_config_primitive_list, vca_deployed, ee_descriptor_id
            )

            self.logger.debug(
                "Initial config primitive list #2 > {}".format(
                    initial_config_primitive_list
                )
            )
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists (reused for non-native charms when healing)
            ee_id = vca_deployed.get("ee_id")

            vca_id = self.get_vca_id(db_vnfr, db_nsr)
            # create or register execution environment in VCA. Only for native charms when healing
            if vca_type == "native_charm":
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                    logging_text,
                    nsr_id,
                    vnfr_id,
                    vdu_id,
                    vdu_index,
                    user=None,
                    pub_key=None,
                )
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(
                    config_descriptor, ("config-access", "ssh-access", "default-user")
                )
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException(
                        "Cannot determine the username neither with 'initial-config-primitive' nor with "
                        "'config-access.ssh-access.default-user'"
                    )
                credentials["username"] = username

                # n2vc_redesign STEP 3.2
                # TODO: Before healing at RO it is needed to destroy native charm units to be deleted.
                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status="REGISTERING",
                    element_under_configuration=element_under_configuration,
                    element_type=element_type,
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    vca_id=vca_id,
                )

                # update ee_id en db
                db_dict_ee_id = {
                    "_admin.deployed.VCA.{}.ee_id".format(vca_index): ee_id,
                }
                self.update_db_2("nsrs", nsr_id, db_dict_ee_id)

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            # Not sure if this need to be done when healing
            """
            ee_id_parts = ee_id.split(".")
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name
            """

            # n2vc_redesign STEP 3.3
            # Install configuration software. Only for native charms.
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status="INSTALLING SW",
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                #other_update=db_nsr_update,
                other_update=None,
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # the primitive named exactly "config" (if any) provides the charm config
                config_primitive = next(
                    (p for p in initial_config_primitive_list if p["name"] == "config"),
                    None,
                )
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive, {}, deploy_params
                    )
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=1,
                    vca_id=vca_id,
                    vca_type=vca_type,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2(
                "nsrs", nsr_id, {db_update_entry + "config_sw_installed": True}
            )

            # Not sure if this need to be done when healing
            """
            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(
                logging_text=logging_text,
                nsr_id=nsr_id,
                vca_type=vca_type,
                vca_index=vca_index,
            )
            """

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(
                    config_descriptor, ("config-access", "ssh-access", "required")
                ):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(
                        config_descriptor,
                        ("config-access", "ssh-access", "default-user"),
                    )
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(
                        ee_id=ee_id, db_dict=db_dict, vca_id=vca_id
                    )

                    step = "Insert public key into VM user={} ssh_key={}".format(
                        user, pub_key
                    )
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                # IMPORTANT: We need do wait for RO to complete healing operation.
                await self._wait_heal_ro(nsr_id,self.timeout_ns_heal)
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(
                            logging_text, nsr_id, vnfr_id, kdu_name
                        )
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(
                            logging_text,
                            nsr_id,
                            vnfr_id,
                            vdu_id,
                            vdu_index,
                            user=user,
                            pub_key=pub_key,
                        )
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + " VM_ip_address={}".format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # Day1 operations.
            # get run-day1 operation parameter; default is to NOT re-run Day-1 on heal
            runDay1 = deploy_params.get("run-day1",False)
            self.logger.debug(" Healing vnf={}, vdu={}, runDay1 ={}".format(vnfr_id,vdu_id,runDay1))
            if runDay1:
                # n2vc_redesign STEP 6  Execute initial config primitive
                step = "execute initial config primitive"

                # wait for dependent primitives execution (NS -> VNF -> VDU)
                if initial_config_primitive_list:
                    await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

                # stage, in function of element type: vdu, kdu, vnf or ns
                my_vca = vca_deployed_list[vca_index]
                if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                    # VDU or KDU
                    stage[0] = "Stage 3/5: running Day-1 primitives for VDU."
                elif my_vca.get("member-vnf-index"):
                    # VNF
                    stage[0] = "Stage 4/5: running Day-1 primitives for VNF."
                else:
                    # NS
                    stage[0] = "Stage 5/5: running Day-1 primitives for NS."

                self._write_configuration_status(
                    nsr_id=nsr_id, vca_index=vca_index, status="EXECUTING PRIMITIVE"
                )

                self._write_op_status(op_id=nslcmop_id, stage=stage)

                check_if_terminated_needed = True
                for initial_config_primitive in initial_config_primitive_list:
                    # adding information on the vca_deployed if it is a NS execution environment
                    if not vca_deployed["member-vnf-index"]:
                        deploy_params["ns_config_info"] = json.dumps(
                            self._get_ns_config_info(nsr_id)
                        )
                    # TODO check if already done
                    primitive_params_ = self._map_primitive_params(
                        initial_config_primitive, {}, deploy_params
                    )

                    step = "execute primitive '{}' params '{}'".format(
                        initial_config_primitive["name"], primitive_params_
                    )
                    self.logger.debug(logging_text + step)
                    await self.vca_map[vca_type].exec_primitive(
                        ee_id=ee_id,
                        primitive_name=initial_config_primitive["name"],
                        params_dict=primitive_params_,
                        db_dict=db_dict,
                        vca_id=vca_id,
                        vca_type=vca_type,
                    )
                    # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                    if check_if_terminated_needed:
                        if config_descriptor.get("terminate-config-primitive"):
                            self.update_db_2(
                                "nsrs", nsr_id, {db_update_entry + "needed_terminate": True}
                            )
                            check_if_terminated_needed = False

                    # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            # Not sure if this need to be done when healing
            """
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.extract_prometheus_scrape_jobs(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2(
                        "nsrs",
                        nsr_id,
                        {db_update_entry + "prometheus_jobs": prometheus_jobs},
                    )

                    for job in prometheus_jobs:
                        self.db.set_one(
                            "prometheus_jobs",
                            {"job_name": job["job_name"]},
                            job,
                            upsert=True,
                            fail_on_empty=False,
                        )

            """
            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="READY"
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            # unexpected exception types are logged with traceback; known ones
            # (DbException, N2VCException, LcmException, CancelledError) are not
            if not isinstance(
                e, (DbException, N2VCException, LcmException, asyncio.CancelledError)
            ):
                self.logger.error(
                    "Exception while {} : {}".format(step, e), exc_info=True
                )
            self._write_configuration_status(
                nsr_id=nsr_id, vca_index=vca_index, status="BROKEN"
            )
            raise LcmException("{} {}".format(step, e)) from e
8355
8356 async def _wait_heal_ro(
8357 self,
8358 nsr_id,
8359 timeout=600,
8360 ):
8361 start_time = time()
8362 while time() <= start_time + timeout:
8363 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
8364 operational_status_ro = db_nsr["_admin"]["deployed"]["RO"]["operational-status"]
8365 self.logger.debug("Wait Heal RO > {}".format(operational_status_ro))
8366 if operational_status_ro != "healing":
8367 break
8368 await asyncio.sleep(15, loop=self.loop)
8369 else: # timeout_ns_deploy
8370 raise NgRoException("Timeout waiting ns to deploy")
8371
    async def vertical_scale(self, nsr_id, nslcmop_id):
        """
        Vertical Scale the VDUs in a NS

        Delegates the operation to RO (self.RO.vertical_scale), waits for the
        returned action to finish, and always writes the final operation state
        and a "verticalscaled" kafka notification in the finally block.

        :param: nsr_id: NS Instance ID
        :param: nslcmop_id: nslcmop ID of the vertical scale operation

        """
        # Try to lock HA task here: only one LCM instance runs this nslcmop
        task_is_locked_by_me = self.lcm_tasks.lock_HA("ns", "nslcmops", nslcmop_id)
        if not task_is_locked_by_me:
            return
        logging_text = "Task ns={} vertical scale ".format(nsr_id)
        self.logger.debug(logging_text + "Enter")
        # get all needed from database
        db_nslcmop = None
        db_nslcmop_update = {}
        nslcmop_operation_state = None
        # NOTE(review): db_nsr_update is filled below but never persisted here;
        # confirm whether it should be written to the nsr record
        db_nsr_update = {}
        target = {}
        exc = None
        # in case of error, indicates what part of scale was failed to put nsr at error status
        start_deploy = time()

        try:
            # wait for any previous tasks in process
            step = "Waiting for previous operations to terminate"
            await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="VerticalScale",
                current_operation_id=nslcmop_id
            )
            step = "Getting nslcmop from database"
            self.logger.debug(step + " after having waited for previous tasks to be completed")
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operationParams = db_nslcmop.get("operationParams")
            # the RO target is just the operation parameters as received
            target = {}
            target.update(operationParams)
            desc = await self.RO.vertical_scale(nsr_id, target)
            self.logger.debug("RO return > {}".format(desc))
            action_id = desc["action_id"]
            # poll RO until the action finishes; "operation" selects the
            # proper op_status_map entry for verticalscale
            await self._wait_ng_ro(
                nsr_id, action_id, nslcmop_id, start_deploy, self.timeout_verticalscale,
                operation="verticalscale"
            )
        except (ROclient.ROClientException, DbException, LcmException) as e:
            self.logger.error("Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error("Cancelled Exception while '{}'".format(step))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical("Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
        finally:
            # always return NS to IDLE and report final operation state
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=None,
                current_operation="IDLE",
                current_operation_id=None,
            )
            if exc:
                db_nslcmop_update[
                    "detailed-status"
                ] = "FAILED {}: {}".format(step, exc)
                nslcmop_operation_state = "FAILED"
            else:
                nslcmop_operation_state = "COMPLETED"
                db_nslcmop_update["detailed-status"] = "Done"
                db_nsr_update["detailed-status"] = "Done"

            self._write_op_status(
                op_id=nslcmop_id,
                stage="",
                error_message="",
                operation_state=nslcmop_operation_state,
                other_update=db_nslcmop_update,
            )
            if nslcmop_operation_state:
                try:
                    # notify result on kafka; failures here are only logged
                    msg = {
                        "nsr_id": nsr_id,
                        "nslcmop_id": nslcmop_id,
                        "operationState": nslcmop_operation_state,
                    }
                    await self.msg.aiowrite("ns", "verticalscaled", msg, loop=self.loop)
                except Exception as e:
                    self.logger.error(
                        logging_text + "kafka_write notification Exception {}".format(e)
                    )
            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_verticalscale")