osm/LCM.git / osm_lcm / ns.py (commit 1bcf4c7a196c9004431f780a36a2acdc4a8d5a75)
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
26
27 from osm_lcm import ROclient
28 from osm_lcm.ng_ro import NgRoClient, NgRoException
29 from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
30 from osm_lcm.data_utils.nsd import get_vnf_profiles
31 from osm_lcm.data_utils.vnfd import get_vdu_list, get_vdu_profile, \
32 get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \
33 get_kdu_list, get_virtual_link_profiles, get_vdu, get_configuration, \
34 get_vdu_index, get_scaling_aspect, get_number_of_instances, get_juju_ee_ref
35 from osm_lcm.data_utils.list_utils import find_in_list
36 from osm_lcm.data_utils.vnfr import get_osm_params, get_vdur_index
37 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
38 from osm_lcm.data_utils.database.vim_account import VimAccountDB
39 from n2vc.k8s_helm_conn import K8sHelmConnector
40 from n2vc.k8s_helm3_conn import K8sHelm3Connector
41 from n2vc.k8s_juju_conn import K8sJujuConnector
42
43 from osm_common.dbbase import DbException
44 from osm_common.fsbase import FsException
45
46 from osm_lcm.data_utils.database.database import Database
47 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
48
49 from n2vc.n2vc_juju_conn import N2VCJujuConnector
50 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
51
52 from osm_lcm.lcm_helm_conn import LCMHelmConn
53
54 from copy import copy, deepcopy
55 from time import time
56 from uuid import uuid4
57
58 from random import randint
59
60 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
61
62
63 class NsLcm(LcmBase):
64 timeout_vca_on_error = 5 * 60 # Time for a charm, since first seen in blocked or error status, to be marked as failed
65 timeout_ns_deploy = 2 * 3600 # default global timeout for deploying a ns
66 timeout_ns_terminate = 1800 # default global timeout for undeploying a ns
67 timeout_charm_delete = 10 * 60
68 timeout_primitive = 30 * 60 # timeout for primitive execution
69 timeout_progress_primitive = 10 * 60 # timeout for some progress in a primitive execution
70
71 SUBOPERATION_STATUS_NOT_FOUND = -1
72 SUBOPERATION_STATUS_NEW = -2
73 SUBOPERATION_STATUS_SKIP = -3
74 task_name_deploy_vca = "Deploying VCA"
75
76 def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
77 """
78 Init: connect to database, filesystem storage and messaging
79 :param config: two-level dictionary with the configuration. The top level should contain, among others, 'database' and 'storage'
80 :return: None
81 """
82 super().__init__(
83 msg=msg,
84 logger=logging.getLogger('lcm.ns')
85 )
86
87 self.db = Database().instance.db
88 self.fs = Filesystem().instance.fs
89 self.loop = loop
90 self.lcm_tasks = lcm_tasks
91 self.timeout = config["timeout"]
92 self.ro_config = config["ro_config"]
93 self.ng_ro = config["ro_config"].get("ng")
94 self.vca_config = config["VCA"].copy()
95
96 # create N2VC connector
97 self.n2vc = N2VCJujuConnector(
98 log=self.logger,
99 loop=self.loop,
100 url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
101 username=self.vca_config.get('user', None),
102 vca_config=self.vca_config,
103 on_update_db=self._on_update_n2vc_db,
104 fs=self.fs,
105 db=self.db
106 )
107
108 self.conn_helm_ee = LCMHelmConn(
109 log=self.logger,
110 loop=self.loop,
111 url=None,
112 username=None,
113 vca_config=self.vca_config,
114 on_update_db=self._on_update_n2vc_db
115 )
116
117 self.k8sclusterhelm2 = K8sHelmConnector(
118 kubectl_command=self.vca_config.get("kubectlpath"),
119 helm_command=self.vca_config.get("helmpath"),
120 log=self.logger,
121 on_update_db=None,
122 fs=self.fs,
123 db=self.db
124 )
125
126 self.k8sclusterhelm3 = K8sHelm3Connector(
127 kubectl_command=self.vca_config.get("kubectlpath"),
128 helm_command=self.vca_config.get("helm3path"),
129 fs=self.fs,
130 log=self.logger,
131 db=self.db,
132 on_update_db=None,
133 )
134
135 self.k8sclusterjuju = K8sJujuConnector(
136 kubectl_command=self.vca_config.get("kubectlpath"),
137 juju_command=self.vca_config.get("jujupath"),
138 log=self.logger,
139 loop=self.loop,
140 on_update_db=self._on_update_k8s_db,
141 vca_config=self.vca_config,
142 fs=self.fs,
143 db=self.db
144 )
145
146 self.k8scluster_map = {
147 "helm-chart": self.k8sclusterhelm2,
148 "helm-chart-v3": self.k8sclusterhelm3,
149 "chart": self.k8sclusterhelm3,
150 "juju-bundle": self.k8sclusterjuju,
151 "juju": self.k8sclusterjuju,
152 }
153
154 self.vca_map = {
155 "lxc_proxy_charm": self.n2vc,
156 "native_charm": self.n2vc,
157 "k8s_proxy_charm": self.n2vc,
158 "helm": self.conn_helm_ee,
159 "helm-v3": self.conn_helm_ee
160 }
161
162 self.prometheus = prometheus
163
164 # create RO client
165 self.RO = NgRoClient(self.loop, **self.ro_config)
166
167 @staticmethod
168 def increment_ip_mac(ip_mac, vm_index=1):
169 if not isinstance(ip_mac, str):
170 return ip_mac
171 try:
172 # try with ipv4: look for the last dot
173 i = ip_mac.rfind(".")
174 if i > 0:
175 i += 1
176 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
177 # try with ipv6 or mac: look for the last colon and operate in hex
178 i = ip_mac.rfind(":")
179 if i > 0:
180 i += 1
181 # format in hex, len can be 2 for mac or 4 for ipv6
182 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index)
183 except Exception:
184 pass
185 return None
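# Illustrative behaviour (the addresses below are hypothetical examples, not taken from the code):
#   NsLcm.increment_ip_mac("10.0.0.4", vm_index=1)          -> "10.0.0.5"            (ipv4: last octet + 1)
#   NsLcm.increment_ip_mac("52:33:44:55:66:0a", vm_index=1) -> "52:33:44:55:66:0b"   (mac: last group, hex)
#   NsLcm.increment_ip_mac("2001:db8::beef", vm_index=1)    -> "2001:db8::bef0"      (ipv6: last group, hex)
#   NsLcm.increment_ip_mac(None)                            -> None                  (non-string input is returned as-is)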
186
187 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
188
189 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
190
191 try:
192 # TODO filter RO descriptor fields...
193
194 # write to database
195 db_dict = dict()
196 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
197 db_dict['deploymentStatus'] = ro_descriptor
198 self.update_db_2("nsrs", nsrs_id, db_dict)
199
200 except Exception as e:
201 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
202
203 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
204
205 # remove last dot from path (if exists)
206 if path.endswith('.'):
207 path = path[:-1]
208
209 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
210 # .format(table, filter, path, updated_data))
211 try:
212
213 nsr_id = filter.get('_id')
214
215 # read ns record from database
216 nsr = self.db.get_one(table='nsrs', q_filter=filter)
217 current_ns_status = nsr.get('nsState')
218
219 # get vca status for NS
220 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
221
222 # vcaStatus
223 db_dict = dict()
224 db_dict['vcaStatus'] = status_dict
225 await self.n2vc.update_vca_status(db_dict['vcaStatus'])
226
227 # update configurationStatus for this VCA
228 try:
229 vca_index = int(path[path.rfind(".")+1:])
230
231 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
232 vca_status = vca_list[vca_index].get('status')
233
234 configuration_status_list = nsr.get('configurationStatus')
235 config_status = configuration_status_list[vca_index].get('status')
236
237 if config_status == 'BROKEN' and vca_status != 'failed':
238 db_dict['configurationStatus'][vca_index] = 'READY'
239 elif config_status != 'BROKEN' and vca_status == 'failed':
240 db_dict['configurationStatus'][vca_index] = 'BROKEN'
241 except Exception as e:
242 # do not update configurationStatus
243 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
244
245 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
246 # if nsState = 'DEGRADED' check if all is OK
247 is_degraded = False
248 if current_ns_status in ('READY', 'DEGRADED'):
249 error_description = ''
250 # check machines
251 if status_dict.get('machines'):
252 for machine_id in status_dict.get('machines'):
253 machine = status_dict.get('machines').get(machine_id)
254 # check machine agent-status
255 if machine.get('agent-status'):
256 s = machine.get('agent-status').get('status')
257 if s != 'started':
258 is_degraded = True
259 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
260 # check machine instance status
261 if machine.get('instance-status'):
262 s = machine.get('instance-status').get('status')
263 if s != 'running':
264 is_degraded = True
265 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
266 # check applications
267 if status_dict.get('applications'):
268 for app_id in status_dict.get('applications'):
269 app = status_dict.get('applications').get(app_id)
270 # check application status
271 if app.get('status'):
272 s = app.get('status').get('status')
273 if s != 'active':
274 is_degraded = True
275 error_description += 'application {} status={} ; '.format(app_id, s)
276
277 if error_description:
278 db_dict['errorDescription'] = error_description
279 if current_ns_status == 'READY' and is_degraded:
280 db_dict['nsState'] = 'DEGRADED'
281 if current_ns_status == 'DEGRADED' and not is_degraded:
282 db_dict['nsState'] = 'READY'
283
284 # write to database
285 self.update_db_2("nsrs", nsr_id, db_dict)
286
287 except (asyncio.CancelledError, asyncio.TimeoutError):
288 raise
289 except Exception as e:
290 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
291
292 async def _on_update_k8s_db(self, cluster_uuid, kdu_instance, filter=None):
293 """
294 Updating vca status in NSR record
295 :param cluster_uuid: UUID of a k8s cluster
296 :param kdu_instance: The unique name of the KDU instance
297 :param filter: To get nsr_id
298 :return: none
299 """
300
301 # self.logger.debug("_on_update_k8s_db(cluster_uuid={}, kdu_instance={}, filter={}"
302 # .format(cluster_uuid, kdu_instance, filter))
303
304 try:
305 nsr_id = filter.get('_id')
306
307 # get vca status for NS
308 vca_status = await self.k8sclusterjuju.status_kdu(cluster_uuid,
309 kdu_instance,
310 complete_status=True,
311 yaml_format=False)
312 # vcaStatus
313 db_dict = dict()
314 db_dict['vcaStatus'] = {nsr_id: vca_status}
315
316 await self.k8sclusterjuju.update_vca_status(db_dict['vcaStatus'], kdu_instance)
317
318 # write to database
319 self.update_db_2("nsrs", nsr_id, db_dict)
320
321 except (asyncio.CancelledError, asyncio.TimeoutError):
322 raise
323 except Exception as e:
324 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
325
326 @staticmethod
327 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
328 try:
329 env = Environment(undefined=StrictUndefined)
330 template = env.from_string(cloud_init_text)
331 return template.render(additional_params or {})
332 except UndefinedError as e:
333 raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
334 "file, must be provided in the instantiation parameters inside the "
335 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id))
336 except (TemplateError, TemplateNotFound) as e:
337 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
338 format(vnfd_id, vdu_id, e))
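# A minimal rendering sketch, assuming a hypothetical template and variable (not taken from a descriptor):
#   cloud_init_text   = "#cloud-config\npassword: {{ vm_password }}"
#   additional_params = {"vm_password": "osm4u"}
#   _parse_cloud_init(cloud_init_text, additional_params, "hackfest-vnf", "mgmtVM")
#   -> "#cloud-config\npassword: osm4u"
# With StrictUndefined, a variable missing from additional_params raises UndefinedError,
# which is re-raised as the LcmException above.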
339
340 def _get_vdu_cloud_init_content(self, vdu, vnfd):
341 cloud_init_content = cloud_init_file = None
342 try:
343 if vdu.get("cloud-init-file"):
344 base_folder = vnfd["_admin"]["storage"]
345 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
346 vdu["cloud-init-file"])
347 with self.fs.file_open(cloud_init_file, "r") as ci_file:
348 cloud_init_content = ci_file.read()
349 elif vdu.get("cloud-init"):
350 cloud_init_content = vdu["cloud-init"]
351
352 return cloud_init_content
353 except FsException as e:
354 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
355 format(vnfd["id"], vdu["id"], cloud_init_file, e))
356
357 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
358 vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"])
359 additional_params = vdur.get("additionalParams")
360 return parse_yaml_strings(additional_params)
361
362 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
363 """
364 Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
365 :param vnfd: input vnfd
366 :param new_id: overrides vnf id if provided
367 :param additionalParams: Instantiation params for VNFs provided
368 :param nsrId: Id of the NSR
369 :return: copy of vnfd
370 """
371 vnfd_RO = deepcopy(vnfd)
372 # remove keys not used by RO: configuration, monitoring, scaling and internal keys
373 vnfd_RO.pop("_id", None)
374 vnfd_RO.pop("_admin", None)
375 vnfd_RO.pop("monitoring-param", None)
376 vnfd_RO.pop("scaling-group-descriptor", None)
377 vnfd_RO.pop("kdu", None)
378 vnfd_RO.pop("k8s-cluster", None)
379 if new_id:
380 vnfd_RO["id"] = new_id
381
382 # remove cloud-init and cloud-init-file; cloud-init is handled apart and not sent within the RO vnfd
383 for vdu in get_iterable(vnfd_RO, "vdu"):
384 vdu.pop("cloud-init-file", None)
385 vdu.pop("cloud-init", None)
386 return vnfd_RO
387
388 @staticmethod
389 def ip_profile_2_RO(ip_profile):
390 RO_ip_profile = deepcopy(ip_profile)
391 if "dns-server" in RO_ip_profile:
392 if isinstance(RO_ip_profile["dns-server"], list):
393 RO_ip_profile["dns-address"] = []
394 for ds in RO_ip_profile.pop("dns-server"):
395 RO_ip_profile["dns-address"].append(ds['address'])
396 else:
397 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
398 if RO_ip_profile.get("ip-version") == "ipv4":
399 RO_ip_profile["ip-version"] = "IPv4"
400 if RO_ip_profile.get("ip-version") == "ipv6":
401 RO_ip_profile["ip-version"] = "IPv6"
402 if "dhcp-params" in RO_ip_profile:
403 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
404 return RO_ip_profile
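# Example of the OSM -> RO mapping performed above, with hypothetical values:
#   ip_profile_2_RO({"ip-version": "ipv4",
#                    "dns-server": [{"address": "8.8.8.8"}],
#                    "dhcp-params": {"enabled": True}})
#   -> {"ip-version": "IPv4", "dns-address": ["8.8.8.8"], "dhcp": {"enabled": True}}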
405
406 def _get_ro_vim_id_for_vim_account(self, vim_account):
407 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
408 if db_vim["_admin"]["operationalState"] != "ENABLED":
409 raise LcmException("VIM={} is not available. operationalState={}".format(
410 vim_account, db_vim["_admin"]["operationalState"]))
411 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
412 return RO_vim_id
413
414 def get_ro_wim_id_for_wim_account(self, wim_account):
415 if isinstance(wim_account, str):
416 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
417 if db_wim["_admin"]["operationalState"] != "ENABLED":
418 raise LcmException("WIM={} is not available. operationalState={}".format(
419 wim_account, db_wim["_admin"]["operationalState"]))
420 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
421 return RO_wim_id
422 else:
423 return wim_account
424
425 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
426
427 db_vdu_push_list = []
428 db_update = {"_admin.modified": time()}
429 if vdu_create:
430 for vdu_id, vdu_count in vdu_create.items():
431 vdur = next((vdur for vdur in reversed(db_vnfr["vdur"]) if vdur["vdu-id-ref"] == vdu_id), None)
432 if not vdur:
433 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".
434 format(vdu_id))
435
436 for count in range(vdu_count):
437 vdur_copy = deepcopy(vdur)
438 vdur_copy["status"] = "BUILD"
439 vdur_copy["status-detailed"] = None
440 vdur_copy["ip-address"]: None
441 vdur_copy["_id"] = str(uuid4())
442 vdur_copy["count-index"] += count + 1
443 vdur_copy["id"] = "{}-{}".format(vdur_copy["vdu-id-ref"], vdur_copy["count-index"])
444 vdur_copy.pop("vim_info", None)
445 for iface in vdur_copy["interfaces"]:
446 if iface.get("fixed-ip"):
447 iface["ip-address"] = self.increment_ip_mac(iface["ip-address"], count+1)
448 else:
449 iface.pop("ip-address", None)
450 if iface.get("fixed-mac"):
451 iface["mac-address"] = self.increment_ip_mac(iface["mac-address"], count+1)
452 else:
453 iface.pop("mac-address", None)
454 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf
455 db_vdu_push_list.append(vdur_copy)
456 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
457 if vdu_delete:
458 for vdu_id, vdu_count in vdu_delete.items():
459 if mark_delete:
460 indexes_to_delete = [iv[0] for iv in enumerate(db_vnfr["vdur"]) if iv[1]["vdu-id-ref"] == vdu_id]
461 db_update.update({"vdur.{}.status".format(i): "DELETING" for i in indexes_to_delete[-vdu_count:]})
462 else:
463 # it must be deleted one by one because common.db does not allow otherwise
464 vdus_to_delete = [v for v in reversed(db_vnfr["vdur"]) if v["vdu-id-ref"] == vdu_id]
465 for vdu in vdus_to_delete[:vdu_count]:
466 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, None, pull={"vdur": {"_id": vdu["_id"]}})
467 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
468 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
469 # modify passed dictionary db_vnfr
470 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
471 db_vnfr["vdur"] = db_vnfr_["vdur"]
472
473 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
474 """
475 Updates database nsr with the RO info for the created vld
476 :param ns_update_nsr: dictionary to be filled with the updated info
477 :param db_nsr: content of db_nsr. This is also modified
478 :param nsr_desc_RO: nsr descriptor from RO
479 :return: Nothing, LcmException is raised on errors
480 """
481
482 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
483 for net_RO in get_iterable(nsr_desc_RO, "nets"):
484 if vld["id"] != net_RO.get("ns_net_osm_id"):
485 continue
486 vld["vim-id"] = net_RO.get("vim_net_id")
487 vld["name"] = net_RO.get("vim_name")
488 vld["status"] = net_RO.get("status")
489 vld["status-detailed"] = net_RO.get("error_msg")
490 ns_update_nsr["vld.{}".format(vld_index)] = vld
491 break
492 else:
493 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
494
495 def set_vnfr_at_error(self, db_vnfrs, error_text):
496 try:
497 for db_vnfr in db_vnfrs.values():
498 vnfr_update = {"status": "ERROR"}
499 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
500 if "status" not in vdur:
501 vdur["status"] = "ERROR"
502 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
503 if error_text:
504 vdur["status-detailed"] = str(error_text)
505 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
506 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
507 except DbException as e:
508 self.logger.error("Cannot update vnf. {}".format(e))
509
510 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
511 """
512 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
513 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
514 :param nsr_desc_RO: nsr descriptor from RO
515 :return: Nothing, LcmException is raised on errors
516 """
517 for vnf_index, db_vnfr in db_vnfrs.items():
518 for vnf_RO in nsr_desc_RO["vnfs"]:
519 if vnf_RO["member_vnf_index"] != vnf_index:
520 continue
521 vnfr_update = {}
522 if vnf_RO.get("ip_address"):
523 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
524 elif not db_vnfr.get("ip-address"):
525 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
526 raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
527
528 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
529 vdur_RO_count_index = 0
530 if vdur.get("pdu-type"):
531 continue
532 for vdur_RO in get_iterable(vnf_RO, "vms"):
533 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
534 continue
535 if vdur["count-index"] != vdur_RO_count_index:
536 vdur_RO_count_index += 1
537 continue
538 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
539 if vdur_RO.get("ip_address"):
540 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
541 else:
542 vdur["ip-address"] = None
543 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
544 vdur["name"] = vdur_RO.get("vim_name")
545 vdur["status"] = vdur_RO.get("status")
546 vdur["status-detailed"] = vdur_RO.get("error_msg")
547 for ifacer in get_iterable(vdur, "interfaces"):
548 for interface_RO in get_iterable(vdur_RO, "interfaces"):
549 if ifacer["name"] == interface_RO.get("internal_name"):
550 ifacer["ip-address"] = interface_RO.get("ip_address")
551 ifacer["mac-address"] = interface_RO.get("mac_address")
552 break
553 else:
554 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
555 "from VIM info"
556 .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
557 vnfr_update["vdur.{}".format(vdu_index)] = vdur
558 break
559 else:
560 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
561 "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
562
563 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
564 for net_RO in get_iterable(nsr_desc_RO, "nets"):
565 if vld["id"] != net_RO.get("vnf_net_osm_id"):
566 continue
567 vld["vim-id"] = net_RO.get("vim_net_id")
568 vld["name"] = net_RO.get("vim_name")
569 vld["status"] = net_RO.get("status")
570 vld["status-detailed"] = net_RO.get("error_msg")
571 vnfr_update["vld.{}".format(vld_index)] = vld
572 break
573 else:
574 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
575 vnf_index, vld["id"]))
576
577 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
578 break
579
580 else:
581 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
582
583 def _get_ns_config_info(self, nsr_id):
584 """
585 Generates a mapping between vnf,vdu elements and the N2VC id
586 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
587 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
588 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
589 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
590 """
591 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
592 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
593 mapping = {}
594 ns_config_info = {"osm-config-mapping": mapping}
595 for vca in vca_deployed_list:
596 if not vca["member-vnf-index"]:
597 continue
598 if not vca["vdu_id"]:
599 mapping[vca["member-vnf-index"]] = vca["application"]
600 else:
601 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
602 vca["application"]
603 return ns_config_info
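# Example of the returned mapping (application names are hypothetical):
#   {"osm-config-mapping": {
#       "1": "app-vnf-1",                # member-vnf-index "1", vnf-level charm
#       "2.dataVM.0": "app-vnf-2-vdu"    # member-vnf-index "2", vdu "dataVM", replica 0
#   }}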
604
605 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds,
606 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
607
608 db_vims = {}
609
610 def get_vim_account(vim_account_id):
611 nonlocal db_vims
612 if vim_account_id in db_vims:
613 return db_vims[vim_account_id]
614 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
615 db_vims[vim_account_id] = db_vim
616 return db_vim
617
618 # modify target_vld info with instantiation parameters
619 def parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn):
620 if vld_params.get("ip-profile"):
621 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params["ip-profile"]
622 if vld_params.get("provider-network"):
623 target_vld["vim_info"][target_vim]["provider_network"] = vld_params["provider-network"]
624 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
625 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params["provider-network"]["sdn-ports"]
626 if vld_params.get("wimAccountId"):
627 target_wim = "wim:{}".format(vld_params["wimAccountId"])
628 target_vld["vim_info"][target_wim] = {}
629 for param in ("vim-network-name", "vim-network-id"):
630 if vld_params.get(param):
631 if isinstance(vld_params[param], dict):
632 for vim, vim_net in vld_params[param].items():
633 other_target_vim = "vim:" + vim
634 populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net)
635 else: # isinstance str
636 target_vld["vim_info"][target_vim][param.replace("-", "_")] = vld_params[param]
637 if vld_params.get("common_id"):
638 target_vld["common_id"] = vld_params.get("common_id")
639
640 nslcmop_id = db_nslcmop["_id"]
641 target = {
642 "name": db_nsr["name"],
643 "ns": {"vld": []},
644 "vnf": [],
645 "image": deepcopy(db_nsr["image"]),
646 "flavor": deepcopy(db_nsr["flavor"]),
647 "action_id": nslcmop_id,
648 "cloud_init_content": {},
649 }
650 for image in target["image"]:
651 image["vim_info"] = {}
652 for flavor in target["flavor"]:
653 flavor["vim_info"] = {}
654
655 if db_nslcmop.get("lcmOperationType") != "instantiate":
656 # get parameters of instantiation:
657 db_nslcmop_instantiate = self.db.get_list("nslcmops", {"nsInstanceId": db_nslcmop["nsInstanceId"],
658 "lcmOperationType": "instantiate"})[-1]
659 ns_params = db_nslcmop_instantiate.get("operationParams")
660 else:
661 ns_params = db_nslcmop.get("operationParams")
662 ssh_keys_instantiation = ns_params.get("ssh_keys") or []
663 ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])
664
665 cp2target = {}
666 for vld_index, vld in enumerate(db_nsr.get("vld")):
667 target_vim = "vim:{}".format(ns_params["vimAccountId"])
668 target_vld = {
669 "id": vld["id"],
670 "name": vld["name"],
671 "mgmt-network": vld.get("mgmt-network", False),
672 "type": vld.get("type"),
673 "vim_info": {
674 target_vim: {
675 "vim_network_name": vld.get("vim-network-name"),
676 "vim_account_id": ns_params["vimAccountId"]
677 }
678 }
679 }
680 # check if this network needs SDN assist
681 if vld.get("pci-interfaces"):
682 db_vim = get_vim_account(ns_params["vimAccountId"])
683 sdnc_id = db_vim["config"].get("sdn-controller")
684 if sdnc_id:
685 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
686 target_sdn = "sdn:{}".format(sdnc_id)
687 target_vld["vim_info"][target_sdn] = {
688 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
689
690 nsd_vnf_profiles = get_vnf_profiles(nsd)
691 for nsd_vnf_profile in nsd_vnf_profiles:
692 for cp in nsd_vnf_profile["virtual-link-connectivity"]:
693 if cp["virtual-link-profile-id"] == vld["id"]:
694 cp2target["member_vnf:{}.{}".format(
695 cp["constituent-cpd-id"][0]["constituent-base-element-id"],
696 cp["constituent-cpd-id"][0]["constituent-cpd-id"]
697 )] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)
698
699 # check at nsd descriptor, if there is an ip-profile
700 vld_params = {}
701 nsd_vlp = find_in_list(
702 get_virtual_link_profiles(nsd),
703 lambda a_link_profile: a_link_profile["virtual-link-desc-id"] == vld["id"])
704 if nsd_vlp and nsd_vlp.get("virtual-link-protocol-data") and \
705 nsd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"):
706 ip_profile_source_data = nsd_vlp["virtual-link-protocol-data"]["l3-protocol-data"]
707 ip_profile_dest_data = {}
708 if "ip-version" in ip_profile_source_data:
709 ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"]
710 if "cidr" in ip_profile_source_data:
711 ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"]
712 if "gateway-ip" in ip_profile_source_data:
713 ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"]
714 if "dhcp-enabled" in ip_profile_source_data:
715 ip_profile_dest_data["dhcp-params"] = {
716 "enabled": ip_profile_source_data["dhcp-enabled"]
717 }
718 vld_params["ip-profile"] = ip_profile_dest_data
719
720 # update vld_params with instantiation params
721 vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"),
722 lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]))
723 if vld_instantiation_params:
724 vld_params.update(vld_instantiation_params)
725 parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
726 target["ns"]["vld"].append(target_vld)
727
728 for vnfr in db_vnfrs.values():
729 vnfd = find_in_list(db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"])
730 vnf_params = find_in_list(get_iterable(ns_params, "vnf"),
731 lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"])
732 target_vnf = deepcopy(vnfr)
733 target_vim = "vim:{}".format(vnfr["vim-account-id"])
734 for vld in target_vnf.get("vld", ()):
735 # check if connected to a ns.vld, to fill target
736 vnf_cp = find_in_list(vnfd.get("int-virtual-link-desc", ()),
737 lambda cpd: cpd.get("id") == vld["id"])
738 if vnf_cp:
739 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
740 if cp2target.get(ns_cp):
741 vld["target"] = cp2target[ns_cp]
742
743 vld["vim_info"] = {target_vim: {"vim_network_name": vld.get("vim-network-name")}}
744 # check if this network needs SDN assist
745 target_sdn = None
746 if vld.get("pci-interfaces"):
747 db_vim = get_vim_account(vnfr["vim-account-id"])
748 sdnc_id = db_vim["config"].get("sdn-controller")
749 if sdnc_id:
750 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
751 target_sdn = "sdn:{}".format(sdnc_id)
752 vld["vim_info"][target_sdn] = {
753 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
754
755 # check at vnfd descriptor, if there is an ip-profile
756 vld_params = {}
757 vnfd_vlp = find_in_list(
758 get_virtual_link_profiles(vnfd),
759 lambda a_link_profile: a_link_profile["id"] == vld["id"]
760 )
761 if vnfd_vlp and vnfd_vlp.get("virtual-link-protocol-data") and \
762 vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"):
763 ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"]["l3-protocol-data"]
764 ip_profile_dest_data = {}
765 if "ip-version" in ip_profile_source_data:
766 ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"]
767 if "cidr" in ip_profile_source_data:
768 ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"]
769 if "gateway-ip" in ip_profile_source_data:
770 ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"]
771 if "dhcp-enabled" in ip_profile_source_data:
772 ip_profile_dest_data["dhcp-params"] = {
773 "enabled": ip_profile_source_data["dhcp-enabled"]
774 }
775
776 vld_params["ip-profile"] = ip_profile_dest_data
777 # update vld_params with instantiation params
778 if vnf_params:
779 vld_instantiation_params = find_in_list(get_iterable(vnf_params, "internal-vld"),
780 lambda i_vld: i_vld["name"] == vld["id"])
781 if vld_instantiation_params:
782 vld_params.update(vld_instantiation_params)
783 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
784
785 vdur_list = []
786 for vdur in target_vnf.get("vdur", ()):
787 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
788 continue # This vdu must not be created
789 vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}
790
791 self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))
792
793 if ssh_keys_all:
794 vdu_configuration = get_configuration(vnfd, vdur["vdu-id-ref"])
795 vnf_configuration = get_configuration(vnfd, vnfd["id"])
796 if vdu_configuration and vdu_configuration.get("config-access") and \
797 vdu_configuration.get("config-access").get("ssh-access"):
798 vdur["ssh-keys"] = ssh_keys_all
799 vdur["ssh-access-required"] = vdu_configuration["config-access"]["ssh-access"]["required"]
800 elif vnf_configuration and vnf_configuration.get("config-access") and \
801 vnf_configuration.get("config-access").get("ssh-access") and \
802 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
803 vdur["ssh-keys"] = ssh_keys_all
804 vdur["ssh-access-required"] = vnf_configuration["config-access"]["ssh-access"]["required"]
805 elif ssh_keys_instantiation and \
806 find_in_list(vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")):
807 vdur["ssh-keys"] = ssh_keys_instantiation
808
809 self.logger.debug("NS > vdur > {}".format(vdur))
810
811 vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
812 # cloud-init
813 if vdud.get("cloud-init-file"):
814 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
815 # read the file and put its content at target.cloud_init_content, so ng_ro does not need the shared package filesystem
816 if vdur["cloud-init"] not in target["cloud_init_content"]:
817 base_folder = vnfd["_admin"]["storage"]
818 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
819 vdud.get("cloud-init-file"))
820 with self.fs.file_open(cloud_init_file, "r") as ci_file:
821 target["cloud_init_content"][vdur["cloud-init"]] = ci_file.read()
822 elif vdud.get("cloud-init"):
823 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"]))
824 # put the content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
825 target["cloud_init_content"][vdur["cloud-init"]] = vdud["cloud-init"]
826 vdur["additionalParams"] = vdur.get("additionalParams") or {}
827 deploy_params_vdu = self._format_additional_params(vdur.get("additionalParams") or {})
828 deploy_params_vdu["OSM"] = get_osm_params(vnfr, vdur["vdu-id-ref"], vdur["count-index"])
829 vdur["additionalParams"] = deploy_params_vdu
830
831 # flavor
832 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
833 if target_vim not in ns_flavor["vim_info"]:
834 ns_flavor["vim_info"][target_vim] = {}
835
836 # deal with images
837 # in case alternative images are provided, we must check if they should be applied
838 # for this vim_type; if so, modify the ns-image-id accordingly
839 ns_image_id = int(vdur["ns-image-id"])
840 if vdur.get("alt-image-ids"):
841 db_vim = get_vim_account(vnfr["vim-account-id"])
842 vim_type = db_vim["vim_type"]
843 for alt_image_id in vdur.get("alt-image-ids"):
844 ns_alt_image = target["image"][int(alt_image_id)]
845 if vim_type == ns_alt_image.get("vim-type"):
846 # must use alternative image
847 self.logger.debug("use alternative image id: {}".format(alt_image_id))
848 ns_image_id = alt_image_id
849 vdur["ns-image-id"] = ns_image_id
850 break
851 ns_image = target["image"][int(ns_image_id)]
852 if target_vim not in ns_image["vim_info"]:
853 ns_image["vim_info"][target_vim] = {}
854
855 vdur["vim_info"] = {target_vim: {}}
856 # instantiation parameters
857 # if vnf_params:
858 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
859 # vdud["id"]), None)
860 vdur_list.append(vdur)
861 target_vnf["vdur"] = vdur_list
862 target["vnf"].append(target_vnf)
863
864 desc = await self.RO.deploy(nsr_id, target)
865 self.logger.debug("RO return > {}".format(desc))
866 action_id = desc["action_id"]
867 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
868
869 # Updating NSR
870 db_nsr_update = {
871 "_admin.deployed.RO.operational-status": "running",
872 "detailed-status": " ".join(stage)
873 }
874 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
875 self.update_db_2("nsrs", nsr_id, db_nsr_update)
876 self._write_op_status(nslcmop_id, stage)
877 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
878 return
879
880 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id=None, start_time=None, timeout=600, stage=None):
881 detailed_status_old = None
882 db_nsr_update = {}
883 start_time = start_time or time()
884 while time() <= start_time + timeout:
885 desc_status = await self.RO.status(nsr_id, action_id)
886 self.logger.debug("Wait NG RO > {}".format(desc_status))
887 if desc_status["status"] == "FAILED":
888 raise NgRoException(desc_status["details"])
889 elif desc_status["status"] == "BUILD":
890 if stage:
891 stage[2] = "VIM: ({})".format(desc_status["details"])
892 elif desc_status["status"] == "DONE":
893 if stage:
894 stage[2] = "Deployed at VIM"
895 break
896 else:
897 assert False, "RO.status returns unknown {}".format(desc_status["status"])
898 if stage and nslcmop_id and stage[2] != detailed_status_old:
899 detailed_status_old = stage[2]
900 db_nsr_update["detailed-status"] = " ".join(stage)
901 self.update_db_2("nsrs", nsr_id, db_nsr_update)
902 self._write_op_status(nslcmop_id, stage)
903 await asyncio.sleep(15, loop=self.loop)
904 else: # timeout_ns_deploy
905 raise NgRoException("Timeout waiting ns to deploy")
906
907 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
908 db_nsr_update = {}
909 failed_detail = []
910 action_id = None
911 start_deploy = time()
912 try:
913 target = {
914 "ns": {"vld": []},
915 "vnf": [],
916 "image": [],
917 "flavor": [],
918 "action_id": nslcmop_id
919 }
920 desc = await self.RO.deploy(nsr_id, target)
921 action_id = desc["action_id"]
922 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
923 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
924 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
925
926 # wait until done
927 delete_timeout = 20 * 60 # 20 minutes
928 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
929
930 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
931 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
932 # delete all nsr
933 await self.RO.delete(nsr_id)
934 except Exception as e:
935 if isinstance(e, NgRoException) and e.http_code == 404: # not found
936 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
937 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
938 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
939 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
940 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
941 failed_detail.append("delete conflict: {}".format(e))
942 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
943 else:
944 failed_detail.append("delete error: {}".format(e))
945 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
946
947 if failed_detail:
948 stage[2] = "Error deleting from VIM"
949 else:
950 stage[2] = "Deleted from VIM"
951 db_nsr_update["detailed-status"] = " ".join(stage)
952 self.update_db_2("nsrs", nsr_id, db_nsr_update)
953 self._write_op_status(nslcmop_id, stage)
954
955 if failed_detail:
956 raise LcmException("; ".join(failed_detail))
957 return
958
959 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds,
960 n2vc_key_list, stage):
961 """
962 Instantiate at RO
963 :param logging_text: prefix text to use at logging
964 :param nsr_id: nsr identity
965 :param nsd: database content of ns descriptor
966 :param db_nsr: database content of ns record
967 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
968 :param db_vnfrs:
969 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
970 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
971 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
972 :return: None or exception
973 """
974 try:
975 start_deploy = time()
976 ns_params = db_nslcmop.get("operationParams")
977 if ns_params and ns_params.get("timeout_ns_deploy"):
978 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
979 else:
980 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
981
982 # Check for and optionally request placement optimization. Database will be updated if placement activated
983 stage[2] = "Waiting for Placement."
984 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
985 # in case of placement, change ns_params["vimAccountId"] if not present at any vnfr
986 for vnfr in db_vnfrs.values():
987 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
988 break
989 else:
990 ns_params["vimAccountId"] == vnfr["vim-account-id"]
991
992 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
993 db_vnfds, n2vc_key_list, stage, start_deploy, timeout_ns_deploy)
994 except Exception as e:
995 stage[2] = "ERROR deploying at VIM"
996 self.set_vnfr_at_error(db_vnfrs, str(e))
997 self.logger.error("Error deploying at VIM {}".format(e),
998 exc_info=not isinstance(e, (ROclient.ROClientException, LcmException, DbException,
999 NgRoException)))
1000 raise
1001
1002 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1003 """
1004 Wait for kdu to be up, get ip address
1005 :param logging_text: prefix use for logging
1006 :param nsr_id:
1007 :param vnfr_id:
1008 :param kdu_name:
1009 :return: IP address
1010 """
1011
1012 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1013 nb_tries = 0
1014
1015 while nb_tries < 360:
1016 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1017 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None)
1018 if not kdur:
1019 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
1020 if kdur.get("status"):
1021 if kdur["status"] in ("READY", "ENABLED"):
1022 return kdur.get("ip-address")
1023 else:
1024 raise LcmException("target KDU={} is in error state".format(kdu_name))
1025
1026 await asyncio.sleep(10, loop=self.loop)
1027 nb_tries += 1
1028 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
1029
1030 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1031 """
1032 Wait for the ip address at RO, and optionally insert a public ssh key in the virtual machine
1033 :param logging_text: prefix use for logging
1034 :param nsr_id:
1035 :param vnfr_id:
1036 :param vdu_id:
1037 :param vdu_index:
1038 :param pub_key: public ssh key to inject, None to skip
1039 :param user: user to apply the public ssh key
1040 :return: IP address
1041 """
1042
1043 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1044 ro_nsr_id = None
1045 ip_address = None
1046 nb_tries = 0
1047 target_vdu_id = None
1048 ro_retries = 0
1049
1050 while True:
1051
1052 ro_retries += 1
1053 if ro_retries >= 360: # 1 hour
1054 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1055
1056 await asyncio.sleep(10, loop=self.loop)
1057
1058 # get ip address
1059 if not target_vdu_id:
1060 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1061
1062 if not vdu_id: # for the VNF case
1063 if db_vnfr.get("status") == "ERROR":
1064 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
1065 ip_address = db_vnfr.get("ip-address")
1066 if not ip_address:
1067 continue
1068 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1069 else: # VDU case
1070 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1071 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1072
1073 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1074 vdur = db_vnfr["vdur"][0]
1075 if not vdur:
1076 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1077 vdu_index))
1078 # New generation RO stores information at "vim_info"
1079 ng_ro_status = None
1080 target_vim = None
1081 if vdur.get("vim_info"):
1082 target_vim = next(t for t in vdur["vim_info"]) # there should be only one key
1083 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1084 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE" or ng_ro_status == "ACTIVE":
1085 ip_address = vdur.get("ip-address")
1086 if not ip_address:
1087 continue
1088 target_vdu_id = vdur["vdu-id-ref"]
1089 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
1090 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1091
1092 if not target_vdu_id:
1093 continue
1094
1095 # inject public key into machine
1096 if pub_key and user:
1097 self.logger.debug(logging_text + "Inserting RO key")
1098 self.logger.debug("SSH > PubKey > {}".format(pub_key))
1099 if vdur.get("pdu-type"):
1100 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1101 return ip_address
1102 try:
1103 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
1104 if self.ng_ro:
1105 target = {"action": {"action": "inject_ssh_key", "key": pub_key, "user": user},
1106 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1107 }
1108 desc = await self.RO.deploy(nsr_id, target)
1109 action_id = desc["action_id"]
1110 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1111 break
1112 else:
1113 # wait until NS is deployed at RO
1114 if not ro_nsr_id:
1115 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1116 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1117 if not ro_nsr_id:
1118 continue
1119 result_dict = await self.RO.create_action(
1120 item="ns",
1121 item_id_name=ro_nsr_id,
1122 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1123 )
1124 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1125 if not result_dict or not isinstance(result_dict, dict):
1126 raise LcmException("Unknown response from RO when injecting key")
1127 for result in result_dict.values():
1128 if result.get("vim_result") == 200:
1129 break
1130 else:
1131 raise ROclient.ROClientException("error injecting key: {}".format(
1132 result.get("description")))
1133 break
1134 except NgRoException as e:
1135 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
1136 except ROclient.ROClientException as e:
1137 if not nb_tries:
1138 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1139 format(e, 20*10))
1140 nb_tries += 1
1141 if nb_tries >= 20:
1142 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
1143 else:
1144 break
1145
1146 return ip_address
1147
1148 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1149 """
1150 Wait until dependent VCA deployments have finished. NS waits for VNFs and VDUs; VNFs wait for VDUs
1151 """
1152 my_vca = vca_deployed_list[vca_index]
1153 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1154 # vdu or kdu: no dependencies
1155 return
1156 timeout = 300
1157 while timeout >= 0:
1158 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1159 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1160 configuration_status_list = db_nsr["configurationStatus"]
1161 for index, vca_deployed in enumerate(configuration_status_list):
1162 if index == vca_index:
1163 # myself
1164 continue
1165 if not my_vca.get("member-vnf-index") or \
1166 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
1167 internal_status = configuration_status_list[index].get("status")
1168 if internal_status == 'READY':
1169 continue
1170 elif internal_status == 'BROKEN':
1171 raise LcmException("Configuration aborted because dependent charm/s has failed")
1172 else:
1173 break
1174 else:
1175 # no dependencies, return
1176 return
1177 await asyncio.sleep(10)
1178 timeout -= 1
1179
1180 raise LcmException("Configuration aborted because dependent charm/s timeout")
1181
1182 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
1183 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1184 ee_config_descriptor):
1185 nsr_id = db_nsr["_id"]
1186 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1187 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1188 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1189 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
1190 db_dict = {
1191 'collection': 'nsrs',
1192 'filter': {'_id': nsr_id},
1193 'path': db_update_entry
1194 }
1195 step = ""
1196 try:
1197
1198 element_type = 'NS'
1199 element_under_configuration = nsr_id
1200
1201 vnfr_id = None
1202 if db_vnfr:
1203 vnfr_id = db_vnfr["_id"]
1204 osm_config["osm"]["vnf_id"] = vnfr_id
1205
1206 namespace = "{nsi}.{ns}".format(
1207 nsi=nsi_id if nsi_id else "",
1208 ns=nsr_id)
1209
1210 if vnfr_id:
1211 element_type = 'VNF'
1212 element_under_configuration = vnfr_id
1213 namespace += ".{}-{}".format(vnfr_id, vdu_index or 0)
1214 if vdu_id:
1215 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
1216 element_type = 'VDU'
1217 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
1218 osm_config["osm"]["vdu_id"] = vdu_id
1219 elif kdu_name:
1220 namespace += ".{}.{}".format(kdu_name, vdu_index or 0)
1221 element_type = 'KDU'
1222 element_under_configuration = kdu_name
1223 osm_config["osm"]["kdu_name"] = kdu_name
1224
1225 # Get artifact path
1226 artifact_path = "{}/{}/{}/{}".format(
1227 base_folder["folder"],
1228 base_folder["pkg-dir"],
1229 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1230 vca_name
1231 )
1232
1233 self.logger.debug("Artifact path > {}".format(artifact_path))
1234
1235 # get initial_config_primitive_list that applies to this element
1236 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1237
1238 self.logger.debug("Initial config primitive list > {}".format(initial_config_primitive_list))
1239
1240 # add config if not present for NS charm
1241 ee_descriptor_id = ee_config_descriptor.get("id")
1242 self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
1243 initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(initial_config_primitive_list,
1244 vca_deployed, ee_descriptor_id)
1245
1246 self.logger.debug("Initial config primitive list #2 > {}".format(initial_config_primitive_list))
1247 # n2vc_redesign STEP 3.1
1248 # find old ee_id if exists
1249 ee_id = vca_deployed.get("ee_id")
1250
1251 vim_account_id = (
1252 deep_get(db_vnfr, ("vim-account-id",)) or
1253 deep_get(deploy_params, ("OSM", "vim_account_id"))
1254 )
1255 vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
1256 vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
1257 # create or register execution environment in VCA
1258 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1259
1260 self._write_configuration_status(
1261 nsr_id=nsr_id,
1262 vca_index=vca_index,
1263 status='CREATING',
1264 element_under_configuration=element_under_configuration,
1265 element_type=element_type
1266 )
1267
1268 step = "create execution environment"
1269 self.logger.debug(logging_text + step)
1270
1271 ee_id = None
1272 credentials = None
1273 if vca_type == "k8s_proxy_charm":
1274 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1275 charm_name=artifact_path[artifact_path.rfind("/") + 1:],
1276 namespace=namespace,
1277 artifact_path=artifact_path,
1278 db_dict=db_dict,
1279 cloud_name=vca_k8s_cloud,
1280 credential_name=vca_k8s_cloud_credential,
1281 )
1282 elif vca_type == "helm" or vca_type == "helm-v3":
1283 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1284 namespace=namespace,
1285 reuse_ee_id=ee_id,
1286 db_dict=db_dict,
1287 config=osm_config,
1288 artifact_path=artifact_path,
1289 vca_type=vca_type
1290 )
1291 else:
1292 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1293 namespace=namespace,
1294 reuse_ee_id=ee_id,
1295 db_dict=db_dict,
1296 cloud_name=vca_cloud,
1297 credential_name=vca_cloud_credential,
1298 )
1299
1300 elif vca_type == "native_charm":
1301 step = "Waiting to VM being up and getting IP address"
1302 self.logger.debug(logging_text + step)
1303 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1304 user=None, pub_key=None)
1305 credentials = {"hostname": rw_mgmt_ip}
1306 # get username
1307 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1308 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user are
1309 # merged. Meanwhile, get the username from initial-config-primitive
1310 if not username and initial_config_primitive_list:
1311 for config_primitive in initial_config_primitive_list:
1312 for param in config_primitive.get("parameter", ()):
1313 if param["name"] == "ssh-username":
1314 username = param["value"]
1315 break
1316 if not username:
1317 raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
1318 "'config-access.ssh-access.default-user'")
1319 credentials["username"] = username
1320 # n2vc_redesign STEP 3.2
1321
1322 self._write_configuration_status(
1323 nsr_id=nsr_id,
1324 vca_index=vca_index,
1325 status='REGISTERING',
1326 element_under_configuration=element_under_configuration,
1327 element_type=element_type
1328 )
1329
1330 step = "register execution environment {}".format(credentials)
1331 self.logger.debug(logging_text + step)
1332 ee_id = await self.vca_map[vca_type].register_execution_environment(
1333 credentials=credentials,
1334 namespace=namespace,
1335 db_dict=db_dict,
1336 cloud_name=vca_cloud,
1337 credential_name=vca_cloud_credential,
1338 )
1339
1340 # for compatibility with MON/POL modules, they need the model and application name at the database
1341 # TODO ask MON/POL whether it is still needed to assume the format "model_name.application_name"
1342 ee_id_parts = ee_id.split('.')
1343 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1344 if len(ee_id_parts) >= 2:
1345 model_name = ee_id_parts[0]
1346 application_name = ee_id_parts[1]
1347 db_nsr_update[db_update_entry + "model"] = model_name
1348 db_nsr_update[db_update_entry + "application"] = application_name
1349
1350 # n2vc_redesign STEP 3.3
1351 step = "Install configuration Software"
1352
1353 self._write_configuration_status(
1354 nsr_id=nsr_id,
1355 vca_index=vca_index,
1356 status='INSTALLING SW',
1357 element_under_configuration=element_under_configuration,
1358 element_type=element_type,
1359 other_update=db_nsr_update
1360 )
1361
1362 # TODO check if already done
1363 self.logger.debug(logging_text + step)
1364 config = None
1365 if vca_type == "native_charm":
1366 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1367 if config_primitive:
1368 config = self._map_primitive_params(
1369 config_primitive,
1370 {},
1371 deploy_params
1372 )
1373 num_units = 1
1374 if vca_type == "lxc_proxy_charm":
1375 if element_type == "NS":
1376 num_units = db_nsr.get("config-units") or 1
1377 elif element_type == "VNF":
1378 num_units = db_vnfr.get("config-units") or 1
1379 elif element_type == "VDU":
1380 for v in db_vnfr["vdur"]:
1381 if vdu_id == v["vdu-id-ref"]:
1382 num_units = v.get("config-units") or 1
1383 break
1384 if vca_type != "k8s_proxy_charm":
1385 await self.vca_map[vca_type].install_configuration_sw(
1386 ee_id=ee_id,
1387 artifact_path=artifact_path,
1388 db_dict=db_dict,
1389 config=config,
1390 num_units=num_units,
1391 )
1392
1393 # write in db flag of configuration_sw already installed
1394 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1395
1396 # add relations for this VCA (wait for other peers related with this VCA)
1397 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1398 vca_index=vca_index, vca_type=vca_type)
1399
1400 # if SSH access is required, then get the execution environment SSH public key;
1401 # if native charm, we have already waited for the VM to be up
1402 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1403 pub_key = None
1404 user = None
1405 # self.logger.debug("get ssh key block")
1406 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
1407 # self.logger.debug("ssh key needed")
1408 # Needed to inject a ssh key
1409 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1410 step = "Install configuration Software, getting public ssh key"
1411 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
1412
1413 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
1414 else:
1415 # self.logger.debug("no need to get ssh key")
1416 step = "Waiting to VM being up and getting IP address"
1417 self.logger.debug(logging_text + step)
1418
1419 # n2vc_redesign STEP 5.1
1420 # wait for RO to report the VM ip-address and insert pub_key into the VM
1421 if vnfr_id:
1422 if kdu_name:
1423 rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
1424 else:
1425 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
1426 vdu_index, user=user, pub_key=pub_key)
1427 else:
1428 rw_mgmt_ip = None # This is for a NS configuration
1429
1430 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
1431
1432 # store rw_mgmt_ip in deploy params for later replacement
1433 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1434
1435 # n2vc_redesign STEP 6 Execute initial config primitive
1436 step = 'execute initial config primitive'
1437
1438 # wait for dependent primitives execution (NS -> VNF -> VDU)
1439 if initial_config_primitive_list:
1440 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1441
1442 # set the stage according to the element type: vdu, kdu, vnf or ns
1443 my_vca = vca_deployed_list[vca_index]
1444 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1445 # VDU or KDU
1446 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
1447 elif my_vca.get("member-vnf-index"):
1448 # VNF
1449 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
1450 else:
1451 # NS
1452 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
1453
1454 self._write_configuration_status(
1455 nsr_id=nsr_id,
1456 vca_index=vca_index,
1457 status='EXECUTING PRIMITIVE'
1458 )
1459
1460 self._write_op_status(
1461 op_id=nslcmop_id,
1462 stage=stage
1463 )
1464
1465 check_if_terminated_needed = True
1466 for initial_config_primitive in initial_config_primitive_list:
1467 # adding information on the vca_deployed if it is a NS execution environment
1468 if not vca_deployed["member-vnf-index"]:
1469 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
1470 # TODO check if already done
1471 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
1472
1473 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1474 self.logger.debug(logging_text + step)
1475 await self.vca_map[vca_type].exec_primitive(
1476 ee_id=ee_id,
1477 primitive_name=initial_config_primitive["name"],
1478 params_dict=primitive_params_,
1479 db_dict=db_dict
1480 )
1481 # Once a primitive has been executed, record at db whether terminate primitives will need to be executed
1482 if check_if_terminated_needed:
1483 if config_descriptor.get('terminate-config-primitive'):
1484 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1485 check_if_terminated_needed = False
1486
1487 # TODO register in database that primitive is done
1488
1489 # STEP 7 Configure metrics
1490 if vca_type == "helm" or vca_type == "helm-v3":
1491 prometheus_jobs = await self.add_prometheus_metrics(
1492 ee_id=ee_id,
1493 artifact_path=artifact_path,
1494 ee_config_descriptor=ee_config_descriptor,
1495 vnfr_id=vnfr_id,
1496 nsr_id=nsr_id,
1497 target_ip=rw_mgmt_ip,
1498 )
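# keep the created Prometheus job ids so that they can be removed when this VCA is destroyed (see destroy_N2VC)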
1499 if prometheus_jobs:
1500 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1501
1502 step = "instantiated at VCA"
1503 self.logger.debug(logging_text + step)
1504
1505 self._write_configuration_status(
1506 nsr_id=nsr_id,
1507 vca_index=vca_index,
1508 status='READY'
1509 )
1510
1511 except Exception as e: # TODO not use Exception but N2VC exception
1512 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
1513 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1514 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
1515 self._write_configuration_status(
1516 nsr_id=nsr_id,
1517 vca_index=vca_index,
1518 status='BROKEN'
1519 )
1520 raise LcmException("{} {}".format(step, e)) from e
1521
1522 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
1523 error_description: str = None, error_detail: str = None, other_update: dict = None):
1524 """
1525 Update db_nsr fields.
1526 :param nsr_id:
1527 :param ns_state:
1528 :param current_operation:
1529 :param current_operation_id:
1530 :param error_description:
1531 :param error_detail:
1532 :param other_update: other required database changes, written in the same update if provided
1533 :return:
1534 """
1535 try:
1536 db_dict = other_update or {}
1537 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1538 db_dict["_admin.current-operation"] = current_operation_id
1539 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
1540 db_dict["currentOperation"] = current_operation
1541 db_dict["currentOperationID"] = current_operation_id
1542 db_dict["errorDescription"] = error_description
1543 db_dict["errorDetail"] = error_detail
1544
1545 if ns_state:
1546 db_dict["nsState"] = ns_state
1547 self.update_db_2("nsrs", nsr_id, db_dict)
1548 except DbException as e:
1549 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1550
1551 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1552 operation_state: str = None, other_update: dict = None):
1553 try:
1554 db_dict = other_update or {}
1555 db_dict['queuePosition'] = queuePosition
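# 'stage' may be a list [stage, step, vim-progress]: the first element goes to 'stage' and the joined list to 'detailed-status'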
1556 if isinstance(stage, list):
1557 db_dict['stage'] = stage[0]
1558 db_dict['detailed-status'] = " ".join(stage)
1559 elif stage is not None:
1560 db_dict['stage'] = str(stage)
1561
1562 if error_message is not None:
1563 db_dict['errorMessage'] = error_message
1564 if operation_state is not None:
1565 db_dict['operationState'] = operation_state
1566 db_dict["statusEnteredTime"] = time()
1567 self.update_db_2("nslcmops", op_id, db_dict)
1568 except DbException as e:
1569 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1570
1571 def _write_all_config_status(self, db_nsr: dict, status: str):
1572 try:
1573 nsr_id = db_nsr["_id"]
1574 # configurationStatus
1575 config_status = db_nsr.get('configurationStatus')
1576 if config_status:
1577 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1578 enumerate(config_status) if v}
1579 # update status
1580 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1581
1582 except DbException as e:
1583 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1584
1585 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
1586 element_under_configuration: str = None, element_type: str = None,
1587 other_update: dict = None):
1588
1589 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1590 # .format(vca_index, status))
1591
1592 try:
1593 db_path = 'configurationStatus.{}.'.format(vca_index)
1594 db_dict = other_update or {}
1595 if status:
1596 db_dict[db_path + 'status'] = status
1597 if element_under_configuration:
1598 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1599 if element_type:
1600 db_dict[db_path + 'elementType'] = element_type
1601 self.update_db_2("nsrs", nsr_id, db_dict)
1602 except DbException as e:
1603 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1604 .format(status, nsr_id, vca_index, e))
1605
1606 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1607 """
1608 Checks and computes the placement (vim account where to deploy). If it is decided by an external tool, it
1609 sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
1610 Database is used because the result can be obtained from a different LCM worker in case of HA.
1611 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1612 :param db_nslcmop: database content of nslcmop
1613 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
1614 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfrs with the
1615 computed 'vim-account-id'
1616 """
1617 modified = False
1618 nslcmop_id = db_nslcmop['_id']
1619 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1620 if placement_engine == "PLA":
1621 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1622 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
1623 db_poll_interval = 5
1624 wait = db_poll_interval * 10
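# poll the database every 5 seconds, for roughly 50 seconds in total, waiting for PLA to write its result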
1625 pla_result = None
1626 while not pla_result and wait >= 0:
1627 await asyncio.sleep(db_poll_interval)
1628 wait -= db_poll_interval
1629 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
1630 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1631
1632 if not pla_result:
1633 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
1634
1635 for pla_vnf in pla_result['vnf']:
1636 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1637 if not pla_vnf.get('vimAccountId') or not vnfr:
1638 continue
1639 modified = True
1640 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
1641 # Modifies db_vnfrs
1642 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
1643 return modified
1644
1645 def update_nsrs_with_pla_result(self, params):
1646 try:
1647 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1648 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1649 except Exception as e:
1650 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1651
1652 async def instantiate(self, nsr_id, nslcmop_id):
1653 """
1654
1655 :param nsr_id: ns instance to deploy
1656 :param nslcmop_id: operation to run
1657 :return:
1658 """
1659
1660 # Try to lock HA task here
1661 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1662 if not task_is_locked_by_me:
1663 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
1664 return
1665
1666 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1667 self.logger.debug(logging_text + "Enter")
1668
1669 # get all needed from database
1670
1671 # database nsrs record
1672 db_nsr = None
1673
1674 # database nslcmops record
1675 db_nslcmop = None
1676
1677 # update operation on nsrs
1678 db_nsr_update = {}
1679 # update operation on nslcmops
1680 db_nslcmop_update = {}
1681
1682 nslcmop_operation_state = None
1683 db_vnfrs = {} # vnf's info indexed by member-index
1684 # n2vc_info = {}
1685 tasks_dict_info = {} # from task to info text
1686 exc = None
1687 error_list = []
1688 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1689 # ^ stage, step, VIM progress
1690 try:
1691 # wait for any previous tasks in process
1692 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1693
1694 stage[1] = "Sync filesystem from database."
1695 self.fs.sync() # TODO, make use of partial sync, only for the needed packages
1696
1697 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
1698 stage[1] = "Reading from database."
1699 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
1700 db_nsr_update["detailed-status"] = "creating"
1701 db_nsr_update["operational-status"] = "init"
1702 self._write_ns_status(
1703 nsr_id=nsr_id,
1704 ns_state="BUILDING",
1705 current_operation="INSTANTIATING",
1706 current_operation_id=nslcmop_id,
1707 other_update=db_nsr_update
1708 )
1709 self._write_op_status(
1710 op_id=nslcmop_id,
1711 stage=stage,
1712 queuePosition=0
1713 )
1714
1715 # read from db: operation
1716 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
1717 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
1718 ns_params = db_nslcmop.get("operationParams")
1719 if ns_params and ns_params.get("timeout_ns_deploy"):
1720 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1721 else:
1722 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
1723
1724 # read from db: ns
1725 stage[1] = "Getting nsr={} from db.".format(nsr_id)
1726 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1727 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
1728 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
1729 db_nsr["nsd"] = nsd
1730 # nsr_name = db_nsr["name"] # TODO short-name??
1731
1732 # read from db: vnf's of this ns
1733 stage[1] = "Getting vnfrs from db."
1734 self.logger.debug(logging_text + stage[1])
1735 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
1736
1737 # read from db: vnfd's for every vnf
1738 db_vnfds = [] # every vnfd data
1739
1740 # for each vnf in ns, read vnfd
1741 for vnfr in db_vnfrs_list:
1742 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
1743 vnfd_id = vnfr["vnfd-id"]
1744 vnfd_ref = vnfr["vnfd-ref"]
1745
1746 # if we do not have this vnfd yet, read it from db
1747 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["_id"] == vnfd_id):
1748 # read from db
1749 stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
1750 self.logger.debug(logging_text + stage[1])
1751 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
1752
1753 # store vnfd
1754 db_vnfds.append(vnfd)
1755
1756 # Get or generate the _admin.deployed.VCA list
1757 vca_deployed_list = None
1758 if db_nsr["_admin"].get("deployed"):
1759 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1760 if vca_deployed_list is None:
1761 vca_deployed_list = []
1762 configuration_status_list = []
1763 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
1764 db_nsr_update["configurationStatus"] = configuration_status_list
1765 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
1766 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
1767 elif isinstance(vca_deployed_list, dict):
1768 # maintain backward compatibility. Change a dict to list at database
1769 vca_deployed_list = list(vca_deployed_list.values())
1770 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
1771 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
1772
1773 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
1774 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1775 db_nsr_update["_admin.deployed.RO.vnfd"] = []
1776
1777 # set state to INSTANTIATED. Once INSTANTIATED, NBI will not delete it directly
1778 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1779 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1780 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})
1781
1782 # n2vc_redesign STEP 2 Deploy Network Scenario
1783 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
1784 self._write_op_status(
1785 op_id=nslcmop_id,
1786 stage=stage
1787 )
1788
1789 stage[1] = "Deploying KDUs."
1790 # self.logger.debug(logging_text + "Before deploy_kdus")
1791 # Call to deploy_kdus in case exists the "vdu:kdu" param
1792 await self.deploy_kdus(
1793 logging_text=logging_text,
1794 nsr_id=nsr_id,
1795 nslcmop_id=nslcmop_id,
1796 db_vnfrs=db_vnfrs,
1797 db_vnfds=db_vnfds,
1798 task_instantiation_info=tasks_dict_info,
1799 )
1800
1801 stage[1] = "Getting VCA public key."
1802 # n2vc_redesign STEP 1 Get VCA public ssh-key
1803 # feature 1429. Add n2vc public key to needed VMs
1804 n2vc_key = self.n2vc.get_public_key()
1805 n2vc_key_list = [n2vc_key]
1806 if self.vca_config.get("public_key"):
1807 n2vc_key_list.append(self.vca_config["public_key"])
1808
1809 stage[1] = "Deploying NS at VIM."
1810 task_ro = asyncio.ensure_future(
1811 self.instantiate_RO(
1812 logging_text=logging_text,
1813 nsr_id=nsr_id,
1814 nsd=nsd,
1815 db_nsr=db_nsr,
1816 db_nslcmop=db_nslcmop,
1817 db_vnfrs=db_vnfrs,
1818 db_vnfds=db_vnfds,
1819 n2vc_key_list=n2vc_key_list,
1820 stage=stage
1821 )
1822 )
1823 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
1824 tasks_dict_info[task_ro] = "Deploying at VIM"
1825
1826 # n2vc_redesign STEP 3 to 6 Deploy N2VC
1827 stage[1] = "Deploying Execution Environments."
1828 self.logger.debug(logging_text + stage[1])
1829
1830 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
1831 for vnf_profile in get_vnf_profiles(nsd):
1832 vnfd_id = vnf_profile["vnfd-id"]
1833 vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
1834 member_vnf_index = str(vnf_profile["id"])
1835 db_vnfr = db_vnfrs[member_vnf_index]
1836 base_folder = vnfd["_admin"]["storage"]
1837 vdu_id = None
1838 vdu_index = 0
1839 vdu_name = None
1840 kdu_name = None
1841
1842 # Get additional parameters
1843 deploy_params = {"OSM": get_osm_params(db_vnfr)}
1844 if db_vnfr.get("additionalParamsForVnf"):
1845 deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))
1846
1847 descriptor_config = get_configuration(vnfd, vnfd["id"])
1848 if descriptor_config:
1849 self._deploy_n2vc(
1850 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
1851 db_nsr=db_nsr,
1852 db_vnfr=db_vnfr,
1853 nslcmop_id=nslcmop_id,
1854 nsr_id=nsr_id,
1855 nsi_id=nsi_id,
1856 vnfd_id=vnfd_id,
1857 vdu_id=vdu_id,
1858 kdu_name=kdu_name,
1859 member_vnf_index=member_vnf_index,
1860 vdu_index=vdu_index,
1861 vdu_name=vdu_name,
1862 deploy_params=deploy_params,
1863 descriptor_config=descriptor_config,
1864 base_folder=base_folder,
1865 task_instantiation_info=tasks_dict_info,
1866 stage=stage
1867 )
1868
1869 # Deploy charms for each VDU that supports one.
1870 for vdud in get_vdu_list(vnfd):
1871 vdu_id = vdud["id"]
1872 descriptor_config = get_configuration(vnfd, vdu_id)
1873 vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)
1874
1875 if vdur.get("additionalParams"):
1876 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
1877 else:
1878 deploy_params_vdu = deploy_params
1879 deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=0)
1880 vdud_count = get_vdu_profile(vnfd, vdu_id).get("max-number-of-instances", 1)
1881
1882 self.logger.debug("VDUD > {}".format(vdud))
1883 self.logger.debug("Descriptor config > {}".format(descriptor_config))
1884 if descriptor_config:
1885 vdu_name = None
1886 kdu_name = None
1887 for vdu_index in range(vdud_count):
1888 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
1889 self._deploy_n2vc(
1890 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
1891 member_vnf_index, vdu_id, vdu_index),
1892 db_nsr=db_nsr,
1893 db_vnfr=db_vnfr,
1894 nslcmop_id=nslcmop_id,
1895 nsr_id=nsr_id,
1896 nsi_id=nsi_id,
1897 vnfd_id=vnfd_id,
1898 vdu_id=vdu_id,
1899 kdu_name=kdu_name,
1900 member_vnf_index=member_vnf_index,
1901 vdu_index=vdu_index,
1902 vdu_name=vdu_name,
1903 deploy_params=deploy_params_vdu,
1904 descriptor_config=descriptor_config,
1905 base_folder=base_folder,
1906 task_instantiation_info=tasks_dict_info,
1907 stage=stage
1908 )
1909 for kdud in get_kdu_list(vnfd):
1910 kdu_name = kdud["name"]
1911 descriptor_config = get_configuration(vnfd, kdu_name)
1912 if descriptor_config:
1913 vdu_id = None
1914 vdu_index = 0
1915 vdu_name = None
1916 kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
1917 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
1918 if kdur.get("additionalParams"):
1919 deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"])
1920
1921 self._deploy_n2vc(
1922 logging_text=logging_text,
1923 db_nsr=db_nsr,
1924 db_vnfr=db_vnfr,
1925 nslcmop_id=nslcmop_id,
1926 nsr_id=nsr_id,
1927 nsi_id=nsi_id,
1928 vnfd_id=vnfd_id,
1929 vdu_id=vdu_id,
1930 kdu_name=kdu_name,
1931 member_vnf_index=member_vnf_index,
1932 vdu_index=vdu_index,
1933 vdu_name=vdu_name,
1934 deploy_params=deploy_params_kdu,
1935 descriptor_config=descriptor_config,
1936 base_folder=base_folder,
1937 task_instantiation_info=tasks_dict_info,
1938 stage=stage
1939 )
1940
1941 # Check if this NS has a charm configuration
1942 descriptor_config = nsd.get("ns-configuration")
1943 if descriptor_config and descriptor_config.get("juju"):
1944 vnfd_id = None
1945 db_vnfr = None
1946 member_vnf_index = None
1947 vdu_id = None
1948 kdu_name = None
1949 vdu_index = 0
1950 vdu_name = None
1951
1952 # Get additional parameters
1953 deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
1954 if db_nsr.get("additionalParamsForNs"):
1955 deploy_params.update(parse_yaml_strings(db_nsr["additionalParamsForNs"].copy()))
1956 base_folder = nsd["_admin"]["storage"]
1957 self._deploy_n2vc(
1958 logging_text=logging_text,
1959 db_nsr=db_nsr,
1960 db_vnfr=db_vnfr,
1961 nslcmop_id=nslcmop_id,
1962 nsr_id=nsr_id,
1963 nsi_id=nsi_id,
1964 vnfd_id=vnfd_id,
1965 vdu_id=vdu_id,
1966 kdu_name=kdu_name,
1967 member_vnf_index=member_vnf_index,
1968 vdu_index=vdu_index,
1969 vdu_name=vdu_name,
1970 deploy_params=deploy_params,
1971 descriptor_config=descriptor_config,
1972 base_folder=base_folder,
1973 task_instantiation_info=tasks_dict_info,
1974 stage=stage
1975 )
1976
1977 # the rest of the work is done in the finally block
1978
1979 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
1980 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
1981 exc = e
1982 except asyncio.CancelledError:
1983 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
1984 exc = "Operation was cancelled"
1985 except Exception as e:
1986 exc = traceback.format_exc()
1987 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
1988 finally:
1989 if exc:
1990 error_list.append(str(exc))
1991 try:
1992 # wait for pending tasks
1993 if tasks_dict_info:
1994 stage[1] = "Waiting for instantiate pending tasks."
1995 self.logger.debug(logging_text + stage[1])
1996 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
1997 stage, nslcmop_id, nsr_id=nsr_id)
1998 stage[1] = stage[2] = ""
1999 except asyncio.CancelledError:
2000 error_list.append("Cancelled")
2001 # TODO cancel all tasks
2002 except Exception as exc:
2003 error_list.append(str(exc))
2004
2005 # update operation-status
2006 db_nsr_update["operational-status"] = "running"
2007 # let's begin with VCA 'configured' status (later we can change it)
2008 db_nsr_update["config-status"] = "configured"
2009 for task, task_name in tasks_dict_info.items():
2010 if not task.done() or task.cancelled() or task.exception():
2011 if task_name.startswith(self.task_name_deploy_vca):
2012 # A N2VC task is pending
2013 db_nsr_update["config-status"] = "failed"
2014 else:
2015 # RO or KDU task is pending
2016 db_nsr_update["operational-status"] = "failed"
2017
2018 # update status at database
2019 if error_list:
2020 error_detail = ". ".join(error_list)
2021 self.logger.error(logging_text + error_detail)
2022 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
2023 error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])
2024
2025 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
2026 db_nslcmop_update["detailed-status"] = error_detail
2027 nslcmop_operation_state = "FAILED"
2028 ns_state = "BROKEN"
2029 else:
2030 error_detail = None
2031 error_description_nsr = error_description_nslcmop = None
2032 ns_state = "READY"
2033 db_nsr_update["detailed-status"] = "Done"
2034 db_nslcmop_update["detailed-status"] = "Done"
2035 nslcmop_operation_state = "COMPLETED"
2036
2037 if db_nsr:
2038 self._write_ns_status(
2039 nsr_id=nsr_id,
2040 ns_state=ns_state,
2041 current_operation="IDLE",
2042 current_operation_id=None,
2043 error_description=error_description_nsr,
2044 error_detail=error_detail,
2045 other_update=db_nsr_update
2046 )
2047 self._write_op_status(
2048 op_id=nslcmop_id,
2049 stage="",
2050 error_message=error_description_nslcmop,
2051 operation_state=nslcmop_operation_state,
2052 other_update=db_nslcmop_update,
2053 )
2054
2055 if nslcmop_operation_state:
2056 try:
2057 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
2058 "operationState": nslcmop_operation_state},
2059 loop=self.loop)
2060 except Exception as e:
2061 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2062
2063 self.logger.debug(logging_text + "Exit")
2064 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2065
2066 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2067 timeout: int = 3600, vca_type: str = None) -> bool:
2068
2069 # steps:
2070 # 1. find all relations for this VCA
2071 # 2. wait for other peers related
2072 # 3. add relations
2073
2074 try:
2075 vca_type = vca_type or "lxc_proxy_charm"
2076
2077 # STEP 1: find all relations for this VCA
2078
2079 # read nsr record
2080 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2081 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2082
2083 # this VCA data
2084 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2085
2086 # read all ns-configuration relations
2087 ns_relations = list()
2088 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
2089 if db_ns_relations:
2090 for r in db_ns_relations:
2091 # check if this VCA is in the relation
2092 if my_vca.get('member-vnf-index') in\
2093 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2094 ns_relations.append(r)
2095
2096 # read all vnf-configuration relations
2097 vnf_relations = list()
2098 db_vnfd_list = db_nsr.get('vnfd-id')
2099 if db_vnfd_list:
2100 for vnfd in db_vnfd_list:
2101 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2102 db_vnf_relations = get_configuration(db_vnfd, db_vnfd["id"]).get("relation", [])
2103 if db_vnf_relations:
2104 for r in db_vnf_relations:
2105 # check if this VCA is in the relation
2106 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2107 vnf_relations.append(r)
2108
2109 # if no relations, terminate
2110 if not ns_relations and not vnf_relations:
2111 self.logger.debug(logging_text + ' No relations')
2112 return True
2113
2114 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2115
2116 # add all relations
2117 start = time()
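# retry loop: re-read the NSR every 5 seconds and add each relation once both peers are ready,
# until all relations are added, a peer is BROKEN, or the timeout expires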
2118 while True:
2119 # check timeout
2120 now = time()
2121 if now - start >= timeout:
2122 self.logger.error(logging_text + ' : timeout adding relations')
2123 return False
2124
2125 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
2126 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2127
2128 # for each defined NS relation, find the related VCAs
2129 for r in ns_relations.copy():
2130 from_vca_ee_id = None
2131 to_vca_ee_id = None
2132 from_vca_endpoint = None
2133 to_vca_endpoint = None
2134 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2135 for vca in vca_list:
2136 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2137 and vca.get('config_sw_installed'):
2138 from_vca_ee_id = vca.get('ee_id')
2139 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2140 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2141 and vca.get('config_sw_installed'):
2142 to_vca_ee_id = vca.get('ee_id')
2143 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2144 if from_vca_ee_id and to_vca_ee_id:
2145 # add relation
2146 await self.vca_map[vca_type].add_relation(
2147 ee_id_1=from_vca_ee_id,
2148 ee_id_2=to_vca_ee_id,
2149 endpoint_1=from_vca_endpoint,
2150 endpoint_2=to_vca_endpoint)
2151 # remove entry from relations list
2152 ns_relations.remove(r)
2153 else:
2154 # check failed peers
2155 try:
2156 vca_status_list = db_nsr.get('configurationStatus')
2157 if vca_status_list:
2158 for i in range(len(vca_list)):
2159 vca = vca_list[i]
2160 vca_status = vca_status_list[i]
2161 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2162 if vca_status.get('status') == 'BROKEN':
2163 # peer broken: remove relation from list
2164 ns_relations.remove(r)
2165 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2166 if vca_status.get('status') == 'BROKEN':
2167 # peer broken: remove relation from list
2168 ns_relations.remove(r)
2169 except Exception:
2170 # ignore
2171 pass
2172
2173 # for each defined VNF relation, find the related VCAs
2174 for r in vnf_relations.copy():
2175 from_vca_ee_id = None
2176 to_vca_ee_id = None
2177 from_vca_endpoint = None
2178 to_vca_endpoint = None
2179 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2180 for vca in vca_list:
2181 key_to_check = "vdu_id"
2182 if vca.get("vdu_id") is None:
2183 key_to_check = "vnfd_id"
2184 if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2185 from_vca_ee_id = vca.get('ee_id')
2186 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2187 if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2188 to_vca_ee_id = vca.get('ee_id')
2189 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2190 if from_vca_ee_id and to_vca_ee_id:
2191 # add relation
2192 await self.vca_map[vca_type].add_relation(
2193 ee_id_1=from_vca_ee_id,
2194 ee_id_2=to_vca_ee_id,
2195 endpoint_1=from_vca_endpoint,
2196 endpoint_2=to_vca_endpoint)
2197 # remove entry from relations list
2198 vnf_relations.remove(r)
2199 else:
2200 # check failed peers
2201 try:
2202 vca_status_list = db_nsr.get('configurationStatus')
2203 if vca_status_list:
2204 for i in range(len(vca_list)):
2205 vca = vca_list[i]
2206 vca_status = vca_status_list[i]
2207 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2208 if vca_status.get('status') == 'BROKEN':
2209 # peer broken: remove relation from list
2210 vnf_relations.remove(r)
2211 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2212 if vca_status.get('status') == 'BROKEN':
2213 # peer broken: remove relation from list
2214 vnf_relations.remove(r)
2215 except Exception:
2216 # ignore
2217 pass
2218
2219 # wait for next try
2220 await asyncio.sleep(5.0)
2221
2222 if not ns_relations and not vnf_relations:
2223 self.logger.debug('Relations added')
2224 break
2225
2226 return True
2227
2228 except Exception as e:
2229 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2230 return False
2231
2232 async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
2233 vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
2234
2235 try:
2236 k8sclustertype = k8s_instance_info["k8scluster-type"]
2237 # Instantiate kdu
2238 db_dict_install = {"collection": "nsrs",
2239 "filter": {"_id": nsr_id},
2240 "path": nsr_db_path}
2241
2242 kdu_instance = self.k8scluster_map[k8sclustertype].generate_kdu_instance_name(
2243 db_dict=db_dict_install,
2244 kdu_model=k8s_instance_info["kdu-model"],
2245 kdu_name=k8s_instance_info["kdu-name"],
2246 )
2247 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2248 await self.k8scluster_map[k8sclustertype].install(
2249 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2250 kdu_model=k8s_instance_info["kdu-model"],
2251 atomic=True,
2252 params=k8params,
2253 db_dict=db_dict_install,
2254 timeout=timeout,
2255 kdu_name=k8s_instance_info["kdu-name"],
2256 namespace=k8s_instance_info["namespace"],
2257 kdu_instance=kdu_instance,
2258 )
2259 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2260
2261 # Obtain the deployed services in order to get the management service ip
2262 services = await self.k8scluster_map[k8sclustertype].get_services(
2263 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2264 kdu_instance=kdu_instance,
2265 namespace=k8s_instance_info["namespace"])
2266
2267 # Obtain management service info (if exists)
2268 vnfr_update_dict = {}
2269 if services:
2270 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
2271 mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
2272 for mgmt_service in mgmt_services:
2273 for service in services:
2274 if service["name"].startswith(mgmt_service["name"]):
2275 # Mgmt service found, Obtain service ip
2276 ip = service.get("external_ip", service.get("cluster_ip"))
2277 if isinstance(ip, list) and len(ip) == 1:
2278 ip = ip[0]
2279
2280 vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
2281
2282 # Check whether the mgmt ip at the vnf level must also be updated
2283 service_external_cp = mgmt_service.get("external-connection-point-ref")
2284 if service_external_cp:
2285 if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
2286 vnfr_update_dict["ip-address"] = ip
2287
2288 break
2289 else:
2290 self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
2291
2292 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
2293 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
2294
2295 kdu_config = get_configuration(vnfd, k8s_instance_info["kdu-name"])
2296 if kdu_config and kdu_config.get("initial-config-primitive") and \
2297 get_juju_ee_ref(vnfd, k8s_instance_info["kdu-name"]) is None:
2298 initial_config_primitive_list = kdu_config.get("initial-config-primitive")
2299 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
2300
2301 for initial_config_primitive in initial_config_primitive_list:
2302 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})
2303
2304 await asyncio.wait_for(
2305 self.k8scluster_map[k8sclustertype].exec_primitive(
2306 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2307 kdu_instance=kdu_instance,
2308 primitive_name=initial_config_primitive["name"],
2309 params=primitive_params_, db_dict=db_dict_install),
2310 timeout=timeout)
2311
2312 except Exception as e:
2313 # Prepare update db with error and raise exception
2314 try:
2315 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
2316 self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
2317 except Exception:
2318 # ignore to keep original exception
2319 pass
2320 # reraise original error
2321 raise
2322
2323 return kdu_instance
2324
2325 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
2326 # Launch kdus if present in the descriptor
2327
2328 k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}}
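# cache of cluster-id -> cluster uuid per cluster type, so that each K8s cluster is resolved and initialized only once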
2329
2330 async def _get_cluster_id(cluster_id, cluster_type):
2331 nonlocal k8scluster_id_2_uuic
2332 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2333 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2334
2335 # check if the K8s cluster is still being created and wait for any related previous tasks to finish
2336 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2337 if task_dependency:
2338 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2339 self.logger.debug(logging_text + text)
2340 await asyncio.wait(task_dependency, timeout=3600)
2341
2342 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2343 if not db_k8scluster:
2344 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
2345
2346 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2347 if not k8s_id:
2348 if cluster_type == "helm-chart-v3":
2349 try:
2350 # backward compatibility for existing clusters that have not been initialized for helm v3
2351 k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials"))
2352 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials,
2353 reuse_cluster_uuid=cluster_id)
2354 db_k8scluster_update = {}
2355 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
2356 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
2357 db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw
2358 db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED"
2359 self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
2360 except Exception as e:
2361 self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e)))
2362 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2363 cluster_type))
2364 else:
2365 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".
2366 format(cluster_id, cluster_type))
2367 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2368 return k8s_id
2369
2370 logging_text += "Deploy kdus: "
2371 step = ""
2372 try:
2373 db_nsr_update = {"_admin.deployed.K8s": []}
2374 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2375
2376 index = 0
2377 updated_cluster_list = []
2378 updated_v3_cluster_list = []
2379
2380 for vnfr_data in db_vnfrs.values():
2381 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2382 # Step 0: Prepare and set parameters
2383 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
2384 vnfd_id = vnfr_data.get('vnfd-id')
2385 vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
2386 kdud = next(kdud for kdud in vnfd_with_id["kdu"] if kdud["name"] == kdur["kdu-name"])
2387 namespace = kdur.get("k8s-namespace")
2388 if kdur.get("helm-chart"):
2389 kdumodel = kdur["helm-chart"]
2390 # Default version: helm3, if helm-version is v2 assign v2
2391 k8sclustertype = "helm-chart-v3"
2392 self.logger.debug("kdur: {}".format(kdur))
2393 if kdur.get("helm-version") and kdur.get("helm-version") == "v2":
2394 k8sclustertype = "helm-chart"
2395 elif kdur.get("juju-bundle"):
2396 kdumodel = kdur["juju-bundle"]
2397 k8sclustertype = "juju-bundle"
2398 else:
2399 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2400 "juju-bundle. Maybe an old NBI version is running".
2401 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
2402 # check if kdumodel is a file and exists
2403 try:
2404 vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
2405 storage = deep_get(vnfd_with_id, ('_admin', 'storage'))
2406 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2407 # path format: /vnfdid/pkg-dir/helm-charts|juju-bundles/kdumodel
2408 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
2409 kdumodel)
2410 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2411 kdumodel = self.fs.path + filename
2412 except (asyncio.TimeoutError, asyncio.CancelledError):
2413 raise
2414 except Exception: # it is not a file
2415 pass
2416
2417 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2418 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
2419 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
2420
2421 # Synchronize repos
2422 if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\
2423 or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list):
2424 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2425 self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid))
2426 if del_repo_list or added_repo_dict:
2427 if k8sclustertype == "helm-chart":
2428 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2429 updated = {'_admin.helm_charts_added.' +
2430 item: name for item, name in added_repo_dict.items()}
2431 updated_cluster_list.append(cluster_uuid)
2432 elif k8sclustertype == "helm-chart-v3":
2433 unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list}
2434 updated = {'_admin.helm_charts_v3_added.' +
2435 item: name for item, name in added_repo_dict.items()}
2436 updated_v3_cluster_list.append(cluster_uuid)
2437 self.logger.debug(logging_text + "repos synchronized on k8s cluster "
2438 "'{}' to_delete: {}, to_add: {}".
2439 format(k8s_cluster_id, del_repo_list, added_repo_dict))
2440 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2441
2442 # Instantiate kdu
2443 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2444 kdur["kdu-name"], k8s_cluster_id)
2445 k8s_instance_info = {"kdu-instance": None,
2446 "k8scluster-uuid": cluster_uuid,
2447 "k8scluster-type": k8sclustertype,
2448 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2449 "kdu-name": kdur["kdu-name"],
2450 "kdu-model": kdumodel,
2451 "namespace": namespace}
2452 db_path = "_admin.deployed.K8s.{}".format(index)
2453 db_nsr_update[db_path] = k8s_instance_info
2454 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2455 vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id)
2456 task = asyncio.ensure_future(
2457 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id,
2458 k8s_instance_info, k8params=desc_params, timeout=600))
2459 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
2460 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
2461
2462 index += 1
2463
2464 except (LcmException, asyncio.CancelledError):
2465 raise
2466 except Exception as e:
2467 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2468 if isinstance(e, (N2VCException, DbException)):
2469 self.logger.error(logging_text + msg)
2470 else:
2471 self.logger.critical(logging_text + msg, exc_info=True)
2472 raise LcmException(msg)
2473 finally:
2474 if db_nsr_update:
2475 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2476
2477 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
2478 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
2479 base_folder, task_instantiation_info, stage):
2480 # launch instantiate_N2VC in an asyncio task and register the task object
2481 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2482 # if not found, create one entry and update database
2483 # fill db_nsr._admin.deployed.VCA.<index>
2484
2485 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2486 if "execution-environment-list" in descriptor_config:
2487 ee_list = descriptor_config.get("execution-environment-list", [])
2488 else:  # other types such as script are not supported
2489 ee_list = []
2490
2491 for ee_item in ee_list:
2492 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2493 ee_item.get("helm-chart")))
2494 ee_descriptor_id = ee_item.get("id")
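# derive the vca_type from the execution environment item: a juju charm maps to lxc_proxy_charm, native_charm
# or k8s_proxy_charm; a helm-chart maps to helm (v2) or helm-v3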
2495 if ee_item.get("juju"):
2496 vca_name = ee_item['juju'].get('charm')
2497 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2498 if ee_item['juju'].get('cloud') == "k8s":
2499 vca_type = "k8s_proxy_charm"
2500 elif ee_item['juju'].get('proxy') is False:
2501 vca_type = "native_charm"
2502 elif ee_item.get("helm-chart"):
2503 vca_name = ee_item['helm-chart']
2504 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
2505 vca_type = "helm"
2506 else:
2507 vca_type = "helm-v3"
2508 else:
2509 self.logger.debug(logging_text + "skipping non juju neither charm configuration")
2510 continue
2511
2512 vca_index = -1
2513 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2514 if not vca_deployed:
2515 continue
2516 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2517 vca_deployed.get("vdu_id") == vdu_id and \
2518 vca_deployed.get("kdu_name") == kdu_name and \
2519 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2520 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
2521 break
2522 else:
2523 # not found, create one.
2524 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2525 if vdu_id:
2526 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2527 elif kdu_name:
2528 target += "/kdu/{}".format(kdu_name)
2529 vca_deployed = {
2530 "target_element": target,
2531 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
2532 "member-vnf-index": member_vnf_index,
2533 "vdu_id": vdu_id,
2534 "kdu_name": kdu_name,
2535 "vdu_count_index": vdu_index,
2536 "operational-status": "init", # TODO revise
2537 "detailed-status": "", # TODO revise
2538 "step": "initial-deploy", # TODO revise
2539 "vnfd_id": vnfd_id,
2540 "vdu_name": vdu_name,
2541 "type": vca_type,
2542 "ee_descriptor_id": ee_descriptor_id
2543 }
2544 vca_index += 1
2545
2546 # create VCA and configurationStatus in db
2547 db_dict = {
2548 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2549 "configurationStatus.{}".format(vca_index): dict()
2550 }
2551 self.update_db_2("nsrs", nsr_id, db_dict)
2552
2553 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2554
2555 self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
2556 self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
2557 self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))
2558
2559 # Launch task
2560 task_n2vc = asyncio.ensure_future(
2561 self.instantiate_N2VC(
2562 logging_text=logging_text,
2563 vca_index=vca_index,
2564 nsi_id=nsi_id,
2565 db_nsr=db_nsr,
2566 db_vnfr=db_vnfr,
2567 vdu_id=vdu_id,
2568 kdu_name=kdu_name,
2569 vdu_index=vdu_index,
2570 deploy_params=deploy_params,
2571 config_descriptor=descriptor_config,
2572 base_folder=base_folder,
2573 nslcmop_id=nslcmop_id,
2574 stage=stage,
2575 vca_type=vca_type,
2576 vca_name=vca_name,
2577 ee_config_descriptor=ee_item
2578 )
2579 )
2580 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2581 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2582 member_vnf_index or "", vdu_id or "")
2583
2584 @staticmethod
2585 def _create_nslcmop(nsr_id, operation, params):
2586 """
2587 Creates a ns-lcm-op content to be stored at database.
2588 :param nsr_id: internal id of the instance
2589 :param operation: instantiate, terminate, scale, action, ...
2590 :param params: user parameters for the operation
2591 :return: dictionary following SOL005 format
2592 """
2593 # Raise exception if invalid arguments
2594 if not (nsr_id and operation and params):
2595 raise LcmException(
2596 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2597 now = time()
2598 _id = str(uuid4())
2599 nslcmop = {
2600 "id": _id,
2601 "_id": _id,
2602 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2603 "operationState": "PROCESSING",
2604 "statusEnteredTime": now,
2605 "nsInstanceId": nsr_id,
2606 "lcmOperationType": operation,
2607 "startTime": now,
2608 "isAutomaticInvocation": False,
2609 "operationParams": params,
2610 "isCancelPending": False,
2611 "links": {
2612 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2613 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2614 }
2615 }
2616 return nslcmop
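# Hypothetical usage sketch (illustrative values only):
#   op = NsLcm._create_nslcmop("0d3a...", "action", {"member_vnf_index": "1", "primitive": "touch",
#                                                    "primitive_params": {}})
#   op["operationState"]  # -> "PROCESSING"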
2617
2618 def _format_additional_params(self, params):
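# values prefixed with "!!yaml " are parsed as YAML, e.g. (hypothetical) {"vdu-count": "!!yaml 3"} -> {"vdu-count": 3}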
2619 params = params or {}
2620 for key, value in params.items():
2621 if str(value).startswith("!!yaml "):
2622 params[key] = yaml.safe_load(value[7:])
2623 return params
2624
2625 def _get_terminate_primitive_params(self, seq, vnf_index):
2626 primitive = seq.get('name')
2627 primitive_params = {}
2628 params = {
2629 "member_vnf_index": vnf_index,
2630 "primitive": primitive,
2631 "primitive_params": primitive_params,
2632 }
2633 desc_params = {}
2634 return self._map_primitive_params(seq, params, desc_params)
2635
2636 # sub-operations
2637
2638 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2639 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2640 if op.get('operationState') == 'COMPLETED':
2641 # b. Skip sub-operation
2642 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2643 return self.SUBOPERATION_STATUS_SKIP
2644 else:
2645 # c. retry executing sub-operation
2646 # The sub-operation exists, and operationState != 'COMPLETED'
2647 # Update operationState = 'PROCESSING' to indicate a retry.
2648 operationState = 'PROCESSING'
2649 detailed_status = 'In progress'
2650 self._update_suboperation_status(
2651 db_nslcmop, op_index, operationState, detailed_status)
2652 # Return the sub-operation index
2653 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2654 # with arguments extracted from the sub-operation
2655 return op_index
2656
2657 # Find a sub-operation where all keys in a matching dictionary must match
2658 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2659 def _find_suboperation(self, db_nslcmop, match):
2660 if db_nslcmop and match:
2661 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2662 for i, op in enumerate(op_list):
2663 if all(op.get(k) == match[k] for k in match):
2664 return i
2665 return self.SUBOPERATION_STATUS_NOT_FOUND
2666
2667 # Update status for a sub-operation given its index
2668 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2669 # Update DB for HA tasks
2670 q_filter = {'_id': db_nslcmop['_id']}
2671 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2672 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2673 self.db.set_one("nslcmops",
2674 q_filter=q_filter,
2675 update_dict=update_dict,
2676 fail_on_empty=False)
2677
2678 # Add sub-operation, return the index of the added sub-operation
2679 # Optionally, set operationState, detailed-status, and operationType
2680 # Status and type are currently set for 'scale' sub-operations:
2681 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2682 # 'detailed-status' : status message
2683 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2684 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
2685 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2686 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
2687 RO_nsr_id=None, RO_scaling_info=None):
2688 if not db_nslcmop:
2689 return self.SUBOPERATION_STATUS_NOT_FOUND
2690 # Get the "_admin.operations" list, if it exists
2691 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2692 op_list = db_nslcmop_admin.get('operations')
2693 # Create or append to the "_admin.operations" list
2694 new_op = {'member_vnf_index': vnf_index,
2695 'vdu_id': vdu_id,
2696 'vdu_count_index': vdu_count_index,
2697 'primitive': primitive,
2698 'primitive_params': mapped_primitive_params}
2699 if operationState:
2700 new_op['operationState'] = operationState
2701 if detailed_status:
2702 new_op['detailed-status'] = detailed_status
2703 if operationType:
2704 new_op['lcmOperationType'] = operationType
2705 if RO_nsr_id:
2706 new_op['RO_nsr_id'] = RO_nsr_id
2707 if RO_scaling_info:
2708 new_op['RO_scaling_info'] = RO_scaling_info
2709 if not op_list:
2710 # No existing operations, create key 'operations' with current operation as first list element
2711 db_nslcmop_admin.update({'operations': [new_op]})
2712 op_list = db_nslcmop_admin.get('operations')
2713 else:
2714 # Existing operations, append operation to list
2715 op_list.append(new_op)
2716
2717 db_nslcmop_update = {'_admin.operations': op_list}
2718 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2719 op_index = len(op_list) - 1
2720 return op_index
2721
2722 # Helper methods for scale() sub-operations
2723
2724 # pre-scale/post-scale:
2725 # Check for 3 different cases:
2726 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2727 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
2728 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
2729 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2730 operationType, RO_nsr_id=None, RO_scaling_info=None):
2731 # Find this sub-operation
2732 if RO_nsr_id and RO_scaling_info:
2733 operationType = 'SCALE-RO'
2734 match = {
2735 'member_vnf_index': vnf_index,
2736 'RO_nsr_id': RO_nsr_id,
2737 'RO_scaling_info': RO_scaling_info,
2738 }
2739 else:
2740 match = {
2741 'member_vnf_index': vnf_index,
2742 'primitive': vnf_config_primitive,
2743 'primitive_params': primitive_params,
2744 'lcmOperationType': operationType
2745 }
2746 op_index = self._find_suboperation(db_nslcmop, match)
2747 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
2748 # a. New sub-operation
2749 # The sub-operation does not exist, add it.
2750 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2751 # The following parameters are set to None for all kind of scaling:
2752 vdu_id = None
2753 vdu_count_index = None
2754 vdu_name = None
2755 if RO_nsr_id and RO_scaling_info:
2756 vnf_config_primitive = None
2757 primitive_params = None
2758 else:
2759 RO_nsr_id = None
2760 RO_scaling_info = None
2761 # Initial status for sub-operation
2762 operationState = 'PROCESSING'
2763 detailed_status = 'In progress'
2764 # Add sub-operation for pre/post-scaling (zero or more operations)
2765 self._add_suboperation(db_nslcmop,
2766 vnf_index,
2767 vdu_id,
2768 vdu_count_index,
2769 vdu_name,
2770 vnf_config_primitive,
2771 primitive_params,
2772 operationState,
2773 detailed_status,
2774 operationType,
2775 RO_nsr_id,
2776 RO_scaling_info)
2777 return self.SUBOPERATION_STATUS_NEW
2778 else:
2779 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2780 # or op_index (operationState != 'COMPLETED')
2781 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
2782
2783 # Function to return execution_environment id
2784
2785 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
2786 # TODO vdu_index_count
2787 for vca in vca_deployed_list:
2788 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2789 return vca["ee_id"]
2790
2791 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
2792 vca_index, destroy_ee=True, exec_primitives=True, scaling_in=False):
2793 """
2794 Execute the terminate primitives and destroy the execution environment (if destroy_ee=True)
2795 :param logging_text:
2796 :param db_nslcmop:
2797 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
2798 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
2799 :param vca_index: index in the database _admin.deployed.VCA
2800 :param destroy_ee: False to skip destroying it here, because all of them will be destroyed at once later
2801 :param exec_primitives: False to skip executing terminate primitives, because the configuration was not completed
2802 or did not execute properly
2803 :param scaling_in: True destroys the application, False destroys the model
2804 :return: None or exception
2805 """
2806
2807 self.logger.debug(
2808 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
2809 vca_index, vca_deployed, config_descriptor, destroy_ee
2810 )
2811 )
2812
2813 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
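# default to lxc_proxy_charm, likely for backward compatibility with VCA records created before 'type' was stored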
2814
2815 # execute terminate_primitives
2816 if exec_primitives:
2817 terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
2818 config_descriptor.get("terminate-config-primitive"), vca_deployed.get("ee_descriptor_id"))
2819 vdu_id = vca_deployed.get("vdu_id")
2820 vdu_count_index = vca_deployed.get("vdu_count_index")
2821 vdu_name = vca_deployed.get("vdu_name")
2822 vnf_index = vca_deployed.get("member-vnf-index")
2823 if terminate_primitives and vca_deployed.get("needed_terminate"):
2824 for seq in terminate_primitives:
2825 # For each sequence in list, get primitive and call _ns_execute_primitive()
2826 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
2827 vnf_index, seq.get("name"))
2828 self.logger.debug(logging_text + step)
2829 # Create the primitive for each sequence, i.e. "primitive": "touch"
2830 primitive = seq.get('name')
2831 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
2832
2833 # Add sub-operation
2834 self._add_suboperation(db_nslcmop,
2835 vnf_index,
2836 vdu_id,
2837 vdu_count_index,
2838 vdu_name,
2839 primitive,
2840 mapped_primitive_params)
2841 # Sub-operations: Call _ns_execute_primitive() instead of action()
2842 try:
2843 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
2844 mapped_primitive_params,
2845 vca_type=vca_type)
2846 except LcmException:
2847 # this happens when the VCA is not deployed. In this case there is nothing to terminate
2848 continue
2849 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
2850 if result not in result_ok:
2851 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
2852 "error {}".format(seq.get("name"), vnf_index, result_detail))
2853 # mark that this VCA no longer needs to be terminated
2854 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
2855 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
2856
2857 if vca_deployed.get("prometheus_jobs") and self.prometheus:
2858 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
2859
2860 if destroy_ee:
2861 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"], scaling_in=scaling_in)
2862
2863 async def _delete_all_N2VC(self, db_nsr: dict):
2864 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2865 namespace = "." + db_nsr["_id"]
2866 try:
2867 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2868 except N2VCNotFound: # already deleted. Skip
2869 pass
2870 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
2871
2872 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2873 """
2874 Terminates a deployment from RO
2875 :param logging_text:
2876 :param nsr_deployed: db_nsr._admin.deployed
2877 :param nsr_id:
2878 :param nslcmop_id:
2879 :param stage: list of strings with the content to write on db_nslcmop.detailed-status.
2880 this method updates only index 2, but the concatenated content of the whole list is written to the database
2881 :return:
2882 """
2883 db_nsr_update = {}
2884 failed_detail = []
2885 ro_nsr_id = ro_delete_action = None
2886 if nsr_deployed and nsr_deployed.get("RO"):
2887 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
2888 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
2889 try:
2890 if ro_nsr_id:
2891 stage[2] = "Deleting ns from VIM."
2892 db_nsr_update["detailed-status"] = " ".join(stage)
2893 self._write_op_status(nslcmop_id, stage)
2894 self.logger.debug(logging_text + stage[2])
2895 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2896 self._write_op_status(nslcmop_id, stage)
2897 desc = await self.RO.delete("ns", ro_nsr_id)
2898 ro_delete_action = desc["action_id"]
2899 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
2900 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2901 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2902 if ro_delete_action:
2903 # wait until NS is deleted from VIM
2904 stage[2] = "Waiting ns deleted from VIM."
2905 detailed_status_old = None
2906 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
2907 ro_delete_action))
2908 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2909 self._write_op_status(nslcmop_id, stage)
2910
2911 delete_timeout = 20 * 60 # 20 minutes
2912 while delete_timeout > 0:
2913 desc = await self.RO.show(
2914 "ns",
2915 item_id_name=ro_nsr_id,
2916 extra_item="action",
2917 extra_item_id=ro_delete_action)
2918
2919 # deploymentStatus
2920 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
2921
2922 ns_status, ns_status_info = self.RO.check_action_status(desc)
2923 if ns_status == "ERROR":
2924 raise ROclient.ROClientException(ns_status_info)
2925 elif ns_status == "BUILD":
2926 stage[2] = "Deleting from VIM {}".format(ns_status_info)
2927 elif ns_status == "ACTIVE":
2928 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2929 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2930 break
2931 else:
2932 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
2933 if stage[2] != detailed_status_old:
2934 detailed_status_old = stage[2]
2935 db_nsr_update["detailed-status"] = " ".join(stage)
2936 self._write_op_status(nslcmop_id, stage)
2937 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2938 await asyncio.sleep(5, loop=self.loop)
2939 delete_timeout -= 5
2940 else: # delete_timeout <= 0:
2941 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
2942
2943 except Exception as e:
2944 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2945 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2946 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2947 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2948 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2949 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
2950 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2951 failed_detail.append("delete conflict: {}".format(e))
2952 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
2953 else:
2954 failed_detail.append("delete error: {}".format(e))
2955 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
2956
2957 # Delete nsd
2958 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
2959 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
2960 try:
2961 stage[2] = "Deleting nsd from RO."
2962 db_nsr_update["detailed-status"] = " ".join(stage)
2963 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2964 self._write_op_status(nslcmop_id, stage)
2965 await self.RO.delete("nsd", ro_nsd_id)
2966 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
2967 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2968 except Exception as e:
2969 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2970 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2971 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
2972 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2973 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
2974 self.logger.debug(logging_text + failed_detail[-1])
2975 else:
2976 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
2977 self.logger.error(logging_text + failed_detail[-1])
2978
2979 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
2980 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
2981 if not vnf_deployed or not vnf_deployed["id"]:
2982 continue
2983 try:
2984 ro_vnfd_id = vnf_deployed["id"]
2985 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
2986 vnf_deployed["member-vnf-index"], ro_vnfd_id)
2987 db_nsr_update["detailed-status"] = " ".join(stage)
2988 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2989 self._write_op_status(nslcmop_id, stage)
2990 await self.RO.delete("vnfd", ro_vnfd_id)
2991 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
2992 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2993 except Exception as e:
2994 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2995 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2996 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
2997 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2998 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
2999 self.logger.debug(logging_text + failed_detail[-1])
3000 else:
3001 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
3002 self.logger.error(logging_text + failed_detail[-1])
3003
3004 if failed_detail:
3005 stage[2] = "Error deleting from VIM"
3006 else:
3007 stage[2] = "Deleted from VIM"
3008 db_nsr_update["detailed-status"] = " ".join(stage)
3009 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3010 self._write_op_status(nslcmop_id, stage)
3011
3012 if failed_detail:
3013 raise LcmException("; ".join(failed_detail))
3014
3015 async def terminate(self, nsr_id, nslcmop_id):
3016 # Try to lock HA task here
3017 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3018 if not task_is_locked_by_me:
3019 return
3020
3021 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3022 self.logger.debug(logging_text + "Enter")
3023 timeout_ns_terminate = self.timeout_ns_terminate
3024 db_nsr = None
3025 db_nslcmop = None
3026 operation_params = None
3027 exc = None
3028 error_list = [] # accumulates all error messages
3029 db_nslcmop_update = {}
3030 autoremove = False # autoremove after terminated
3031 tasks_dict_info = {}
3032 db_nsr_update = {}
3033 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3034 # ^ contains [stage, step, VIM-status]
3035 try:
3036 # wait for any previous tasks in process
3037 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3038
3039 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3040 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3041 operation_params = db_nslcmop.get("operationParams") or {}
3042 if operation_params.get("timeout_ns_terminate"):
3043 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3044 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3045 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3046
3047 db_nsr_update["operational-status"] = "terminating"
3048 db_nsr_update["config-status"] = "terminating"
3049 self._write_ns_status(
3050 nsr_id=nsr_id,
3051 ns_state="TERMINATING",
3052 current_operation="TERMINATING",
3053 current_operation_id=nslcmop_id,
3054 other_update=db_nsr_update
3055 )
3056 self._write_op_status(
3057 op_id=nslcmop_id,
3058 queuePosition=0,
3059 stage=stage
3060 )
3061 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
3062 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3063 return
3064
3065 stage[1] = "Getting vnf descriptors from db."
3066 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3067 db_vnfds_from_id = {}
3068 db_vnfds_from_member_index = {}
3069 # Loop over VNFRs
3070 for vnfr in db_vnfrs_list:
3071 vnfd_id = vnfr["vnfd-id"]
3072 if vnfd_id not in db_vnfds_from_id:
3073 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3074 db_vnfds_from_id[vnfd_id] = vnfd
3075 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
3076
3077 # Destroy individual execution environments when there are terminating primitives.
3078 # The rest of the EEs will be deleted at once
3079 # TODO - check before calling _destroy_N2VC
3080 # if not operation_params.get("skip_terminate_primitives"):#
3081 # or not vca.get("needed_terminate"):
3082 stage[0] = "Stage 2/3 execute terminating primitives."
3083 self.logger.debug(logging_text + stage[0])
3084 stage[1] = "Looking execution environment that needs terminate."
3085 self.logger.debug(logging_text + stage[1])
3086
3087 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
3088 config_descriptor = None
3089 if not vca or not vca.get("ee_id"):
3090 continue
3091 if not vca.get("member-vnf-index"):
3092 # ns
3093 config_descriptor = db_nsr.get("ns-configuration")
3094 elif vca.get("vdu_id"):
3095 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3096 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
3097 elif vca.get("kdu_name"):
3098 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3099 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
3100 else:
3101 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3102 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
3103 vca_type = vca.get("type")
3104 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3105 vca.get("needed_terminate"))
3106 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
3107 # pending native charms
3108 destroy_ee = vca_type in ("helm", "helm-v3", "native_charm")
3109 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
3110 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
3111 task = asyncio.ensure_future(
3112 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3113 destroy_ee, exec_terminate_primitives))
3114 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
3115
3116 # wait for pending tasks of terminate primitives
3117 if tasks_dict_info:
3118 self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
3119 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3120 min(self.timeout_charm_delete, timeout_ns_terminate),
3121 stage, nslcmop_id)
3122 tasks_dict_info.clear()
3123 if error_list:
3124 return # raise LcmException("; ".join(error_list))
3125
3126 # remove All execution environments at once
3127 stage[0] = "Stage 3/3 delete all."
3128
3129 if nsr_deployed.get("VCA"):
3130 stage[1] = "Deleting all execution environments."
3131 self.logger.debug(logging_text + stage[1])
3132 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3133 timeout=self.timeout_charm_delete))
3134 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3135 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
3136
3137 # Delete from k8scluster
3138 stage[1] = "Deleting KDUs."
3139 self.logger.debug(logging_text + stage[1])
3140 # print(nsr_deployed)
3141 for kdu in get_iterable(nsr_deployed, "K8s"):
3142 if not kdu or not kdu.get("kdu-instance"):
3143 continue
3144 kdu_instance = kdu.get("kdu-instance")
3145 if kdu.get("k8scluster-type") in self.k8scluster_map:
3146 task_delete_kdu_instance = asyncio.ensure_future(
3147 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3148 cluster_uuid=kdu.get("k8scluster-uuid"),
3149 kdu_instance=kdu_instance))
3150 else:
3151 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3152 format(kdu.get("k8scluster-type")))
3153 continue
3154 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
3155
3156 # remove from RO
3157 stage[1] = "Deleting ns from VIM."
3158 if self.ng_ro:
3159 task_delete_ro = asyncio.ensure_future(
3160 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3161 else:
3162 task_delete_ro = asyncio.ensure_future(
3163 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3164 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
3165
3166 # rest of the work is done in the finally block
3167
3168 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3169 self.logger.error(logging_text + "Exit Exception {}".format(e))
3170 exc = e
3171 except asyncio.CancelledError:
3172 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3173 exc = "Operation was cancelled"
3174 except Exception as e:
3175 exc = traceback.format_exc()
3176 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3177 finally:
3178 if exc:
3179 error_list.append(str(exc))
3180 try:
3181 # wait for pending tasks
3182 if tasks_dict_info:
3183 stage[1] = "Waiting for terminate pending tasks."
3184 self.logger.debug(logging_text + stage[1])
3185 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3186 stage, nslcmop_id)
3187 stage[1] = stage[2] = ""
3188 except asyncio.CancelledError:
3189 error_list.append("Cancelled")
3190 # TODO cancel all tasks
3191 except Exception as exc:
3192 error_list.append(str(exc))
3193 # update status at database
3194 if error_list:
3195 error_detail = "; ".join(error_list)
3196 # self.logger.error(logging_text + error_detail)
3197 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
3198 error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])
3199
3200 db_nsr_update["operational-status"] = "failed"
3201 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
3202 db_nslcmop_update["detailed-status"] = error_detail
3203 nslcmop_operation_state = "FAILED"
3204 ns_state = "BROKEN"
3205 else:
3206 error_detail = None
3207 error_description_nsr = error_description_nslcmop = None
3208 ns_state = "NOT_INSTANTIATED"
3209 db_nsr_update["operational-status"] = "terminated"
3210 db_nsr_update["detailed-status"] = "Done"
3211 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3212 db_nslcmop_update["detailed-status"] = "Done"
3213 nslcmop_operation_state = "COMPLETED"
3214
3215 if db_nsr:
3216 self._write_ns_status(
3217 nsr_id=nsr_id,
3218 ns_state=ns_state,
3219 current_operation="IDLE",
3220 current_operation_id=None,
3221 error_description=error_description_nsr,
3222 error_detail=error_detail,
3223 other_update=db_nsr_update
3224 )
3225 self._write_op_status(
3226 op_id=nslcmop_id,
3227 stage="",
3228 error_message=error_description_nslcmop,
3229 operation_state=nslcmop_operation_state,
3230 other_update=db_nslcmop_update,
3231 )
3232 if ns_state == "NOT_INSTANTIATED":
3233 try:
3234 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3235 except DbException as e:
3236 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3237 format(nsr_id, e))
3238 if operation_params:
3239 autoremove = operation_params.get("autoremove", False)
3240 if nslcmop_operation_state:
3241 try:
3242 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
3243 "operationState": nslcmop_operation_state,
3244 "autoremove": autoremove},
3245 loop=self.loop)
3246 except Exception as e:
3247 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3248
3249 self.logger.debug(logging_text + "Exit")
3250 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3251
3252 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
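# Waits for the asyncio tasks in created_tasks_info (a dict task -> human readable description),
# updating stage[1] with "<done>/<total>" progress in the database and collecting one error string
# per task that failed, was cancelled or timed out; the returned list is empty when every task
# completed cleanly.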
3253 time_start = time()
3254 error_detail_list = []
3255 error_list = []
3256 pending_tasks = list(created_tasks_info.keys())
3257 num_tasks = len(pending_tasks)
3258 num_done = 0
3259 stage[1] = "{}/{}.".format(num_done, num_tasks)
3260 self._write_op_status(nslcmop_id, stage)
3261 while pending_tasks:
3262 new_error = None
3263 _timeout = timeout + time_start - time()
3264 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3265 return_when=asyncio.FIRST_COMPLETED)
3266 num_done += len(done)
3267 if not done: # Timeout
3268 for task in pending_tasks:
3269 new_error = created_tasks_info[task] + ": Timeout"
3270 error_detail_list.append(new_error)
3271 error_list.append(new_error)
3272 break
3273 for task in done:
3274 if task.cancelled():
3275 exc = "Cancelled"
3276 else:
3277 exc = task.exception()
3278 if exc:
3279 if isinstance(exc, asyncio.TimeoutError):
3280 exc = "Timeout"
3281 new_error = created_tasks_info[task] + ": {}".format(exc)
3282 error_list.append(created_tasks_info[task])
3283 error_detail_list.append(new_error)
3284 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3285 K8sException, NgRoException)):
3286 self.logger.error(logging_text + new_error)
3287 else:
3288 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3289 self.logger.error(logging_text + created_tasks_info[task] + " " + exc_traceback)
3290 else:
3291 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
3292 stage[1] = "{}/{}.".format(num_done, num_tasks)
3293 if new_error:
3294 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
3295 if nsr_id: # update also nsr
3296 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3297 "errorDetail": ". ".join(error_detail_list)})
3298 self._write_op_status(nslcmop_id, stage)
3299 return error_detail_list
3300
3301 @staticmethod
3302 def _map_primitive_params(primitive_desc, params, instantiation_params):
3303 """
3304 Generates the params to be provided to the charm before executing a primitive. If the user does not provide a parameter,
3305 the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
3306 :param primitive_desc: portion of VNFD/NSD that describes primitive
3307 :param params: Params provided by user
3308 :param instantiation_params: Instantiation params provided by user
3309 :return: a dictionary with the calculated params
3310 """
3311 calculated_params = {}
3312 for parameter in primitive_desc.get("parameter", ()):
3313 param_name = parameter["name"]
3314 if param_name in params:
3315 calculated_params[param_name] = params[param_name]
3316 elif "default-value" in parameter or "value" in parameter:
3317 if "value" in parameter:
3318 calculated_params[param_name] = parameter["value"]
3319 else:
3320 calculated_params[param_name] = parameter["default-value"]
3321 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3322 and calculated_params[param_name].endswith(">"):
3323 if calculated_params[param_name][1:-1] in instantiation_params:
3324 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
3325 else:
3326 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3327 format(calculated_params[param_name], primitive_desc["name"]))
3328 else:
3329 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3330 format(param_name, primitive_desc["name"]))
3331
3332 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3333 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name],
3334 default_flow_style=True, width=256)
3335 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3336 calculated_params[param_name] = calculated_params[param_name][7:]
3337 if parameter.get("data-type") == "INTEGER":
3338 try:
3339 calculated_params[param_name] = int(calculated_params[param_name])
3340 except ValueError: # error converting string to int
3341 raise LcmException(
3342 "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"]))
3343 elif parameter.get("data-type") == "BOOLEAN":
3344 calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false')
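# Note (descriptive): only the literal string 'false' (any capitalisation) maps to False here;
# e.g. values such as '0', 'no' or '' all become True.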
3345
3346 # always add ns_config_info if the primitive name is config
3347 if primitive_desc["name"] == "config":
3348 if "ns_config_info" in instantiation_params:
3349 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
3350 return calculated_params
3351
3352 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3353 ee_descriptor_id=None):
3354 # find the vca_deployed record for this action. Raise LcmException if not found or it has no ee_id.
3355 for vca in deployed_vca:
3356 if not vca:
3357 continue
3358 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3359 continue
3360 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3361 continue
3362 if kdu_name and kdu_name != vca["kdu_name"]:
3363 continue
3364 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3365 continue
3366 break
3367 else:
3368 # vca_deployed not found
3369 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3370 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3371 ee_descriptor_id))
3372 # get ee_id
3373 ee_id = vca.get("ee_id")
3374 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
3375 if not ee_id:
3376 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has no "
3377 "execution environment"
3378 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
3379 return ee_id, vca_type
3380
3381 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0, retries_interval=30,
3382 timeout=None, vca_type=None, db_dict=None) -> (str, str):
3383 try:
3384 if primitive == "config":
3385 primitive_params = {"params": primitive_params}
3386
3387 vca_type = vca_type or "lxc_proxy_charm"
3388
3389 while retries >= 0:
3390 try:
3391 output = await asyncio.wait_for(
3392 self.vca_map[vca_type].exec_primitive(
3393 ee_id=ee_id,
3394 primitive_name=primitive,
3395 params_dict=primitive_params,
3396 progress_timeout=self.timeout_progress_primitive,
3397 total_timeout=self.timeout_primitive,
3398 db_dict=db_dict),
3399 timeout=timeout or self.timeout_primitive)
3400 # execution was OK
3401 break
3402 except asyncio.CancelledError:
3403 raise
3404 except Exception as e: # asyncio.TimeoutError
3405 if isinstance(e, asyncio.TimeoutError):
3406 e = "Timeout"
3407 retries -= 1
3408 if retries >= 0:
3409 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
3410 # wait and retry
3411 await asyncio.sleep(retries_interval, loop=self.loop)
3412 else:
3413 return 'FAILED', str(e)
3414
3415 return 'COMPLETED', output
3416
3417 except (LcmException, asyncio.CancelledError):
3418 raise
3419 except Exception as e:
3420 return 'FAILED', 'Error executing action {}: {}'.format(primitive, e)
3421
3422 async def vca_status_refresh(self, nsr_id, nslcmop_id):
3423 """
3424 Updating the vca_status with latest juju information in nsrs record
3425 :param nsr_id: Id of the nsr
3426 :param nslcmop_id: Id of the nslcmop
3427 :return: None
3428 """
3429
3430 self.logger.debug("Task ns={} action={} Enter".format(nsr_id, nslcmop_id))
3431 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3432 if db_nsr['_admin']['deployed']['K8s']:
3433 for k8s_index, k8s in enumerate(db_nsr['_admin']['deployed']['K8s']):
3434 cluster_uuid, kdu_instance = k8s["k8scluster-uuid"], k8s["kdu-instance"]
3435 await self._on_update_k8s_db(cluster_uuid, kdu_instance, filter={'_id': nsr_id})
3436 else:
3437 for vca_index, _ in enumerate(db_nsr['_admin']['deployed']['VCA']):
3438 table, filter = "nsrs", {"_id": nsr_id}
3439 path = "_admin.deployed.VCA.{}.".format(vca_index)
3440 await self._on_update_n2vc_db(table, filter, path, {})
3441
3442 self.logger.debug("Task ns={} action={} Exit".format(nsr_id, nslcmop_id))
3443 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_vca_status_refresh")
3444
3445 async def action(self, nsr_id, nslcmop_id):
3446 # Try to lock HA task here
3447 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3448 if not task_is_locked_by_me:
3449 return
3450
3451 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3452 self.logger.debug(logging_text + "Enter")
3453 # get all needed from database
3454 db_nsr = None
3455 db_nslcmop = None
3456 db_nsr_update = {}
3457 db_nslcmop_update = {}
3458 nslcmop_operation_state = None
3459 error_description_nslcmop = None
3460 exc = None
3461 try:
3462 # wait for any previous tasks in process
3463 step = "Waiting for previous operations to terminate"
3464 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3465
3466 self._write_ns_status(
3467 nsr_id=nsr_id,
3468 ns_state=None,
3469 current_operation="RUNNING ACTION",
3470 current_operation_id=nslcmop_id
3471 )
3472
3473 step = "Getting information from database"
3474 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3475 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3476
3477 nsr_deployed = db_nsr["_admin"].get("deployed")
3478 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
3479 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3480 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
3481 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3482 primitive = db_nslcmop["operationParams"]["primitive"]
3483 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3484 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
3485
3486 if vnf_index:
3487 step = "Getting vnfr from database"
3488 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3489 step = "Getting vnfd from database"
3490 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3491 else:
3492 step = "Getting nsd from database"
3493 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
3494
3495 # for backward compatibility
3496 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3497 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3498 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3499 self.update_db_2("nsrs", nsr_id, db_nsr_update)
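# Illustrative: a legacy record stored VCA as a dict such as {"0": {...}, "1": {...}}; converting
# its values to a list lets the rest of the code index deployments by position as in newer records.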
3500
3501 # look for primitive
3502 config_primitive_desc = descriptor_configuration = None
3503 if vdu_id:
3504 descriptor_configuration = get_configuration(db_vnfd, vdu_id)
3505 elif kdu_name:
3506 descriptor_configuration = get_configuration(db_vnfd, kdu_name)
3507 elif vnf_index:
3508 descriptor_configuration = get_configuration(db_vnfd, db_vnfd["id"])
3509 else:
3510 descriptor_configuration = db_nsd.get("ns-configuration")
3511
3512 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3513 for config_primitive in descriptor_configuration["config-primitive"]:
3514 if config_primitive["name"] == primitive:
3515 config_primitive_desc = config_primitive
3516 break
3517
3518 if not config_primitive_desc:
3519 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3520 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3521 format(primitive))
3522 primitive_name = primitive
3523 ee_descriptor_id = None
3524 else:
3525 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3526 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
3527
3528 if vnf_index:
3529 if vdu_id:
3530 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
3531 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
3532 elif kdu_name:
3533 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3534 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3535 else:
3536 desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf"))
3537 else:
3538 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
3539 if kdu_name and get_configuration(db_vnfd, kdu_name):
3540 kdu_configuration = get_configuration(db_vnfd, kdu_name)
3541 actions = set()
3542 for primitive in kdu_configuration.get("initial-config-primitive", []):
3543 actions.add(primitive["name"])
3544 for primitive in kdu_configuration.get("config-primitive", []):
3545 actions.add(primitive["name"])
3546 kdu_action = primitive_name in actions
3547
3548 # TODO check if ns is in a proper status
3549 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
3550 # kdur and desc_params already set from before
3551 if primitive_params:
3552 desc_params.update(primitive_params)
3553 # TODO Check if we will need something at vnf level
3554 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3555 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3556 break
3557 else:
3558 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
3559
3560 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3561 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3562 raise LcmException(msg)
3563
3564 db_dict = {"collection": "nsrs",
3565 "filter": {"_id": nsr_id},
3566 "path": "_admin.deployed.K8s.{}".format(index)}
3567 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3568 step = "Executing kdu {}".format(primitive_name)
3569 if primitive_name == "upgrade":
3570 if desc_params.get("kdu_model"):
3571 kdu_model = desc_params.get("kdu_model")
3572 del desc_params["kdu_model"]
3573 else:
3574 kdu_model = kdu.get("kdu-model")
3575 parts = kdu_model.split(sep=":")
3576 if len(parts) == 2:
3577 kdu_model = parts[0]
3578
3579 detailed_status = await asyncio.wait_for(
3580 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3581 cluster_uuid=kdu.get("k8scluster-uuid"),
3582 kdu_instance=kdu.get("kdu-instance"),
3583 atomic=True, kdu_model=kdu_model,
3584 params=desc_params, db_dict=db_dict,
3585 timeout=timeout_ns_action),
3586 timeout=timeout_ns_action + 10)
3587 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
3588 elif primitive_name == "rollback":
3589 detailed_status = await asyncio.wait_for(
3590 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3591 cluster_uuid=kdu.get("k8scluster-uuid"),
3592 kdu_instance=kdu.get("kdu-instance"),
3593 db_dict=db_dict),
3594 timeout=timeout_ns_action)
3595 elif primitive_name == "status":
3596 detailed_status = await asyncio.wait_for(
3597 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3598 cluster_uuid=kdu.get("k8scluster-uuid"),
3599 kdu_instance=kdu.get("kdu-instance")),
3600 timeout=timeout_ns_action)
3601 else:
3602 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3603 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3604
3605 detailed_status = await asyncio.wait_for(
3606 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3607 cluster_uuid=kdu.get("k8scluster-uuid"),
3608 kdu_instance=kdu_instance,
3609 primitive_name=primitive_name,
3610 params=params, db_dict=db_dict,
3611 timeout=timeout_ns_action),
3612 timeout=timeout_ns_action)
3613
3614 if detailed_status:
3615 nslcmop_operation_state = 'COMPLETED'
3616 else:
3617 detailed_status = ''
3618 nslcmop_operation_state = 'FAILED'
3619 else:
3620 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index,
3621 vdu_id=vdu_id, vdu_count_index=vdu_count_index,
3622 ee_descriptor_id=ee_descriptor_id)
3623 for vca_index, vca_deployed in enumerate(db_nsr['_admin']['deployed']['VCA']):
3624 if vca_deployed.get("member-vnf-index") == vnf_index:
3625 db_dict = {"collection": "nsrs",
3626 "filter": {"_id": nsr_id},
3627 "path": "_admin.deployed.VCA.{}.".format(vca_index)}
3628 break
3629 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
3630 ee_id,
3631 primitive=primitive_name,
3632 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
3633 timeout=timeout_ns_action,
3634 vca_type=vca_type,
3635 db_dict=db_dict)
3636
3637 db_nslcmop_update["detailed-status"] = detailed_status
3638 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3639 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3640 detailed_status))
3641 return # database update is called inside finally
3642
3643 except (DbException, LcmException, N2VCException, K8sException) as e:
3644 self.logger.error(logging_text + "Exit Exception {}".format(e))
3645 exc = e
3646 except asyncio.CancelledError:
3647 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3648 exc = "Operation was cancelled"
3649 except asyncio.TimeoutError:
3650 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3651 exc = "Timeout"
3652 except Exception as e:
3653 exc = traceback.format_exc()
3654 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3655 finally:
3656 if exc:
3657 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
3658 "FAILED {}: {}".format(step, exc)
3659 nslcmop_operation_state = "FAILED"
3660 if db_nsr:
3661 self._write_ns_status(
3662 nsr_id=nsr_id,
3663 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3664 current_operation="IDLE",
3665 current_operation_id=None,
3666 # error_description=error_description_nsr,
3667 # error_detail=error_detail,
3668 other_update=db_nsr_update
3669 )
3670
3671 self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop,
3672 operation_state=nslcmop_operation_state, other_update=db_nslcmop_update)
3673
3674 if nslcmop_operation_state:
3675 try:
3676 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
3677 "operationState": nslcmop_operation_state},
3678 loop=self.loop)
3679 except Exception as e:
3680 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3681 self.logger.debug(logging_text + "Exit")
3682 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
3683 return nslcmop_operation_state, detailed_status
3684
3685 async def scale(self, nsr_id, nslcmop_id):
3686 # Try to lock HA task here
3687 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3688 if not task_is_locked_by_me:
3689 return
3690
3691 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3692 stage = ['', '', '']
3693 tasks_dict_info = {}
3694 # ^ stage, step, VIM progress
3695 self.logger.debug(logging_text + "Enter")
3696 # get all needed from database
3697 db_nsr = None
3698 db_nslcmop_update = {}
3699 db_nsr_update = {}
3700 exc = None
3701 # in case of error, indicates which part of the scale failed, to set the nsr at error status
3702 scale_process = None
3703 old_operational_status = ""
3704 old_config_status = ""
3705 nsi_id = None
3706 try:
3707 # wait for any previous tasks in process
3708 step = "Waiting for previous operations to terminate"
3709 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3710 self._write_ns_status(nsr_id=nsr_id, ns_state=None,
3711 current_operation="SCALING", current_operation_id=nslcmop_id)
3712
3713 step = "Getting nslcmop from database"
3714 self.logger.debug(step + " after having waited for previous tasks to be completed")
3715 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3716
3717 step = "Getting nsr from database"
3718 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3719 old_operational_status = db_nsr["operational-status"]
3720 old_config_status = db_nsr["config-status"]
3721
3722 step = "Parsing scaling parameters"
3723 db_nsr_update["operational-status"] = "scaling"
3724 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3725 nsr_deployed = db_nsr["_admin"].get("deployed")
3726
3735 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3736 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3737 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3738 # for backward compatibility
3739 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3740 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3741 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3742 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3743
3744 step = "Getting vnfr from database"
3745 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3746
3747 step = "Getting vnfd from database"
3748 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3749
3750 base_folder = db_vnfd["_admin"]["storage"]
3751
3752 step = "Getting scaling-group-descriptor"
3753 scaling_descriptor = find_in_list(
3754 get_scaling_aspect(
3755 db_vnfd
3756 ),
3757 lambda scale_desc: scale_desc["name"] == scaling_group
3758 )
3759 if not scaling_descriptor:
3760 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3761 "at vnfd:scaling-group-descriptor".format(scaling_group))
3762
3763 step = "Sending scale order to VIM"
3764 # TODO check if ns is in a proper status
3765 nb_scale_op = 0
3766 if not db_nsr["_admin"].get("scaling-group"):
3767 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3768 admin_scale_index = 0
3769 else:
3770 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3771 if admin_scale_info["name"] == scaling_group:
3772 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3773 break
3774 else: # not found, set index one plus last element and add new entry with the name
3775 admin_scale_index += 1
3776 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
3777 RO_scaling_info = []
3778 VCA_scaling_info = []
3779 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3780 if scaling_type == "SCALE_OUT":
3781 if "aspect-delta-details" not in scaling_descriptor:
3782 raise LcmException(
3783 "Aspect delta details not fount in scaling descriptor {}".format(
3784 scaling_descriptor["name"]
3785 )
3786 )
3787 # count if max-instance-count is reached
3788 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
3789
3790 vdu_scaling_info["scaling_direction"] = "OUT"
3791 vdu_scaling_info["vdu-create"] = {}
3792 for delta in deltas:
3793 for vdu_delta in delta["vdu-delta"]:
3794 vdud = get_vdu(db_vnfd, vdu_delta["id"])
3795 vdu_index = get_vdur_index(db_vnfr, vdu_delta)
3796 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
3797 if cloud_init_text:
3798 additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
3799 cloud_init_list = []
3800
3801 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
3802 max_instance_count = 10
3803 if vdu_profile and "max-number-of-instances" in vdu_profile:
3804 max_instance_count = vdu_profile.get("max-number-of-instances", 10)
3805
3806 default_instance_num = get_number_of_instances(db_vnfd, vdud["id"])
3807
3808 nb_scale_op += vdu_delta.get("number-of-instances", 1)
3809
3810 if nb_scale_op + default_instance_num > max_instance_count:
3811 raise LcmException(
3812 "reached the limit of {} (max-instance-count) "
3813 "scaling-out operations for the "
3814 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)
3815 )
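# Worked example with hypothetical numbers: default_instance_num=1, max-number-of-instances=3 and
# one earlier scale-out of a single instance (nb_scale_op=1). A new "number-of-instances": 1 delta
# raises nb_scale_op to 2, and 2 + 1 <= 3 so it is accepted; the next one would give 3 + 1 > 3 and
# the LcmException above is raised.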
3816 for x in range(vdu_delta.get("number-of-instances", 1)):
3817 if cloud_init_text:
3818 # TODO Information of its own ip is not available because db_vnfr is not updated.
3819 additional_params["OSM"] = get_osm_params(
3820 db_vnfr,
3821 vdu_delta["id"],
3822 vdu_index + x
3823 )
3824 cloud_init_list.append(
3825 self._parse_cloud_init(
3826 cloud_init_text,
3827 additional_params,
3828 db_vnfd["id"],
3829 vdud["id"]
3830 )
3831 )
3832 VCA_scaling_info.append(
3833 {
3834 "osm_vdu_id": vdu_delta["id"],
3835 "member-vnf-index": vnf_index,
3836 "type": "create",
3837 "vdu_index": vdu_index + x
3838 }
3839 )
3840 RO_scaling_info.append(
3841 {
3842 "osm_vdu_id": vdu_delta["id"],
3843 "member-vnf-index": vnf_index,
3844 "type": "create",
3845 "count": vdu_delta.get("number-of-instances", 1)
3846 }
3847 )
3848 if cloud_init_list:
3849 RO_scaling_info[-1]["cloud_init"] = cloud_init_list
3850 vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1)
3851
3852 elif scaling_type == "SCALE_IN":
3853 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3854 min_instance_count = int(scaling_descriptor["min-instance-count"])
3855
3856 vdu_scaling_info["scaling_direction"] = "IN"
3857 vdu_scaling_info["vdu-delete"] = {}
3858 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
3859 for delta in deltas:
3860 for vdu_delta in delta["vdu-delta"]:
3861 vdu_index = get_vdur_index(db_vnfr, vdu_delta)
3862 min_instance_count = 0
3863 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
3864 if vdu_profile and "min-number-of-instances" in vdu_profile:
3865 min_instance_count = vdu_profile["min-number-of-instances"]
3866
3867 default_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"])
3868
3869 nb_scale_op -= vdu_delta.get("number-of-instances", 1)
3870 if nb_scale_op + default_instance_num < min_instance_count:
3871 raise LcmException(
3872 "reached the limit of {} (min-instance-count) scaling-in operations for the "
3873 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)
3874 )
3875 RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index,
3876 "type": "delete", "count": vdu_delta.get("number-of-instances", 1),
3877 "vdu_index": vdu_index - 1})
3878 for x in range(vdu_delta.get("number-of-instances", 1)):
3879 VCA_scaling_info.append(
3880 {
3881 "osm_vdu_id": vdu_delta["id"],
3882 "member-vnf-index": vnf_index,
3883 "type": "delete",
3884 "vdu_index": vdu_index - 1 - x
3885 }
3886 )
3887 vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1)
3888
3889 # update vdu_scaling_info with the VDUs to be deleted, including their ip addresses
3890 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
3891 if vdu_scaling_info["scaling_direction"] == "IN":
3892 for vdur in reversed(db_vnfr["vdur"]):
3893 if vdu_delete.get(vdur["vdu-id-ref"]):
3894 vdu_delete[vdur["vdu-id-ref"]] -= 1
3895 vdu_scaling_info["vdu"].append({
3896 "name": vdur.get("name") or vdur.get("vdu-name"),
3897 "vdu_id": vdur["vdu-id-ref"],
3898 "interface": []
3899 })
3900 for interface in vdur["interfaces"]:
3901 vdu_scaling_info["vdu"][-1]["interface"].append({
3902 "name": interface["name"],
3903 "ip_address": interface["ip-address"],
3904 "mac_address": interface.get("mac-address"),
3905 })
3906 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
3907
3908 # PRE-SCALE BEGIN
3909 step = "Executing pre-scale vnf-config-primitive"
3910 if scaling_descriptor.get("scaling-config-action"):
3911 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
3912 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3913 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
3914 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3915 step = db_nslcmop_update["detailed-status"] = \
3916 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
3917
3918 # look for primitive
3919 for config_primitive in (get_configuration(
3920 db_vnfd, db_vnfd["id"]
3921 ) or {}).get("config-primitive", ()):
3922 if config_primitive["name"] == vnf_config_primitive:
3923 break
3924 else:
3925 raise LcmException(
3926 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
3927 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
3928 "primitive".format(scaling_group, vnf_config_primitive))
3929
3930 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
3931 if db_vnfr.get("additionalParamsForVnf"):
3932 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
3933
3934 scale_process = "VCA"
3935 db_nsr_update["config-status"] = "configuring pre-scaling"
3936 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3937
3938 # Pre-scale retry check: Check if this sub-operation has been executed before
3939 op_index = self._check_or_add_scale_suboperation(
3940 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
3941 if op_index == self.SUBOPERATION_STATUS_SKIP:
3942 # Skip sub-operation
3943 result = 'COMPLETED'
3944 result_detail = 'Done'
3945 self.logger.debug(logging_text +
3946 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3947 vnf_config_primitive, result, result_detail))
3948 else:
3949 if op_index == self.SUBOPERATION_STATUS_NEW:
3950 # New sub-operation: Get index of this sub-operation
3951 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3952 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3953 format(vnf_config_primitive))
3954 else:
3955 # retry: Get registered params for this existing sub-operation
3956 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3957 vnf_index = op.get('member_vnf_index')
3958 vnf_config_primitive = op.get('primitive')
3959 primitive_params = op.get('primitive_params')
3960 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
3961 format(vnf_config_primitive))
3962 # Execute the primitive, either with new (first-time) or registered (retry) args
3963 ee_descriptor_id = config_primitive.get("execution-environment-ref")
3964 primitive_name = config_primitive.get("execution-environment-primitive",
3965 vnf_config_primitive)
3966 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3967 member_vnf_index=vnf_index,
3968 vdu_id=None,
3969 vdu_count_index=None,
3970 ee_descriptor_id=ee_descriptor_id)
3971 result, result_detail = await self._ns_execute_primitive(
3972 ee_id, primitive_name, primitive_params, vca_type=vca_type)
3973 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3974 vnf_config_primitive, result, result_detail))
3975 # Update operationState = COMPLETED | FAILED
3976 self._update_suboperation_status(
3977 db_nslcmop, op_index, result, result_detail)
3978
3979 if result == "FAILED":
3980 raise LcmException(result_detail)
3981 db_nsr_update["config-status"] = old_config_status
3982 scale_process = None
3983 # PRE-SCALE END
3984
3985 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
3986 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
3987
3988 # SCALE-IN VCA - BEGIN
3989 if VCA_scaling_info:
3990 step = db_nslcmop_update["detailed-status"] = \
3991 "Deleting the execution environments"
3992 scale_process = "VCA"
3993 for vdu_info in VCA_scaling_info:
3994 if vdu_info["type"] == "delete":
3995 member_vnf_index = str(vdu_info["member-vnf-index"])
3996 self.logger.debug(logging_text + "vdu info: {}".format(vdu_info))
3997 vdu_id = vdu_info["osm_vdu_id"]
3998 vdu_index = int(vdu_info["vdu_index"])
3999 stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
4000 member_vnf_index, vdu_id, vdu_index)
4001 stage[2] = step = "Scaling in VCA"
4002 self._write_op_status(
4003 op_id=nslcmop_id,
4004 stage=stage
4005 )
4006 vca_update = db_nsr["_admin"]["deployed"]["VCA"]
4007 config_update = db_nsr["configurationStatus"]
4008 for vca_index, vca in enumerate(vca_update):
4009 if (vca and vca.get("ee_id")) and vca["member-vnf-index"] == member_vnf_index and \
4010 vca["vdu_count_index"] == vdu_index:
4011 if vca.get("vdu_id"):
4012 config_descriptor = get_configuration(db_vnfd, vca.get("vdu_id"))
4013 elif vca.get("kdu_name"):
4014 config_descriptor = get_configuration(db_vnfd, vca.get("kdu_name"))
4015 else:
4016 config_descriptor = get_configuration(db_vnfd, db_vnfd["id"])
4017 operation_params = db_nslcmop.get("operationParams") or {}
4018 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
4019 vca.get("needed_terminate"))
4020 task = asyncio.ensure_future(asyncio.wait_for(
4021 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor,
4022 vca_index, destroy_ee=True,
4023 exec_primitives=exec_terminate_primitives,
4024 scaling_in=True), timeout=self.timeout_charm_delete))
4025 # wait before next removal
4026 await asyncio.sleep(30)
4027 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
4028 del vca_update[vca_index]
4029 del config_update[vca_index]
4030 # wait for pending tasks of terminate primitives
4031 if tasks_dict_info:
4032 self.logger.debug(logging_text +
4033 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
4034 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
4035 min(self.timeout_charm_delete,
4036 self.timeout_ns_terminate),
4037 stage, nslcmop_id)
4038 tasks_dict_info.clear()
4039 if error_list:
4040 raise LcmException("; ".join(error_list))
4041
4042 db_vca_and_config_update = {
4043 "_admin.deployed.VCA": vca_update,
4044 "configurationStatus": config_update
4045 }
4046 self.update_db_2("nsrs", db_nsr["_id"], db_vca_and_config_update)
4047 scale_process = None
4048 # SCALE-IN VCA - END
4049
4050 # SCALE RO - BEGIN
4051 if RO_scaling_info:
4052 scale_process = "RO"
4053 if self.ro_config.get("ng"):
4054 await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage)
4055 vdu_scaling_info.pop("vdu-create", None)
4056 vdu_scaling_info.pop("vdu-delete", None)
4057
4058 scale_process = None
4059 if db_nsr_update:
4060 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4061 # SCALE RO - END
4062
4063 # SCALE-UP VCA - BEGIN
4064 if VCA_scaling_info:
4065 step = db_nslcmop_update["detailed-status"] = \
4066 "Creating new execution environments"
4067 scale_process = "VCA"
4068 for vdu_info in VCA_scaling_info:
4069 if vdu_info["type"] == "create":
4070 member_vnf_index = str(vdu_info["member-vnf-index"])
4071 self.logger.debug(logging_text + "vdu info: {}".format(vdu_info))
4072 vnfd_id = db_vnfr["vnfd-ref"]
4073 vdu_index = int(vdu_info["vdu_index"])
4074 deploy_params = {"OSM": get_osm_params(db_vnfr)}
4075 if db_vnfr.get("additionalParamsForVnf"):
4076 deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))
4077 descriptor_config = get_configuration(db_vnfd, db_vnfd["id"])
4078 if descriptor_config:
4079 vdu_id = None
4080 vdu_name = None
4081 kdu_name = None
4082 self._deploy_n2vc(
4083 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
4084 db_nsr=db_nsr,
4085 db_vnfr=db_vnfr,
4086 nslcmop_id=nslcmop_id,
4087 nsr_id=nsr_id,
4088 nsi_id=nsi_id,
4089 vnfd_id=vnfd_id,
4090 vdu_id=vdu_id,
4091 kdu_name=kdu_name,
4092 member_vnf_index=member_vnf_index,
4093 vdu_index=vdu_index,
4094 vdu_name=vdu_name,
4095 deploy_params=deploy_params,
4096 descriptor_config=descriptor_config,
4097 base_folder=base_folder,
4098 task_instantiation_info=tasks_dict_info,
4099 stage=stage
4100 )
4101 vdu_id = vdu_info["osm_vdu_id"]
4102 vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)
4103 descriptor_config = get_configuration(db_vnfd, vdu_id)
4104 if vdur.get("additionalParams"):
4105 deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
4106 else:
4107 deploy_params_vdu = deploy_params
4108 deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=vdu_index)
4109 if descriptor_config:
4110 vdu_name = None
4111 kdu_name = None
4112 stage[1] = "Scaling member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
4113 member_vnf_index, vdu_id, vdu_index)
4114 stage[2] = step = "Scaling out VCA"
4115 self._write_op_status(
4116 op_id=nslcmop_id,
4117 stage=stage
4118 )
4119 self._deploy_n2vc(
4120 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
4121 member_vnf_index, vdu_id, vdu_index),
4122 db_nsr=db_nsr,
4123 db_vnfr=db_vnfr,
4124 nslcmop_id=nslcmop_id,
4125 nsr_id=nsr_id,
4126 nsi_id=nsi_id,
4127 vnfd_id=vnfd_id,
4128 vdu_id=vdu_id,
4129 kdu_name=kdu_name,
4130 member_vnf_index=member_vnf_index,
4131 vdu_index=vdu_index,
4132 vdu_name=vdu_name,
4133 deploy_params=deploy_params_vdu,
4134 descriptor_config=descriptor_config,
4135 base_folder=base_folder,
4136 task_instantiation_info=tasks_dict_info,
4137 stage=stage
4138 )
4139 # TODO: scaling for kdu is not implemented yet.
4140 kdu_name = vdu_info["osm_vdu_id"]
4141 descriptor_config = get_configuration(db_vnfd, kdu_name)
4142 if descriptor_config:
4143 vdu_id = None
4145 vdu_name = None
4146 kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
4147 deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
4148 if kdur.get("additionalParams"):
4149 deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"])
4150
4151 self._deploy_n2vc(
4152 logging_text=logging_text,
4153 db_nsr=db_nsr,
4154 db_vnfr=db_vnfr,
4155 nslcmop_id=nslcmop_id,
4156 nsr_id=nsr_id,
4157 nsi_id=nsi_id,
4158 vnfd_id=vnfd_id,
4159 vdu_id=vdu_id,
4160 kdu_name=kdu_name,
4161 member_vnf_index=member_vnf_index,
4162 vdu_index=vdu_index,
4163 vdu_name=vdu_name,
4164 deploy_params=deploy_params_kdu,
4165 descriptor_config=descriptor_config,
4166 base_folder=base_folder,
4167 task_instantiation_info=tasks_dict_info,
4168 stage=stage
4169 )
4170 # SCALE-UP VCA - END
4171 scale_process = None
4172
4173 # POST-SCALE BEGIN
4174 # execute primitive service POST-SCALING
4175 step = "Executing post-scale vnf-config-primitive"
4176 if scaling_descriptor.get("scaling-config-action"):
4177 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
4178 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4179 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
4180 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4181 step = db_nslcmop_update["detailed-status"] = \
4182 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
4183
4184 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
4185 if db_vnfr.get("additionalParamsForVnf"):
4186 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4187
4188 # look for primitive
4189 for config_primitive in (
4190 get_configuration(db_vnfd, db_vnfd["id"]) or {}
4191 ).get("config-primitive", ()):
4192 if config_primitive["name"] == vnf_config_primitive:
4193 break
4194 else:
4195 raise LcmException(
4196 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4197 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4198 "config-primitive".format(scaling_group, vnf_config_primitive))
4199 scale_process = "VCA"
4200 db_nsr_update["config-status"] = "configuring post-scaling"
4201 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4202
4203 # Post-scale retry check: Check if this sub-operation has been executed before
4204 op_index = self._check_or_add_scale_suboperation(
4205 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
4206 if op_index == self.SUBOPERATION_STATUS_SKIP:
4207 # Skip sub-operation
4208 result = 'COMPLETED'
4209 result_detail = 'Done'
4210 self.logger.debug(logging_text +
4211 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4212 format(vnf_config_primitive, result, result_detail))
4213 else:
4214 if op_index == self.SUBOPERATION_STATUS_NEW:
4215 # New sub-operation: Get index of this sub-operation
4216 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4217 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4218 format(vnf_config_primitive))
4219 else:
4220 # retry: Get registered params for this existing sub-operation
4221 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4222 vnf_index = op.get('member_vnf_index')
4223 vnf_config_primitive = op.get('primitive')
4224 primitive_params = op.get('primitive_params')
4225 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
4226 format(vnf_config_primitive))
4227 # Execute the primitive, either with new (first-time) or registered (retry) args
4228 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4229 primitive_name = config_primitive.get("execution-environment-primitive",
4230 vnf_config_primitive)
4231 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4232 member_vnf_index=vnf_index,
4233 vdu_id=None,
4234 vdu_count_index=None,
4235 ee_descriptor_id=ee_descriptor_id)
4236 result, result_detail = await self._ns_execute_primitive(
4237 ee_id, primitive_name, primitive_params, vca_type=vca_type)
4238 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4239 vnf_config_primitive, result, result_detail))
4240 # Update operationState = COMPLETED | FAILED
4241 self._update_suboperation_status(
4242 db_nslcmop, op_index, result, result_detail)
4243
4244 if result == "FAILED":
4245 raise LcmException(result_detail)
4246 db_nsr_update["config-status"] = old_config_status
4247 scale_process = None
4248 # POST-SCALE END
4249
4250 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
4251 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4252 else old_operational_status
4253 db_nsr_update["config-status"] = old_config_status
4254 return
4255 except (ROclient.ROClientException, DbException, LcmException, NgRoException) as e:
4256 self.logger.error(logging_text + "Exit Exception {}".format(e))
4257 exc = e
4258 except asyncio.CancelledError:
4259 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4260 exc = "Operation was cancelled"
4261 except Exception as e:
4262 exc = traceback.format_exc()
4263 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4264 finally:
4265 self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None)
4266 if tasks_dict_info:
4267 stage[1] = "Waiting for instantiate pending tasks."
4268 self.logger.debug(logging_text + stage[1])
4269 exc = await self._wait_for_tasks(logging_text, tasks_dict_info, self.timeout_ns_deploy,
4270 stage, nslcmop_id, nsr_id=nsr_id)
4271 if exc:
4272 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4273 nslcmop_operation_state = "FAILED"
4274 if db_nsr:
4275 db_nsr_update["operational-status"] = old_operational_status
4276 db_nsr_update["config-status"] = old_config_status
4277 db_nsr_update["detailed-status"] = ""
4278 if scale_process:
4279 if "VCA" in scale_process:
4280 db_nsr_update["config-status"] = "failed"
4281 if "RO" in scale_process:
4282 db_nsr_update["operational-status"] = "failed"
4283 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4284 exc)
4285 else:
4286 error_description_nslcmop = None
4287 nslcmop_operation_state = "COMPLETED"
4288 db_nslcmop_update["detailed-status"] = "Done"
4289
4290 self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop,
4291 operation_state=nslcmop_operation_state, other_update=db_nslcmop_update)
4292 if db_nsr:
4293 self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE",
4294 current_operation_id=None, other_update=db_nsr_update)
4295
4296 if nslcmop_operation_state:
4297 try:
4298 msg = {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, "operationState": nslcmop_operation_state}
4299 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
4300 except Exception as e:
4301 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4302 self.logger.debug(logging_text + "Exit")
4303 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
4304
4305 async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage):
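"""
Apply a VDU scaling operation at NG-RO level: the VDUs to create/delete are first marked in the
VNFR, the change is requested to the NG-RO via _instantiate_ng_ro and, once applied, the VDU
records marked for deletion are removed from the VNFR.
"""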
4306 nsr_id = db_nslcmop["nsInstanceId"]
4307 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4308 db_vnfrs = {}
4309
4310 # read from db: the vnfd of every vnf
4311 db_vnfds = []
4312
4313 # for each vnf in ns, read vnfd
4314 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
4315 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
4316 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
4317 # if we do not have this vnfd yet, read it from db
4318 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
4319 # read from db
4320 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4321 db_vnfds.append(vnfd)
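# collect the N2VC public key; it is passed to the NG-RO below so that it can be injected
# (e.g. as an authorized ssh key) into the newly created VDU instances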
4322 n2vc_key = self.n2vc.get_public_key()
4323 n2vc_key_list = [n2vc_key]
4324 self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"),
4325 mark_delete=True)
4326 # db_vnfr has been updated, update db_vnfrs to use it
4327 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
4328 await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs,
4329 db_vnfds, n2vc_key_list, stage=stage, start_deploy=time(),
4330 timeout_ns_deploy=self.timeout_ns_deploy)
4331 if vdu_scaling_info.get("vdu-delete"):
4332 self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False)
4333
4334 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
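"""
Look for a 'prometheus*.j2' job template in the execution environment artifacts, render it with
the target/exporter information and register the resulting scrape jobs with Prometheus.
Returns the list of created job names, or None if Prometheus is not configured or no template exists.
"""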
4335 if not self.prometheus:
4336 return
4337 # look for a file called 'prometheus*.j2' and use it as the Prometheus job template
4338 artifact_content = self.fs.dir_ls(artifact_path)
4339 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4340 if not job_file:
4341 return
4342 with self.fs.file_open((artifact_path, job_file), "r") as f:
4343 job_data = f.read()
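# A minimal, hypothetical 'prometheus*.j2' job template consumed here could look like the
# following (assuming Jinja2 substitution of the variables built below):
#   - job_name: {{ JOB_NAME }}
#     static_configs:
#       - targets: ["{{ EXPORTER_POD_IP }}:{{ EXPORTER_POD_PORT }}"]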
4344
4345 # TODO get_service
4346 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4347 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4348 host_port = "80"
4349 vnfr_id = vnfr_id.replace("-", "")
4350 variables = {
4351 "JOB_NAME": vnfr_id,
4352 "TARGET_IP": target_ip,
4353 "EXPORTER_POD_IP": host_name,
4354 "EXPORTER_POD_PORT": host_port,
4355 }
4356 job_list = self.prometheus.parse_job(job_data, variables)
4357 # ensure the job_name contains the vnfr_id and add the nsr_id as metadata
4358 for job in job_list:
4359 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4360 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4361 job["nsr_id"] = nsr_id
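# index the jobs by name and register them with Prometheus; return the job names on success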
4362 job_dict = {jl["job_name"]: jl for jl in job_list}
4363 if await self.prometheus.update(job_dict):
4364 return list(job_dict.keys())
4365
4366 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4367 """
4368 Get VCA Cloud and VCA Cloud Credentials for the VIM account
4369
4370 :param vim_account_id: VIM Account ID
4371
4372 :return: (cloud_name, cloud_credential)
4373 """
4374 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
4375 return config.get("vca_cloud"), config.get("vca_cloud_credential")
4376
4377 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4378 """
4379 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
4380
4381 :param vim_account_id: VIM Account ID
4382
4383 :return: (cloud_name, cloud_credential)
4384 """
4385 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
4386 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")