Feature 7184: New Generation RO enhancement
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
26
27 from osm_lcm import ROclient
28 from osm_lcm.ng_ro import NgRoClient, NgRoException
29 from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
30 from n2vc.k8s_helm_conn import K8sHelmConnector
31 from n2vc.k8s_helm3_conn import K8sHelm3Connector
32 from n2vc.k8s_juju_conn import K8sJujuConnector
33
34 from osm_common.dbbase import DbException
35 from osm_common.fsbase import FsException
36
37 from n2vc.n2vc_juju_conn import N2VCJujuConnector
38 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
39
40 from osm_lcm.lcm_helm_conn import LCMHelmConn
41
42 from copy import copy, deepcopy
43 from http import HTTPStatus
44 from time import time
45 from uuid import uuid4
46
47 from random import randint
48
49 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
50
51
52 class NsLcm(LcmBase):
53 timeout_vca_on_error = 5 * 60 # time from when a charm first reports blocked/error status until it is marked as failed
54 timeout_ns_deploy = 2 * 3600 # default global timeout for deploying an ns
55 timeout_ns_terminate = 1800 # default global timeout for terminating an ns
56 timeout_charm_delete = 10 * 60
57 timeout_primitive = 30 * 60 # timeout for primitive execution
58 timeout_progress_primitive = 10 * 60 # timeout waiting for some progress in a primitive execution
59
60 SUBOPERATION_STATUS_NOT_FOUND = -1
61 SUBOPERATION_STATUS_NEW = -2
62 SUBOPERATION_STATUS_SKIP = -3
63 task_name_deploy_vca = "Deploying VCA"
64
65 def __init__(self, db, msg, fs, lcm_tasks, config, loop, prometheus=None):
66 """
67 Init: connect to database, filesystem storage and messaging
68 :param config: two-level dictionary with configuration. Top level should contain 'database', 'storage', 'timeout', 'ro_config' and 'VCA'
69 :return: None
70 """
71 super().__init__(
72 db=db,
73 msg=msg,
74 fs=fs,
75 logger=logging.getLogger('lcm.ns')
76 )
77
78 self.loop = loop
79 self.lcm_tasks = lcm_tasks
80 self.timeout = config["timeout"]
81 self.ro_config = config["ro_config"]
82 self.ng_ro = config["ro_config"].get("ng")
83 self.vca_config = config["VCA"].copy()
84
85 # create N2VC connector
86 self.n2vc = N2VCJujuConnector(
87 db=self.db,
88 fs=self.fs,
89 log=self.logger,
90 loop=self.loop,
91 url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
92 username=self.vca_config.get('user', None),
93 vca_config=self.vca_config,
94 on_update_db=self._on_update_n2vc_db
95 )
96
97 self.conn_helm_ee = LCMHelmConn(
98 db=self.db,
99 fs=self.fs,
100 log=self.logger,
101 loop=self.loop,
102 url=None,
103 username=None,
104 vca_config=self.vca_config,
105 on_update_db=self._on_update_n2vc_db
106 )
107
108 self.k8sclusterhelm2 = K8sHelmConnector(
109 kubectl_command=self.vca_config.get("kubectlpath"),
110 helm_command=self.vca_config.get("helmpath"),
111 fs=self.fs,
112 log=self.logger,
113 db=self.db,
114 on_update_db=None,
115 )
116
117 self.k8sclusterhelm3 = K8sHelm3Connector(
118 kubectl_command=self.vca_config.get("kubectlpath"),
119 helm_command=self.vca_config.get("helm3path"),
120 fs=self.fs,
121 log=self.logger,
122 db=self.db,
123 on_update_db=None,
124 )
125
126 self.k8sclusterjuju = K8sJujuConnector(
127 kubectl_command=self.vca_config.get("kubectlpath"),
128 juju_command=self.vca_config.get("jujupath"),
129 fs=self.fs,
130 log=self.logger,
131 db=self.db,
132 loop=self.loop,
133 on_update_db=None,
134 vca_config=self.vca_config,
135 )
136
137 self.k8scluster_map = {
138 "helm-chart": self.k8sclusterhelm2,
139 "helm-chart-v3": self.k8sclusterhelm3,
140 "chart": self.k8sclusterhelm3,
141 "juju-bundle": self.k8sclusterjuju,
142 "juju": self.k8sclusterjuju,
143 }
144
145 self.vca_map = {
146 "lxc_proxy_charm": self.n2vc,
147 "native_charm": self.n2vc,
148 "k8s_proxy_charm": self.n2vc,
149 "helm": self.conn_helm_ee,
150 "helm-v3": self.conn_helm_ee
151 }
152
153 self.prometheus = prometheus
154
155 # create RO client
156 if self.ng_ro:
157 self.RO = NgRoClient(self.loop, **self.ro_config)
158 else:
159 self.RO = ROclient.ROClient(self.loop, **self.ro_config)
160
161 @staticmethod
162 def increment_ip_mac(ip_mac, vm_index=1):
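"""Increment an IPv4, IPv6 or MAC address string by vm_index (behaviour as coded below).
Illustrative examples with assumed values, not taken from the source:
increment_ip_mac("10.0.0.4", 2) returns "10.0.0.6"
increment_ip_mac("fa:16:3e:05:01:0a", 1) returns "fa:16:3e:05:01:0b"
Non-string input is returned unchanged; None is returned if the value cannot be incremented.
"""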
163 if not isinstance(ip_mac, str):
164 return ip_mac
165 try:
166 # try with IPv4: look for the last dot
167 i = ip_mac.rfind(".")
168 if i > 0:
169 i += 1
170 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
171 # try with IPv6 or MAC: look for the last colon and operate in hex
172 i = ip_mac.rfind(":")
173 if i > 0:
174 i += 1
175 # format in hex, len can be 2 for mac or 4 for ipv6
176 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index)
177 except Exception:
178 pass
179 return None
180
181 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
182
183 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
184
185 try:
186 # TODO filter RO descriptor fields...
187
188 # write to database
189 db_dict = dict()
190 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
191 db_dict['deploymentStatus'] = ro_descriptor
192 self.update_db_2("nsrs", nsrs_id, db_dict)
193
194 except Exception as e:
195 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
196
197 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
198
199 # remove last dot from path (if exists)
200 if path.endswith('.'):
201 path = path[:-1]
202
203 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
204 # .format(table, filter, path, updated_data))
205
206 try:
207
208 nsr_id = filter.get('_id')
209
210 # read ns record from database
211 nsr = self.db.get_one(table='nsrs', q_filter=filter)
212 current_ns_status = nsr.get('nsState')
213
214 # get vca status for NS
215 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
216
217 # vcaStatus
218 db_dict = dict()
219 db_dict['vcaStatus'] = status_dict
220
221 # update configurationStatus for this VCA
222 try:
223 vca_index = int(path[path.rfind(".")+1:])
224
225 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
226 vca_status = vca_list[vca_index].get('status')
227
228 configuration_status_list = nsr.get('configurationStatus')
229 config_status = configuration_status_list[vca_index].get('status')
230
231 if config_status == 'BROKEN' and vca_status != 'failed':
232 db_dict['configurationStatus'][vca_index] = 'READY'
233 elif config_status != 'BROKEN' and vca_status == 'failed':
234 db_dict['configurationStatus'][vca_index] = 'BROKEN'
235 except Exception as e:
236 # do not update configurationStatus
237 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
238
239 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
240 # if nsState = 'DEGRADED' check if all is OK
241 is_degraded = False
242 if current_ns_status in ('READY', 'DEGRADED'):
243 error_description = ''
244 # check machines
245 if status_dict.get('machines'):
246 for machine_id in status_dict.get('machines'):
247 machine = status_dict.get('machines').get(machine_id)
248 # check machine agent-status
249 if machine.get('agent-status'):
250 s = machine.get('agent-status').get('status')
251 if s != 'started':
252 is_degraded = True
253 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
254 # check machine instance status
255 if machine.get('instance-status'):
256 s = machine.get('instance-status').get('status')
257 if s != 'running':
258 is_degraded = True
259 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
260 # check applications
261 if status_dict.get('applications'):
262 for app_id in status_dict.get('applications'):
263 app = status_dict.get('applications').get(app_id)
264 # check application status
265 if app.get('status'):
266 s = app.get('status').get('status')
267 if s != 'active':
268 is_degraded = True
269 error_description += 'application {} status={} ; '.format(app_id, s)
270
271 if error_description:
272 db_dict['errorDescription'] = error_description
273 if current_ns_status == 'READY' and is_degraded:
274 db_dict['nsState'] = 'DEGRADED'
275 if current_ns_status == 'DEGRADED' and not is_degraded:
276 db_dict['nsState'] = 'READY'
277
278 # write to database
279 self.update_db_2("nsrs", nsr_id, db_dict)
280
281 except (asyncio.CancelledError, asyncio.TimeoutError):
282 raise
283 except Exception as e:
284 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
285
286 @staticmethod
287 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
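"""Render cloud_init_text as a Jinja2 template using additional_params as variables.
Illustrative example with assumed values: rendering "hostname: {{ hostname }}" with
additional_params={"hostname": "vnf1-vdu1"} returns "hostname: vnf1-vdu1". A template
variable missing from additional_params raises LcmException (StrictUndefined is used).
"""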
288 try:
289 env = Environment(undefined=StrictUndefined)
290 template = env.from_string(cloud_init_text)
291 return template.render(additional_params or {})
292 except UndefinedError as e:
293 raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
294 "file, must be provided in the instantiation parameters inside the "
295 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id))
296 except (TemplateError, TemplateNotFound) as e:
297 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
298 format(vnfd_id, vdu_id, e))
299
300 def _get_cloud_init(self, vdu, vnfd):
301 try:
302 cloud_init_content = cloud_init_file = None
303 if vdu.get("cloud-init-file"):
304 base_folder = vnfd["_admin"]["storage"]
305 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
306 vdu["cloud-init-file"])
307 with self.fs.file_open(cloud_init_file, "r") as ci_file:
308 cloud_init_content = ci_file.read()
309 elif vdu.get("cloud-init"):
310 cloud_init_content = vdu["cloud-init"]
311
312 return cloud_init_content
313 except FsException as e:
314 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
315 format(vnfd["id"], vdu["id"], cloud_init_file, e))
316
317 def _get_osm_params(self, db_vnfr, vdu_id=None, vdu_count_index=0):
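"""Build the 'OSM' dictionary injected as additional params (used e.g. for cloud-init rendering).
Illustrative shape of the result, with assumed values not taken from the source:
{"vnfd_id": "...", "ns_id": "...", "vnf_id": "...", "member_vnf_index": "1",
"vdu": {"mgmtVM-0": {"count_index": 0, "vdu_id": "mgmtVM", "ip_address": "10.0.0.4",
"interfaces": {"eth0": {"mac_address": "...", "ip_address": "..."}}}},
"vdu_id": ..., "count_index": ...}  # the last two keys only when vdu_id is passed
"""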
318 osm_params = {x.replace("-", "_"): db_vnfr[x] for x in ("ip-address", "vim-account-id", "vnfd-id", "vnfd-ref")
319 if db_vnfr.get(x) is not None}
320 osm_params["ns_id"] = db_vnfr["nsr-id-ref"]
321 osm_params["vnf_id"] = db_vnfr["_id"]
322 osm_params["member_vnf_index"] = db_vnfr["member-vnf-index-ref"]
323 if db_vnfr.get("vdur"):
324 osm_params["vdu"] = {}
325 for vdur in db_vnfr["vdur"]:
326 vdu = {
327 "count_index": vdur["count-index"],
328 "vdu_id": vdur["vdu-id-ref"],
329 "interfaces": {}
330 }
331 if vdur.get("ip-address"):
332 vdu["ip_address"] = vdur["ip-address"]
333 for iface in vdur["interfaces"]:
334 vdu["interfaces"][iface["name"]] = \
335 {x.replace("-", "_"): iface[x] for x in ("mac-address", "ip-address", "vnf-vld-id", "name")
336 if iface.get(x) is not None}
337 vdu_id_index = "{}-{}".format(vdur["vdu-id-ref"], vdur["count-index"])
338 osm_params["vdu"][vdu_id_index] = vdu
339 if vdu_id:
340 osm_params["vdu_id"] = vdu_id
341 osm_params["count_index"] = vdu_count_index
342 return osm_params
343
344 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
345 vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"])
346 additional_params = vdur.get("additionalParams")
347 return self._format_additional_params(additional_params)
348
349 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
350 """
351 Creates a new vnfd descriptor for RO based on the input OSM IM vnfd
352 :param vnfd: input vnfd
353 :param new_id: overrides vnf id if provided
354 :param additionalParams: Instantiation params for VNFs provided
355 :param nsrId: Id of the NSR
356 :return: copy of vnfd
357 """
358 vnfd_RO = deepcopy(vnfd)
359 # remove configuration, monitoring, scaling and internal keys not used by RO
360 vnfd_RO.pop("_id", None)
361 vnfd_RO.pop("_admin", None)
362 vnfd_RO.pop("vnf-configuration", None)
363 vnfd_RO.pop("monitoring-param", None)
364 vnfd_RO.pop("scaling-group-descriptor", None)
365 vnfd_RO.pop("kdu", None)
366 vnfd_RO.pop("k8s-cluster", None)
367 if new_id:
368 vnfd_RO["id"] = new_id
369
370 # remove cloud-init and cloud-init-file; they are parsed with Jinja2 and passed to RO via the ns params
371 for vdu in get_iterable(vnfd_RO, "vdu"):
372 vdu.pop("cloud-init-file", None)
373 vdu.pop("cloud-init", None)
374 return vnfd_RO
375
376 @staticmethod
377 def ip_profile_2_RO(ip_profile):
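"""Translate an OSM ip-profile into the format expected by RO.
Illustrative example with assumed values:
{"ip-version": "ipv4", "dns-server": [{"address": "8.8.8.8"}], "dhcp-params": {"enabled": True}}
becomes
{"ip-version": "IPv4", "dns-address": ["8.8.8.8"], "dhcp": {"enabled": True}}
"""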
378 RO_ip_profile = deepcopy(ip_profile)
379 if "dns-server" in RO_ip_profile:
380 if isinstance(RO_ip_profile["dns-server"], list):
381 RO_ip_profile["dns-address"] = []
382 for ds in RO_ip_profile.pop("dns-server"):
383 RO_ip_profile["dns-address"].append(ds['address'])
384 else:
385 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
386 if RO_ip_profile.get("ip-version") == "ipv4":
387 RO_ip_profile["ip-version"] = "IPv4"
388 if RO_ip_profile.get("ip-version") == "ipv6":
389 RO_ip_profile["ip-version"] = "IPv6"
390 if "dhcp-params" in RO_ip_profile:
391 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
392 return RO_ip_profile
393
394 def _ns_params_2_RO(self, ns_params, nsd, vnfd_dict, db_vnfrs, n2vc_key_list):
395 """
396 Creates a RO ns descriptor from OSM ns_instantiate params
397 :param ns_params: OSM instantiate params
398 :param vnfd_dict: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
399 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index. {member-vnf-index: {vnfr_object}, ...}
400 :return: The RO ns descriptor
401 """
402 vim_2_RO = {}
403 wim_2_RO = {}
404 # TODO feature 1417: Check that no instantiation is set over PDU
405 # check if PDU forces a concrete vim-network-id and add it
406 # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO
407
408 def vim_account_2_RO(vim_account):
409 if vim_account in vim_2_RO:
410 return vim_2_RO[vim_account]
411
412 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
413 if db_vim["_admin"]["operationalState"] != "ENABLED":
414 raise LcmException("VIM={} is not available. operationalState={}".format(
415 vim_account, db_vim["_admin"]["operationalState"]))
416 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
417 vim_2_RO[vim_account] = RO_vim_id
418 return RO_vim_id
419
420 def wim_account_2_RO(wim_account):
421 if isinstance(wim_account, str):
422 if wim_account in wim_2_RO:
423 return wim_2_RO[wim_account]
424
425 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
426 if db_wim["_admin"]["operationalState"] != "ENABLED":
427 raise LcmException("WIM={} is not available. operationalState={}".format(
428 wim_account, db_wim["_admin"]["operationalState"]))
429 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
430 wim_2_RO[wim_account] = RO_wim_id
431 return RO_wim_id
432 else:
433 return wim_account
434
435 if not ns_params:
436 return None
437 RO_ns_params = {
438 # "name": ns_params["nsName"],
439 # "description": ns_params.get("nsDescription"),
440 "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
441 "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
442 # "scenario": ns_params["nsdId"],
443 }
444 # set vim_account of each vnf if different from general vim_account.
445 # Get this information from <vnfr> database content, key vim-account-id
446 # Vim account can be set by placement_engine and it may be different from
447 # the instantiate parameters (vnfs.member-vnf-index.datacenter).
448 for vnf_index, vnfr in db_vnfrs.items():
449 if vnfr.get("vim-account-id") and vnfr["vim-account-id"] != ns_params["vimAccountId"]:
450 populate_dict(RO_ns_params, ("vnfs", vnf_index, "datacenter"), vim_account_2_RO(vnfr["vim-account-id"]))
451
452 n2vc_key_list = n2vc_key_list or []
453 for vnfd_ref, vnfd in vnfd_dict.items():
454 vdu_needed_access = []
455 mgmt_cp = None
456 if vnfd.get("vnf-configuration"):
457 ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
458 if ssh_required and vnfd.get("mgmt-interface"):
459 if vnfd["mgmt-interface"].get("vdu-id"):
460 vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
461 elif vnfd["mgmt-interface"].get("cp"):
462 mgmt_cp = vnfd["mgmt-interface"]["cp"]
463
464 for vdu in vnfd.get("vdu", ()):
465 if vdu.get("vdu-configuration"):
466 ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
467 if ssh_required:
468 vdu_needed_access.append(vdu["id"])
469 elif mgmt_cp:
470 for vdu_interface in vdu.get("interface"):
471 if vdu_interface.get("external-connection-point-ref") and \
472 vdu_interface["external-connection-point-ref"] == mgmt_cp:
473 vdu_needed_access.append(vdu["id"])
474 mgmt_cp = None
475 break
476
477 if vdu_needed_access:
478 for vnf_member in nsd.get("constituent-vnfd"):
479 if vnf_member["vnfd-id-ref"] != vnfd_ref:
480 continue
481 for vdu in vdu_needed_access:
482 populate_dict(RO_ns_params,
483 ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
484 n2vc_key_list)
485 # cloud init
486 for vdu in get_iterable(vnfd, "vdu"):
487 cloud_init_text = self._get_cloud_init(vdu, vnfd)
488 if not cloud_init_text:
489 continue
490 for vnf_member in nsd.get("constituent-vnfd"):
491 if vnf_member["vnfd-id-ref"] != vnfd_ref:
492 continue
493 db_vnfr = db_vnfrs[vnf_member["member-vnf-index"]]
494 additional_params = self._get_vdu_additional_params(db_vnfr, vdu["id"]) or {}
495
496 cloud_init_list = []
497 for vdu_index in range(0, int(vdu.get("count", 1))):
498 additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu["id"], vdu_index)
499 cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params, vnfd["id"],
500 vdu["id"]))
501 populate_dict(RO_ns_params,
502 ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu["id"], "cloud_init"),
503 cloud_init_list)
504
505 if ns_params.get("vduImage"):
506 RO_ns_params["vduImage"] = ns_params["vduImage"]
507
508 if ns_params.get("ssh_keys"):
509 RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
510 for vnf_params in get_iterable(ns_params, "vnf"):
511 for constituent_vnfd in nsd["constituent-vnfd"]:
512 if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
513 vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
514 break
515 else:
516 raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
517 "constituent-vnfd".format(vnf_params["member-vnf-index"]))
518
519 for vdu_params in get_iterable(vnf_params, "vdu"):
520 # TODO feature 1417: check that this VDU exist and it is not a PDU
521 if vdu_params.get("volume"):
522 for volume_params in vdu_params["volume"]:
523 if volume_params.get("vim-volume-id"):
524 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
525 vdu_params["id"], "devices", volume_params["name"], "vim_id"),
526 volume_params["vim-volume-id"])
527 if vdu_params.get("interface"):
528 for interface_params in vdu_params["interface"]:
529 if interface_params.get("ip-address"):
530 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
531 vdu_params["id"], "interfaces", interface_params["name"],
532 "ip_address"),
533 interface_params["ip-address"])
534 if interface_params.get("mac-address"):
535 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
536 vdu_params["id"], "interfaces", interface_params["name"],
537 "mac_address"),
538 interface_params["mac-address"])
539 if interface_params.get("floating-ip-required"):
540 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
541 vdu_params["id"], "interfaces", interface_params["name"],
542 "floating-ip"),
543 interface_params["floating-ip-required"])
544
545 for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
546 if internal_vld_params.get("vim-network-name"):
547 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
548 internal_vld_params["name"], "vim-network-name"),
549 internal_vld_params["vim-network-name"])
550 if internal_vld_params.get("vim-network-id"):
551 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
552 internal_vld_params["name"], "vim-network-id"),
553 internal_vld_params["vim-network-id"])
554 if internal_vld_params.get("ip-profile"):
555 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
556 internal_vld_params["name"], "ip-profile"),
557 self.ip_profile_2_RO(internal_vld_params["ip-profile"]))
558 if internal_vld_params.get("provider-network"):
559
560 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
561 internal_vld_params["name"], "provider-network"),
562 internal_vld_params["provider-network"].copy())
563
564 for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
565 # look for interface
566 iface_found = False
567 for vdu_descriptor in vnf_descriptor["vdu"]:
568 for vdu_interface in vdu_descriptor["interface"]:
569 if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
570 if icp_params.get("ip-address"):
571 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
572 vdu_descriptor["id"], "interfaces",
573 vdu_interface["name"], "ip_address"),
574 icp_params["ip-address"])
575
576 if icp_params.get("mac-address"):
577 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
578 vdu_descriptor["id"], "interfaces",
579 vdu_interface["name"], "mac_address"),
580 icp_params["mac-address"])
581 iface_found = True
582 break
583 if iface_found:
584 break
585 else:
586 raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
587 "internal-vld:id-ref={} is not present at vnfd:internal-"
588 "connection-point".format(vnf_params["member-vnf-index"],
589 icp_params["id-ref"]))
590
591 for vld_params in get_iterable(ns_params, "vld"):
592 if "ip-profile" in vld_params:
593 populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
594 self.ip_profile_2_RO(vld_params["ip-profile"]))
595
596 if vld_params.get("provider-network"):
597
598 populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
599 vld_params["provider-network"].copy())
600
601 if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
602 populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
603 wim_account_2_RO(vld_params["wimAccountId"]))
604 if vld_params.get("vim-network-name"):
605 RO_vld_sites = []
606 if isinstance(vld_params["vim-network-name"], dict):
607 for vim_account, vim_net in vld_params["vim-network-name"].items():
608 RO_vld_sites.append({
609 "netmap-use": vim_net,
610 "datacenter": vim_account_2_RO(vim_account)
611 })
612 else: # isinstance str
613 RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
614 if RO_vld_sites:
615 populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
616
617 if vld_params.get("vim-network-id"):
618 RO_vld_sites = []
619 if isinstance(vld_params["vim-network-id"], dict):
620 for vim_account, vim_net in vld_params["vim-network-id"].items():
621 RO_vld_sites.append({
622 "netmap-use": vim_net,
623 "datacenter": vim_account_2_RO(vim_account)
624 })
625 else: # isinstance str
626 RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
627 if RO_vld_sites:
628 populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
629 if vld_params.get("ns-net"):
630 if isinstance(vld_params["ns-net"], dict):
631 for vld_id, instance_scenario_id in vld_params["ns-net"].items():
632 RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
633 populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
634 if "vnfd-connection-point-ref" in vld_params:
635 for cp_params in vld_params["vnfd-connection-point-ref"]:
636 # look for interface
637 for constituent_vnfd in nsd["constituent-vnfd"]:
638 if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
639 vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
640 break
641 else:
642 raise LcmException(
643 "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
644 "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
645 match_cp = False
646 for vdu_descriptor in vnf_descriptor["vdu"]:
647 for interface_descriptor in vdu_descriptor["interface"]:
648 if interface_descriptor.get("external-connection-point-ref") == \
649 cp_params["vnfd-connection-point-ref"]:
650 match_cp = True
651 break
652 if match_cp:
653 break
654 else:
655 raise LcmException(
656 "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
657 "vnfd-connection-point-ref={} is not present at vnfd={}".format(
658 cp_params["member-vnf-index-ref"],
659 cp_params["vnfd-connection-point-ref"],
660 vnf_descriptor["id"]))
661 if cp_params.get("ip-address"):
662 populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
663 vdu_descriptor["id"], "interfaces",
664 interface_descriptor["name"], "ip_address"),
665 cp_params["ip-address"])
666 if cp_params.get("mac-address"):
667 populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
668 vdu_descriptor["id"], "interfaces",
669 interface_descriptor["name"], "mac_address"),
670 cp_params["mac-address"])
671 return RO_ns_params
672
673 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
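"""Add or remove vdur entries of a vnfr in the database when scaling.
:param vdu_create: dict of vdu-id-ref: number of new vdur copies to add (scale out)
:param vdu_delete: dict of vdu-id-ref: number of vdur entries to remove (scale in)
:param mark_delete: if True, vdur entries are only marked with status DELETING instead of being removed
The passed db_vnfr dictionary is refreshed with the resulting "vdur" list.
"""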
674
675 db_vdu_push_list = []
676 db_update = {"_admin.modified": time()}
677 if vdu_create:
678 for vdu_id, vdu_count in vdu_create.items():
679 vdur = next((vdur for vdur in reversed(db_vnfr["vdur"]) if vdur["vdu-id-ref"] == vdu_id), None)
680 if not vdur:
681 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".
682 format(vdu_id))
683
684 for count in range(vdu_count):
685 vdur_copy = deepcopy(vdur)
686 vdur_copy["status"] = "BUILD"
687 vdur_copy["status-detailed"] = None
688 vdur_copy["ip-address"]: None
689 vdur_copy["_id"] = str(uuid4())
690 vdur_copy["count-index"] += count + 1
691 vdur_copy["id"] = "{}-{}".format(vdur_copy["vdu-id-ref"], vdur_copy["count-index"])
692 vdur_copy.pop("vim_info", None)
693 for iface in vdur_copy["interfaces"]:
694 if iface.get("fixed-ip"):
695 iface["ip-address"] = self.increment_ip_mac(iface["ip-address"], count+1)
696 else:
697 iface.pop("ip-address", None)
698 if iface.get("fixed-mac"):
699 iface["mac-address"] = self.increment_ip_mac(iface["mac-address"], count+1)
700 else:
701 iface.pop("mac-address", None)
702 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf
703 db_vdu_push_list.append(vdur_copy)
704 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
705 if vdu_delete:
706 for vdu_id, vdu_count in vdu_delete.items():
707 if mark_delete:
708 indexes_to_delete = [iv[0] for iv in enumerate(db_vnfr["vdur"]) if iv[1]["vdu-id-ref"] == vdu_id]
709 db_update.update({"vdur.{}.status".format(i): "DELETING" for i in indexes_to_delete[-vdu_count:]})
710 else:
711 # it must be deleted one by one because common.db does not allow otherwise
712 vdus_to_delete = [v for v in reversed(db_vnfr["vdur"]) if v["vdu-id-ref"] == vdu_id]
713 for vdu in vdus_to_delete[:vdu_count]:
714 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, None, pull={"vdur": {"_id": vdu["_id"]}})
715 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
716 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
717 # modify passed dictionary db_vnfr
718 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
719 db_vnfr["vdur"] = db_vnfr_["vdur"]
720
721 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
722 """
723 Updates database nsr with the RO info for the created vld
724 :param ns_update_nsr: dictionary to be filled with the updated info
725 :param db_nsr: content of db_nsr. This is also modified
726 :param nsr_desc_RO: nsr descriptor from RO
727 :return: Nothing, LcmException is raised on errors
728 """
729
730 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
731 for net_RO in get_iterable(nsr_desc_RO, "nets"):
732 if vld["id"] != net_RO.get("ns_net_osm_id"):
733 continue
734 vld["vim-id"] = net_RO.get("vim_net_id")
735 vld["name"] = net_RO.get("vim_name")
736 vld["status"] = net_RO.get("status")
737 vld["status-detailed"] = net_RO.get("error_msg")
738 ns_update_nsr["vld.{}".format(vld_index)] = vld
739 break
740 else:
741 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
742
743 def set_vnfr_at_error(self, db_vnfrs, error_text):
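"""Set status ERROR on every vnfr and on each of its vdur entries that lacks a status,
recording error_text as status-detailed when provided. Database errors are logged, not raised.
"""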
744 try:
745 for db_vnfr in db_vnfrs.values():
746 vnfr_update = {"status": "ERROR"}
747 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
748 if "status" not in vdur:
749 vdur["status"] = "ERROR"
750 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
751 if error_text:
752 vdur["status-detailed"] = str(error_text)
753 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
754 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
755 except DbException as e:
756 self.logger.error("Cannot update vnf. {}".format(e))
757
758 def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
759 """
760 Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
761 :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
762 :param nsr_desc_RO: nsr descriptor from RO
763 :return: Nothing, LcmException is raised on errors
764 """
765 for vnf_index, db_vnfr in db_vnfrs.items():
766 for vnf_RO in nsr_desc_RO["vnfs"]:
767 if vnf_RO["member_vnf_index"] != vnf_index:
768 continue
769 vnfr_update = {}
770 if vnf_RO.get("ip_address"):
771 db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
772 elif not db_vnfr.get("ip-address"):
773 if db_vnfr.get("vdur"): # if not VDUs, there is not ip_address
774 raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))
775
776 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
777 vdur_RO_count_index = 0
778 if vdur.get("pdu-type"):
779 continue
780 for vdur_RO in get_iterable(vnf_RO, "vms"):
781 if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
782 continue
783 if vdur["count-index"] != vdur_RO_count_index:
784 vdur_RO_count_index += 1
785 continue
786 vdur["vim-id"] = vdur_RO.get("vim_vm_id")
787 if vdur_RO.get("ip_address"):
788 vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
789 else:
790 vdur["ip-address"] = None
791 vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
792 vdur["name"] = vdur_RO.get("vim_name")
793 vdur["status"] = vdur_RO.get("status")
794 vdur["status-detailed"] = vdur_RO.get("error_msg")
795 for ifacer in get_iterable(vdur, "interfaces"):
796 for interface_RO in get_iterable(vdur_RO, "interfaces"):
797 if ifacer["name"] == interface_RO.get("internal_name"):
798 ifacer["ip-address"] = interface_RO.get("ip_address")
799 ifacer["mac-address"] = interface_RO.get("mac_address")
800 break
801 else:
802 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
803 "from VIM info"
804 .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
805 vnfr_update["vdur.{}".format(vdu_index)] = vdur
806 break
807 else:
808 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
809 "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))
810
811 for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
812 for net_RO in get_iterable(nsr_desc_RO, "nets"):
813 if vld["id"] != net_RO.get("vnf_net_osm_id"):
814 continue
815 vld["vim-id"] = net_RO.get("vim_net_id")
816 vld["name"] = net_RO.get("vim_name")
817 vld["status"] = net_RO.get("status")
818 vld["status-detailed"] = net_RO.get("error_msg")
819 vnfr_update["vld.{}".format(vld_index)] = vld
820 break
821 else:
822 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
823 vnf_index, vld["id"]))
824
825 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
826 break
827
828 else:
829 raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
830
831 def _get_ns_config_info(self, nsr_id):
832 """
833 Generates a mapping between vnf,vdu elements and the N2VC id
834 :param nsr_id: id of the nsr whose last database _admin.deployed.VCA list is used
835 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
836 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
837 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
838 """
839 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
840 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
841 mapping = {}
842 ns_config_info = {"osm-config-mapping": mapping}
843 for vca in vca_deployed_list:
844 if not vca["member-vnf-index"]:
845 continue
846 if not vca["vdu_id"]:
847 mapping[vca["member-vnf-index"]] = vca["application"]
848 else:
849 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
850 vca["application"]
851 return ns_config_info
852
853 @staticmethod
854 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed, ee_descriptor_id):
855 """
856 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
857 primitives such as verify-ssh-credentials or config when needed
858 :param desc_primitive_list: information of the descriptor
859 :param vca_deployed: information of the deployed element, needed to know whether it is related to an NS, VNF or
860 VDU and whether this element contains a ssh public key
861 :param ee_descriptor_id: execution environment descriptor id. It is the value of
862 XXX_configuration.execution-environment-list.INDEX.id; it can be None
863 :return: The modified list. It can be an empty list, but it is always a list
864 """
865
866 primitive_list = desc_primitive_list or []
867
868 # filter primitives by ee_id
869 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
870
871 # sort by 'seq'
872 if primitive_list:
873 primitive_list.sort(key=lambda val: int(val['seq']))
874
875 # look for primitive config, and get the position. None if not present
876 config_position = None
877 for index, primitive in enumerate(primitive_list):
878 if primitive["name"] == "config":
879 config_position = index
880 break
881
882 # for NS, always add a config primitive if not present (bug 874)
883 if not vca_deployed["member-vnf-index"] and config_position is None:
884 primitive_list.insert(0, {"name": "config", "parameter": []})
885 config_position = 0
886 # TODO revise if needed: for VNF/VDU add verify-ssh-credentials after config
887 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
888 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
889 return primitive_list
890
891 async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
892 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
893
894 db_vims = {}
895
896 def get_vim_account(vim_account_id):
897 nonlocal db_vims
898 if vim_account_id in db_vims:
899 return db_vims[vim_account_id]
900 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
901 db_vims[vim_account_id] = db_vim
902 return db_vim
903
904 # modify target_vld info with instantiation parameters
905 def parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn):
906 if vld_params.get("ip-profile"):
907 target_vld["vim_info"][target_vim]["ip_profile"] = vld_params["ip-profile"]
908 if vld_params.get("provider-network"):
909 target_vld["vim_info"][target_vim]["provider_network"] = vld_params["provider-network"]
910 if "sdn-ports" in vld_params["provider-network"] and target_sdn:
911 target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params["provider-network"]["sdn-ports"]
912 if vld_params.get("wimAccountId"):
913 target_wim = "wim:{}".format(vld_params["wimAccountId"])
914 target_vld["vim_info"][target_wim] = {}
915 for param in ("vim-network-name", "vim-network-id"):
916 if vld_params.get(param):
917 if isinstance(vld_params[param], dict):
918 pass
919 # for vim_account, vim_net in vld_params[param].items():
920 # TODO populate vim_info RO_vld_sites.append({
921 else: # isinstance str
922 target_vld["vim_info"][target_vim][param.replace("-", "_")] = vld_params[param]
923 # TODO if vld_params.get("ns-net"):
924
925 nslcmop_id = db_nslcmop["_id"]
926 target = {
927 "name": db_nsr["name"],
928 "ns": {"vld": []},
929 "vnf": [],
930 "image": deepcopy(db_nsr["image"]),
931 "flavor": deepcopy(db_nsr["flavor"]),
932 "action_id": nslcmop_id,
933 "cloud_init_content": {},
934 }
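# "target" above is the NG-RO deploy request built from the database records; a minimal sketch of its
# shape (assumed, for illustration only): {"name": ..., "action_id": ..., "ns": {"vld": [...]},
# "vnf": [{..., "vdur": [...]}], "image": [...], "flavor": [...], "cloud_init_content": {...}}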
935 for image in target["image"]:
936 image["vim_info"] = {}
937 for flavor in target["flavor"]:
938 flavor["vim_info"] = {}
939
940 if db_nslcmop.get("lcmOperationType") != "instantiate":
941 # get parameters of instantiation:
942 db_nslcmop_instantiate = self.db.get_list("nslcmops", {"nsInstanceId": db_nslcmop["nsInstanceId"],
943 "lcmOperationType": "instantiate"})[-1]
944 ns_params = db_nslcmop_instantiate.get("operationParams")
945 else:
946 ns_params = db_nslcmop.get("operationParams")
947 ssh_keys = []
948 if ns_params.get("ssh_keys"):
949 ssh_keys += ns_params.get("ssh_keys")
950 if n2vc_key_list:
951 ssh_keys += n2vc_key_list
952
953 cp2target = {}
954 for vld_index, vld in enumerate(db_nsr.get("vld")):
955 target_vim = "vim:{}".format(ns_params["vimAccountId"])
956 target_vld = {
957 "id": vld["id"],
958 "name": vld["name"],
959 "mgmt-network": vld.get("mgmt-network", False),
960 "type": vld.get("type"),
961 "vim_info": {
962 target_vim: {"vim-network-name": vld.get("vim-network-name")}
963 }
964 }
965 # check if this network needs SDN assist
966 target_sdn = None
967 if vld.get("pci-interfaces"):
968 db_vim = get_vim_account(ns_params["vimAccountId"])
969 sdnc_id = db_vim["config"].get("sdn-controller")
970 if sdnc_id:
971 sdn_vld = "nsrs:{}:vld.{}".format(nsr_id, vld["id"])
972 target_sdn = "sdn:{}".format(sdnc_id)
973 target_vld["vim_info"][target_sdn] = {
974 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
975
976 nsd_vld = next(v for v in nsd["vld"] if v["id"] == vld["id"])
977 for cp in nsd_vld["vnfd-connection-point-ref"]:
978 cp2target["member_vnf:{}.{}".format(cp["member-vnf-index-ref"], cp["vnfd-connection-point-ref"])] = \
979 "nsrs:{}:vld.{}".format(nsr_id, vld_index)
980
981 # check at nsd descriptor, if there is an ip-profile
982 vld_params = {}
983 if nsd_vld.get("ip-profile-ref"):
984 ip_profile = next(ipp for ipp in nsd["ip-profiles"] if ipp["name"] == nsd_vld["ip-profile-ref"])
985 vld_params["ip-profile"] = ip_profile["ip-profile-params"]
986 # update vld_params with instantiation params
987 vld_instantiation_params = next((v for v in get_iterable(ns_params, "vld")
988 if v["name"] in (vld["name"], vld["id"])), None)
989 if vld_instantiation_params:
990 vld_params.update(vld_instantiation_params)
991 parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn)
992 target["ns"]["vld"].append(target_vld)
993 for vnfr in db_vnfrs.values():
994 vnfd = db_vnfds_ref[vnfr["vnfd-ref"]]
995 vnf_params = next((v for v in get_iterable(ns_params, "vnf")
996 if v["member-vnf-index"] == vnfr["member-vnf-index-ref"]), None)
997 target_vnf = deepcopy(vnfr)
998 target_vim = "vim:{}".format(vnfr["vim-account-id"])
999 for vld in target_vnf.get("vld", ()):
1000 # check if connected to a ns.vld, to fill target
1001 vnf_cp = next((cp for cp in vnfd.get("connection-point", ()) if
1002 cp.get("internal-vld-ref") == vld["id"]), None)
1003 if vnf_cp:
1004 ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
1005 if cp2target.get(ns_cp):
1006 vld["target"] = cp2target[ns_cp]
1007 vld["vim_info"] = {target_vim: {"vim-network-name": vld.get("vim-network-name")}}
1008 # check if this network needs SDN assist
1009 target_sdn = None
1010 if vld.get("pci-interfaces"):
1011 db_vim = get_vim_account(vnfr["vim-account-id"])
1012 sdnc_id = db_vim["config"].get("sdn-controller")
1013 if sdnc_id:
1014 sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
1015 target_sdn = "sdn:{}".format(sdnc_id)
1016 vld["vim_info"][target_sdn] = {
1017 "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}
1018
1019 # check at vnfd descriptor, if there is an ip-profile
1020 vld_params = {}
1021 vnfd_vld = next(v for v in vnfd["internal-vld"] if v["id"] == vld["id"])
1022 if vnfd_vld.get("ip-profile-ref"):
1023 ip_profile = next(ipp for ipp in vnfd["ip-profiles"] if ipp["name"] == vnfd_vld["ip-profile-ref"])
1024 vld_params["ip-profile"] = ip_profile["ip-profile-params"]
1025 # update vld_params with instantiation params
1026 if vnf_params:
1027 vld_instantiation_params = next((v for v in get_iterable(vnf_params, "internal-vld")
1028 if v["name"] == vld["id"]), None)
1029 if vld_instantiation_params:
1030 vld_params.update(vld_instantiation_params)
1031 parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)
1032
1033 vdur_list = []
1034 for vdur in target_vnf.get("vdur", ()):
1035 if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
1036 continue # This vdu must not be created
1037 vdur["vim_info"] = {target_vim: {}}
1038 vdud_index, vdud = next(k for k in enumerate(vnfd["vdu"]) if k[1]["id"] == vdur["vdu-id-ref"])
1039
1040 if ssh_keys:
1041 if deep_get(vdud, ("vdu-configuration", "config-access", "ssh-access", "required")):
1042 vdur["ssh-keys"] = ssh_keys
1043 vdur["ssh-access-required"] = True
1044 elif deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required")) and \
1045 any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
1046 vdur["ssh-keys"] = ssh_keys
1047 vdur["ssh-access-required"] = True
1048
1049 # cloud-init
1050 if vdud.get("cloud-init-file"):
1051 vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
1052 # read the file and put its content at target.cloud_init_content, so ng_ro does not need the shared package filesystem
1053 if vdur["cloud-init"] not in target["cloud_init_content"]:
1054 base_folder = vnfd["_admin"]["storage"]
1055 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
1056 vdud.get("cloud-init-file"))
1057 with self.fs.file_open(cloud_init_file, "r") as ci_file:
1058 target["cloud_init_content"][vdur["cloud-init"]] = ci_file.read()
1059 elif vdud.get("cloud-init"):
1060 vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], vdud_index)
1061 # put content at target.cloud_init_content, so ng_ro does not need to read the vnfd descriptor
1062 target["cloud_init_content"][vdur["cloud-init"]] = vdud["cloud-init"]
1063 vdur["additionalParams"] = vdur.get("additionalParams") or {}
1064 deploy_params_vdu = self._format_additional_params(vdur.get("additionalParams") or {})
1065 deploy_params_vdu["OSM"] = self._get_osm_params(vnfr, vdur["vdu-id-ref"], vdur["count-index"])
1066 vdur["additionalParams"] = deploy_params_vdu
1067
1068 # flavor
1069 ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
1070 if target_vim not in ns_flavor["vim_info"]:
1071 ns_flavor["vim_info"][target_vim] = {}
1072 # image
1073 ns_image = target["image"][int(vdur["ns-image-id"])]
1074 if target_vim not in ns_image["vim_info"]:
1075 ns_image["vim_info"][target_vim] = {}
1076
1077 vdur["vim_info"] = {target_vim: {}}
1078 # instantiation parameters
1079 # if vnf_params:
1080 # vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
1081 # vdud["id"]), None)
1082 vdur_list.append(vdur)
1083 target_vnf["vdur"] = vdur_list
1084 target["vnf"].append(target_vnf)
1085
1086 desc = await self.RO.deploy(nsr_id, target)
1087 action_id = desc["action_id"]
1088 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)
1089
1090 # Updating NSR
1091 db_nsr_update = {
1092 "_admin.deployed.RO.operational-status": "running",
1093 "detailed-status": " ".join(stage)
1094 }
1095 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1096 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1097 self._write_op_status(nslcmop_id, stage)
1098 self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
1099 return
1100
1101 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id=None, start_time=None, timeout=600, stage=None):
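"""Poll NG-RO for the status of an action until it finishes or the timeout expires.
Statuses handled below: FAILED raises NgRoException, BUILD keeps waiting (updating the stage and
detailed-status in the database), DONE ends the wait; any other value is treated as an error.
The polling interval is 5 seconds and 'timeout' is expressed in seconds (default 600).
"""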
1102 detailed_status_old = None
1103 db_nsr_update = {}
1104 start_time = start_time or time()
1105 while time() <= start_time + timeout:
1106 desc_status = await self.RO.status(nsr_id, action_id)
1107 if desc_status["status"] == "FAILED":
1108 raise NgRoException(desc_status["details"])
1109 elif desc_status["status"] == "BUILD":
1110 if stage:
1111 stage[2] = "VIM: ({})".format(desc_status["details"])
1112 elif desc_status["status"] == "DONE":
1113 if stage:
1114 stage[2] = "Deployed at VIM"
1115 break
1116 else:
1117 assert False, "NG-RO status returns unknown {}".format(desc_status["status"])
1118 if stage and nslcmop_id and stage[2] != detailed_status_old:
1119 detailed_status_old = stage[2]
1120 db_nsr_update["detailed-status"] = " ".join(stage)
1121 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1122 self._write_op_status(nslcmop_id, stage)
1123 await asyncio.sleep(5, loop=self.loop)
1124 else: # timeout_ns_deploy
1125 raise NgRoException("Timeout waiting for ns to deploy")
1126
1127 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
1128 db_nsr_update = {}
1129 failed_detail = []
1130 action_id = None
1131 start_deploy = time()
1132 try:
1133 target = {
1134 "ns": {"vld": []},
1135 "vnf": [],
1136 "image": [],
1137 "flavor": [],
1138 "action_id": nslcmop_id
1139 }
1140 desc = await self.RO.deploy(nsr_id, target)
1141 action_id = desc["action_id"]
1142 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
1143 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
1144 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
1145
1146 # wait until done
1147 delete_timeout = 20 * 60 # 20 minutes
1148 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
1149
1150 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1151 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1152 # delete all nsr
1153 await self.RO.delete(nsr_id)
1154 except Exception as e:
1155 if isinstance(e, NgRoException) and e.http_code == 404: # not found
1156 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1157 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
1158 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
1159 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
1160 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
1161 failed_detail.append("delete conflict: {}".format(e))
1162 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
1163 else:
1164 failed_detail.append("delete error: {}".format(e))
1165 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
1166
1167 if failed_detail:
1168 stage[2] = "Error deleting from VIM"
1169 else:
1170 stage[2] = "Deleted from VIM"
1171 db_nsr_update["detailed-status"] = " ".join(stage)
1172 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1173 self._write_op_status(nslcmop_id, stage)
1174
1175 if failed_detail:
1176 raise LcmException("; ".join(failed_detail))
1177 return
1178
1179 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
1180 n2vc_key_list, stage):
1181 """
1182 Instantiate at RO
1183 :param logging_text: prefix text to use for logging
1184 :param nsr_id: nsr identity
1185 :param nsd: database content of ns descriptor
1186 :param db_nsr: database content of ns record
1187 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
1188 :param db_vnfrs:
1189 :param db_vnfds_ref: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
1190 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
1191 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
1192 :return: None or exception
1193 """
1194 try:
1195 db_nsr_update = {}
1196 RO_descriptor_number = 0 # number of descriptors created at RO
1197 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
1198 nslcmop_id = db_nslcmop["_id"]
1199 start_deploy = time()
1200 ns_params = db_nslcmop.get("operationParams")
1201 if ns_params and ns_params.get("timeout_ns_deploy"):
1202 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1203 else:
1204 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
1205
1206 # Check for and optionally request placement optimization. Database will be updated if placement activated
1207 stage[2] = "Waiting for Placement."
1208 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
1209 # in case of placement, update ns_params["vimAccountId"] if it is not present at any vnfr
1210 for vnfr in db_vnfrs.values():
1211 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
1212 break
1213 else:
1214 ns_params["vimAccountId"] == vnfr["vim-account-id"]
1215
1216 if self.ng_ro:
1217 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
1218 db_vnfds_ref, n2vc_key_list, stage, start_deploy,
1219 timeout_ns_deploy)
1220 # deploy RO
1221 # get vnfds, instantiate at RO
1222 for c_vnf in nsd.get("constituent-vnfd", ()):
1223 member_vnf_index = c_vnf["member-vnf-index"]
1224 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
1225 vnfd_ref = vnfd["id"]
1226
1227 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
1228 db_nsr_update["detailed-status"] = " ".join(stage)
1229 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1230 self._write_op_status(nslcmop_id, stage)
1231
1232 # self.logger.debug(logging_text + stage[2])
1233 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
1234 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
1235 RO_descriptor_number += 1
1236
1237 # look for the position at deployed.RO.vnfd; if not present, it will be appended at the end
1238 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
1239 if vnf_deployed["member-vnf-index"] == member_vnf_index:
1240 break
1241 else:
1242 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
1243 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
1244
1245 # look if present
1246 RO_update = {"member-vnf-index": member_vnf_index}
1247 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
1248 if vnfd_list:
1249 RO_update["id"] = vnfd_list[0]["uuid"]
1250 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
1251 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
1252 else:
1253 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
1254 get("additionalParamsForVnf"), nsr_id)
1255 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
1256 RO_update["id"] = desc["uuid"]
1257 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
1258 vnfd_ref, member_vnf_index, desc["uuid"]))
1259 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
1260 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
1261
1262 # create nsd at RO
1263 nsd_ref = nsd["id"]
1264
1265 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
1266 db_nsr_update["detailed-status"] = " ".join(stage)
1267 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1268 self._write_op_status(nslcmop_id, stage)
1269
1270 # self.logger.debug(logging_text + stage[2])
1271 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
1272 RO_descriptor_number += 1
1273 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
1274 if nsd_list:
1275 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
1276 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
1277 nsd_ref, RO_nsd_uuid))
1278 else:
1279 nsd_RO = deepcopy(nsd)
1280 nsd_RO["id"] = RO_osm_nsd_id
1281 nsd_RO.pop("_id", None)
1282 nsd_RO.pop("_admin", None)
1283 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
1284 member_vnf_index = c_vnf["member-vnf-index"]
1285 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1286 for c_vld in nsd_RO.get("vld", ()):
1287 for cp in c_vld.get("vnfd-connection-point-ref", ()):
1288 member_vnf_index = cp["member-vnf-index-ref"]
1289 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
1290
1291 desc = await self.RO.create("nsd", descriptor=nsd_RO)
1292 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1293 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
1294 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
1295 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1296
1297 # Create ns at RO
1298 stage[2] = "Creating ns at RO from nsd={}".format(nsd_ref)
1299 db_nsr_update["detailed-status"] = " ".join(stage)
1300 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1301 self._write_op_status(nslcmop_id, stage)
1302
1303 # if present use it unless in error status
1304 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
1305 if RO_nsr_id:
1306 try:
1307 stage[2] = "Looking for existing ns at RO"
1308 db_nsr_update["detailed-status"] = " ".join(stage)
1309 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1310 self._write_op_status(nslcmop_id, stage)
1311 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1312 desc = await self.RO.show("ns", RO_nsr_id)
1313
1314 except ROclient.ROClientException as e:
1315 if e.http_code != HTTPStatus.NOT_FOUND:
1316 raise
1317 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1318 if RO_nsr_id:
1319 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1320 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1321 if ns_status == "ERROR":
1322 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
1323 self.logger.debug(logging_text + stage[2])
1324 await self.RO.delete("ns", RO_nsr_id)
1325 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
1326 if not RO_nsr_id:
1327 stage[2] = "Checking dependencies"
1328 db_nsr_update["detailed-status"] = " ".join(stage)
1329 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1330 self._write_op_status(nslcmop_id, stage)
1331 # self.logger.debug(logging_text + stage[2])
1332
1333 # check whether the VIM account is still being created and wait for any related task in process
1334 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
1335 if task_dependency:
1336 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
1337 self.logger.debug(logging_text + stage[2])
1338 await asyncio.wait(task_dependency, timeout=3600)
1339 if ns_params.get("vnf"):
1340 for vnf in ns_params["vnf"]:
1341 if "vimAccountId" in vnf:
1342 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
1343 vnf["vimAccountId"])
1344 if task_dependency:
1345 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
1346 self.logger.debug(logging_text + stage[2])
1347 await asyncio.wait(task_dependency, timeout=3600)
1348
1349 stage[2] = "Checking instantiation parameters."
1350 RO_ns_params = self._ns_params_2_RO(ns_params, nsd, db_vnfds_ref, db_vnfrs, n2vc_key_list)
1351 stage[2] = "Deploying ns at VIM."
1352 db_nsr_update["detailed-status"] = " ".join(stage)
1353 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1354 self._write_op_status(nslcmop_id, stage)
1355
1356 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
1357 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
1358 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1359 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
1360 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
1361
1362 # wait until NS is ready
1363 stage[2] = "Waiting for VIM to deploy ns."
1364 db_nsr_update["detailed-status"] = " ".join(stage)
1365 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1366 self._write_op_status(nslcmop_id, stage)
1367 detailed_status_old = None
1368 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
1369
1370 old_desc = None
1371 while time() <= start_deploy + timeout_ns_deploy:
1372 desc = await self.RO.show("ns", RO_nsr_id)
1373
1374 # deploymentStatus
1375 if desc != old_desc:
1376 # desc has changed => update db
1377 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
1378 old_desc = desc
1379
1380 ns_status, ns_status_info = self.RO.check_ns_status(desc)
1381 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
1382 if ns_status == "ERROR":
1383 raise ROclient.ROClientException(ns_status_info)
1384 elif ns_status == "BUILD":
1385 stage[2] = "VIM: ({})".format(ns_status_info)
1386 elif ns_status == "ACTIVE":
1387 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
1388 try:
1389 self.ns_update_vnfr(db_vnfrs, desc)
1390 break
1391 except LcmExceptionNoMgmtIP:
1392 pass
1393 else:
1394 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
1395 if stage[2] != detailed_status_old:
1396 detailed_status_old = stage[2]
1397 db_nsr_update["detailed-status"] = " ".join(stage)
1398 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1399 self._write_op_status(nslcmop_id, stage)
1400 await asyncio.sleep(5, loop=self.loop)
1401 else: # timeout_ns_deploy
1402 raise ROclient.ROClientException("Timeout waiting for ns to be ready")
1403
1404 # Updating NSR
1405 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
1406
1407 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
1408 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
1409 stage[2] = "Deployed at VIM"
1410 db_nsr_update["detailed-status"] = " ".join(stage)
1411 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1412 self._write_op_status(nslcmop_id, stage)
1413 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
1414 # self.logger.debug(logging_text + "Deployed at VIM")
1415 except Exception as e:
1416 stage[2] = "ERROR deploying at VIM"
1417 self.set_vnfr_at_error(db_vnfrs, str(e))
1418 self.logger.error("Error deploying at VIM {}".format(e),
1419 exc_info=not isinstance(e, (ROclient.ROClientException, LcmException, DbException,
1420 NgRoException)))
1421 raise
1422
1423 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
1424 """
1425 Wait for kdu to be up, get ip address
1426 :param logging_text: prefix used for logging
1427 :param nsr_id:
1428 :param vnfr_id:
1429 :param kdu_name:
1430 :return: IP address
1431 """
1432
1433 # self.logger.debug(logging_text + "Starting wait_kdu_up")
1434 nb_tries = 0
1435
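# poll the vnfr every 10 seconds, up to 360 tries (roughly 1 hour)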
1436 while nb_tries < 360:
1437 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1438 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None)
1439 if not kdur:
1440 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
1441 if kdur.get("status"):
1442 if kdur["status"] in ("READY", "ENABLED"):
1443 return kdur.get("ip-address")
1444 else:
1445 raise LcmException("target KDU={} is in error state".format(kdu_name))
1446
1447 await asyncio.sleep(10, loop=self.loop)
1448 nb_tries += 1
1449 raise LcmException("Timeout waiting for KDU={} to be instantiated".format(kdu_name))
1450
1451 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
1452 """
1453 Wait for IP address at RO and, optionally, insert a public key in the virtual machine
1454 :param logging_text: prefix used for logging
1455 :param nsr_id:
1456 :param vnfr_id:
1457 :param vdu_id:
1458 :param vdu_index:
1459 :param pub_key: public ssh key to inject, None to skip
1460 :param user: user to apply the public ssh key
1461 :return: IP address
1462 """
1463
1464 self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
1465 ro_nsr_id = None
1466 ip_address = None
1467 nb_tries = 0
1468 target_vdu_id = None
1469 ro_retries = 0
1470
1471 while True:
1472
1473 ro_retries += 1
1474 if ro_retries >= 360: # 1 hour
1475 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
1476
1477 await asyncio.sleep(10, loop=self.loop)
1478
1479 # get ip address
1480 if not target_vdu_id:
1481 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
1482
1483 if not vdu_id: # for the VNF case
1484 if db_vnfr.get("status") == "ERROR":
1485 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
1486 ip_address = db_vnfr.get("ip-address")
1487 if not ip_address:
1488 continue
1489 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1490 else: # VDU case
1491 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1492 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1493
1494 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1495 vdur = db_vnfr["vdur"][0]
1496 if not vdur:
1497 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1498 vdu_index))
1499 # New generation RO stores information at "vim_info"
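# illustrative, assumed shape: vdur["vim_info"] == {"<target-vim-key>": {"vim_status": "ACTIVE", ...}}, with a single key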
1500 ng_ro_status = None
1501 if vdur.get("vim_info"):
1502 target_vim = next(t for t in vdur["vim_info"]) # there should be only one key
1503 ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
1504 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE" or ng_ro_status == "ACTIVE":
1505 ip_address = vdur.get("ip-address")
1506 if not ip_address:
1507 continue
1508 target_vdu_id = vdur["vdu-id-ref"]
1509 elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":  # ng_ro_status is None when vim_info is absent, avoiding a KeyError for classic RO
1510 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1511
1512 if not target_vdu_id:
1513 continue
1514
1515 # inject public key into machine
1516 if pub_key and user:
1517 self.logger.debug(logging_text + "Inserting RO key")
1518 if vdur.get("pdu-type"):
1519 self.logger.error(logging_text + "Cannot inject ssh-key to a PDU")
1520 return ip_address
1521 try:
1522 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
1523 if self.ng_ro:
1524 self.logger.debug(logging_text + "Launching ssh-key injection order to NG-RO")
1525 target = {"action": "inject_ssh_key", "key": pub_key, "user": user,
1526 "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
1527 }
1528 desc = await self.RO.deploy(nsr_id, target)
1529 action_id = desc["action_id"]
1530 await self._wait_ng_ro(nsr_id, action_id, timeout=600)
1531 break
1532 else:
1533 # wait until NS is deployed at RO
1534 if not ro_nsr_id:
1535 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1536 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1537 if not ro_nsr_id:
1538 continue
1539 result_dict = await self.RO.create_action(
1540 item="ns",
1541 item_id_name=ro_nsr_id,
1542 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1543 )
1544 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1545 if not result_dict or not isinstance(result_dict, dict):
1546 raise LcmException("Unknown response from RO when injecting key")
1547 for result in result_dict.values():
1548 if result.get("vim_result") == 200:
1549 break
1550 else:
1551 raise ROclient.ROClientException("error injecting key: {}".format(
1552 result.get("description")))
1553 break
1554 except NgRoException as e:
1555 raise LcmException("Error injecting key at NG-RO: {}".format(e))
1556 except ROclient.ROClientException as e:
1557 if not nb_tries:
1558 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1559 format(e, 20*10))
1560 nb_tries += 1
1561 if nb_tries >= 20:
1562 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
1563 else:
1564 break
1565
1566 return ip_address
1567
1568 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1569 """
1570 Wait until dependent VCA deployments have finished. NS waits for VNFs and VDUs; VNFs wait for VDUs.
1571 """
1572 my_vca = vca_deployed_list[vca_index]
1573 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1574 # vdu or kdu: no dependencies
1575 return
1576 timeout = 300
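# timeout counts iterations of the 10-second poll below (roughly 50 minutes in total)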
1577 while timeout >= 0:
1578 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1579 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1580 configuration_status_list = db_nsr["configurationStatus"]
1581 for index, vca_deployed in enumerate(configuration_status_list):
1582 if index == vca_index:
1583 # myself
1584 continue
1585 if not my_vca.get("member-vnf-index") or \
1586 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
1587 internal_status = configuration_status_list[index].get("status")
1588 if internal_status == 'READY':
1589 continue
1590 elif internal_status == 'BROKEN':
1591 raise LcmException("Configuration aborted because dependent charm/s have failed")
1592 else:
1593 break
1594 else:
1595 # no dependencies, return
1596 return
1597 await asyncio.sleep(10)
1598 timeout -= 1
1599
1600 raise LcmException("Configuration aborted because dependent charm/s timed out")
1601
1602 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
1603 config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
1604 ee_config_descriptor):
1605 nsr_id = db_nsr["_id"]
1606 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
1607 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1608 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
1609 osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
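# osm_config is passed as the execution environment config; vnf_id, vdu_id or kdu_name are added below when they apply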
1610 db_dict = {
1611 'collection': 'nsrs',
1612 'filter': {'_id': nsr_id},
1613 'path': db_update_entry
1614 }
1615 step = ""
1616 try:
1617
1618 element_type = 'NS'
1619 element_under_configuration = nsr_id
1620
1621 vnfr_id = None
1622 if db_vnfr:
1623 vnfr_id = db_vnfr["_id"]
1624 osm_config["osm"]["vnf_id"] = vnfr_id
1625
1626 namespace = "{nsi}.{ns}".format(
1627 nsi=nsi_id if nsi_id else "",
1628 ns=nsr_id)
1629
1630 if vnfr_id:
1631 element_type = 'VNF'
1632 element_under_configuration = vnfr_id
1633 namespace += ".{}".format(vnfr_id)
1634 if vdu_id:
1635 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
1636 element_type = 'VDU'
1637 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
1638 osm_config["osm"]["vdu_id"] = vdu_id
1639 elif kdu_name:
1640 namespace += ".{}".format(kdu_name)
1641 element_type = 'KDU'
1642 element_under_configuration = kdu_name
1643 osm_config["osm"]["kdu_name"] = kdu_name
1644
1645 # Get artifact path
1646 artifact_path = "{}/{}/{}/{}".format(
1647 base_folder["folder"],
1648 base_folder["pkg-dir"],
1649 "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
1650 vca_name
1651 )
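# illustrative resulting path: "<folder>/<pkg-dir>/charms/<vca_name>" (or ".../helm-charts/<vca_name>" for helm charts)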
1652 # get initial_config_primitive_list that applies to this element
1653 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1654
1655 # add config if not present for NS charm
1656 ee_descriptor_id = ee_config_descriptor.get("id")
1657 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1658 vca_deployed, ee_descriptor_id)
1659
1660 # n2vc_redesign STEP 3.1
1661 # find old ee_id if exists
1662 ee_id = vca_deployed.get("ee_id")
1663
1664 vim_account_id = (
1665 deep_get(db_vnfr, ("vim-account-id",)) or
1666 deep_get(deploy_params, ("OSM", "vim_account_id"))
1667 )
1668 vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
1669 vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
1670 # create or register execution environment in VCA
1671 if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):
1672
1673 self._write_configuration_status(
1674 nsr_id=nsr_id,
1675 vca_index=vca_index,
1676 status='CREATING',
1677 element_under_configuration=element_under_configuration,
1678 element_type=element_type
1679 )
1680
1681 step = "create execution environment"
1682 self.logger.debug(logging_text + step)
1683
1684 ee_id = None
1685 credentials = None
1686 if vca_type == "k8s_proxy_charm":
1687 ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
1688 charm_name=artifact_path[artifact_path.rfind("/") + 1:],
1689 namespace=namespace,
1690 artifact_path=artifact_path,
1691 db_dict=db_dict,
1692 cloud_name=vca_k8s_cloud,
1693 credential_name=vca_k8s_cloud_credential,
1694 )
1695 else:
1696 ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
1697 namespace=namespace,
1698 reuse_ee_id=ee_id,
1699 db_dict=db_dict,
1700 config=osm_config,
1701 cloud_name=vca_cloud,
1702 credential_name=vca_cloud_credential,
1703 )
1704
1705 elif vca_type == "native_charm":
1706 step = "Waiting for VM to be up and getting IP address"
1707 self.logger.debug(logging_text + step)
1708 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1709 user=None, pub_key=None)
1710 credentials = {"hostname": rw_mgmt_ip}
1711 # get username
1712 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1713 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user are
1714 # merged. Meanwhile, get the username from initial-config-primitive
1715 if not username and initial_config_primitive_list:
1716 for config_primitive in initial_config_primitive_list:
1717 for param in config_primitive.get("parameter", ()):
1718 if param["name"] == "ssh-username":
1719 username = param["value"]
1720 break
1721 if not username:
1722 raise LcmException("Cannot determine the username, neither from 'initial-config-primitive' nor from "
1723 "'config-access.ssh-access.default-user'")
1724 credentials["username"] = username
1725 # n2vc_redesign STEP 3.2
1726
1727 self._write_configuration_status(
1728 nsr_id=nsr_id,
1729 vca_index=vca_index,
1730 status='REGISTERING',
1731 element_under_configuration=element_under_configuration,
1732 element_type=element_type
1733 )
1734
1735 step = "register execution environment {}".format(credentials)
1736 self.logger.debug(logging_text + step)
1737 ee_id = await self.vca_map[vca_type].register_execution_environment(
1738 credentials=credentials,
1739 namespace=namespace,
1740 db_dict=db_dict,
1741 cloud_name=vca_cloud,
1742 credential_name=vca_cloud_credential,
1743 )
1744
1745 # for compatibility with MON/POL modules, they need the model and application name at database
1746 # TODO ask MON/POL whether it is still needed to assume the format "model_name.application_name"
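# assumed ee_id format: "<model_name>.<application_name>[.<machine_id>]"; only the first two parts are stored below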
1747 ee_id_parts = ee_id.split('.')
1748 db_nsr_update = {db_update_entry + "ee_id": ee_id}
1749 if len(ee_id_parts) >= 2:
1750 model_name = ee_id_parts[0]
1751 application_name = ee_id_parts[1]
1752 db_nsr_update[db_update_entry + "model"] = model_name
1753 db_nsr_update[db_update_entry + "application"] = application_name
1754
1755 # n2vc_redesign STEP 3.3
1756 step = "Install configuration Software"
1757
1758 self._write_configuration_status(
1759 nsr_id=nsr_id,
1760 vca_index=vca_index,
1761 status='INSTALLING SW',
1762 element_under_configuration=element_under_configuration,
1763 element_type=element_type,
1764 other_update=db_nsr_update
1765 )
1766
1767 # TODO check if already done
1768 self.logger.debug(logging_text + step)
1769 config = None
1770 if vca_type == "native_charm":
1771 config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
1772 if config_primitive:
1773 config = self._map_primitive_params(
1774 config_primitive,
1775 {},
1776 deploy_params
1777 )
1778 num_units = 1
1779 if vca_type == "lxc_proxy_charm":
1780 if element_type == "NS":
1781 num_units = db_nsr.get("config-units") or 1
1782 elif element_type == "VNF":
1783 num_units = db_vnfr.get("config-units") or 1
1784 elif element_type == "VDU":
1785 for v in db_vnfr["vdur"]:
1786 if vdu_id == v["vdu-id-ref"]:
1787 num_units = v.get("config-units") or 1
1788 break
1789 if vca_type != "k8s_proxy_charm":
1790 await self.vca_map[vca_type].install_configuration_sw(
1791 ee_id=ee_id,
1792 artifact_path=artifact_path,
1793 db_dict=db_dict,
1794 config=config,
1795 num_units=num_units,
1796 )
1797
1798 # write in db flag of configuration_sw already installed
1799 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1800
1801 # add relations for this VCA (wait for other peers related with this VCA)
1802 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
1803 vca_index=vca_index, vca_type=vca_type)
1804
1805 # if SSH access is required, then get the execution environment SSH public key
1806 # for native charms we have already waited for the VM to be up
1807 if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
1808 pub_key = None
1809 user = None
1810 # self.logger.debug("get ssh key block")
1811 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
1812 # self.logger.debug("ssh key needed")
1813 # Needed to inject a ssh key
1814 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1815 step = "Install configuration Software, getting public ssh key"
1816 pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
1817
1818 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
1819 else:
1820 # self.logger.debug("no need to get ssh key")
1821 step = "Waiting for VM to be up and getting IP address"
1822 self.logger.debug(logging_text + step)
1823
1824 # n2vc_redesign STEP 5.1
1825 # wait for RO (ip-address) and insert pub_key into the VM
1826 if vnfr_id:
1827 if kdu_name:
1828 rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
1829 else:
1830 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
1831 vdu_index, user=user, pub_key=pub_key)
1832 else:
1833 rw_mgmt_ip = None # This is for a NS configuration
1834
1835 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
1836
1837 # store rw_mgmt_ip in deploy params for later replacement
1838 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
1839
1840 # n2vc_redesign STEP 6 Execute initial config primitive
1841 step = 'execute initial config primitive'
1842
1843 # wait for dependent primitives execution (NS -> VNF -> VDU)
1844 if initial_config_primitive_list:
1845 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
1846
1847 # stage, depending on the element type: vdu, kdu, vnf or ns
1848 my_vca = vca_deployed_list[vca_index]
1849 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1850 # VDU or KDU
1851 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
1852 elif my_vca.get("member-vnf-index"):
1853 # VNF
1854 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
1855 else:
1856 # NS
1857 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
1858
1859 self._write_configuration_status(
1860 nsr_id=nsr_id,
1861 vca_index=vca_index,
1862 status='EXECUTING PRIMITIVE'
1863 )
1864
1865 self._write_op_status(
1866 op_id=nslcmop_id,
1867 stage=stage
1868 )
1869
1870 check_if_terminated_needed = True
1871 for initial_config_primitive in initial_config_primitive_list:
1872 # add information to the vca_deployed entry if it is an NS execution environment
1873 if not vca_deployed["member-vnf-index"]:
1874 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
1875 # TODO check if already done
1876 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
1877
1878 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1879 self.logger.debug(logging_text + step)
1880 await self.vca_map[vca_type].exec_primitive(
1881 ee_id=ee_id,
1882 primitive_name=initial_config_primitive["name"],
1883 params_dict=primitive_params_,
1884 db_dict=db_dict
1885 )
1886 # Once some primitive has been executed, check and write at db whether it needs to execute terminate primitives
1887 if check_if_terminated_needed:
1888 if config_descriptor.get('terminate-config-primitive'):
1889 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1890 check_if_terminated_needed = False
1891
1892 # TODO register in database that primitive is done
1893
1894 # STEP 7 Configure metrics
1895 if vca_type == "helm" or vca_type == "helm-v3":
1896 prometheus_jobs = await self.add_prometheus_metrics(
1897 ee_id=ee_id,
1898 artifact_path=artifact_path,
1899 ee_config_descriptor=ee_config_descriptor,
1900 vnfr_id=vnfr_id,
1901 nsr_id=nsr_id,
1902 target_ip=rw_mgmt_ip,
1903 )
1904 if prometheus_jobs:
1905 self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})
1906
1907 step = "instantiated at VCA"
1908 self.logger.debug(logging_text + step)
1909
1910 self._write_configuration_status(
1911 nsr_id=nsr_id,
1912 vca_index=vca_index,
1913 status='READY'
1914 )
1915
1916 except Exception as e: # TODO not use Exception but N2VC exception
1917 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
1918 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1919 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
1920 self._write_configuration_status(
1921 nsr_id=nsr_id,
1922 vca_index=vca_index,
1923 status='BROKEN'
1924 )
1925 raise LcmException("{} {}".format(step, e)) from e
1926
1927 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
1928 error_description: str = None, error_detail: str = None, other_update: dict = None):
1929 """
1930 Update db_nsr fields.
1931 :param nsr_id:
1932 :param ns_state:
1933 :param current_operation:
1934 :param current_operation_id:
1935 :param error_description:
1936 :param error_detail:
1937 :param other_update: Other required changes at database if provided, will be cleared
1938 :return:
1939 """
1940 try:
1941 db_dict = other_update or {}
1942 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1943 db_dict["_admin.current-operation"] = current_operation_id
1944 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
1945 db_dict["currentOperation"] = current_operation
1946 db_dict["currentOperationID"] = current_operation_id
1947 db_dict["errorDescription"] = error_description
1948 db_dict["errorDetail"] = error_detail
1949
1950 if ns_state:
1951 db_dict["nsState"] = ns_state
1952 self.update_db_2("nsrs", nsr_id, db_dict)
1953 except DbException as e:
1954 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1955
1956 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1957 operation_state: str = None, other_update: dict = None):
1958 try:
1959 db_dict = other_update or {}
1960 db_dict['queuePosition'] = queuePosition
1961 if isinstance(stage, list):
1962 db_dict['stage'] = stage[0]
1963 db_dict['detailed-status'] = " ".join(stage)
1964 elif stage is not None:
1965 db_dict['stage'] = str(stage)
1966
1967 if error_message is not None:
1968 db_dict['errorMessage'] = error_message
1969 if operation_state is not None:
1970 db_dict['operationState'] = operation_state
1971 db_dict["statusEnteredTime"] = time()
1972 self.update_db_2("nslcmops", op_id, db_dict)
1973 except DbException as e:
1974 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1975
1976 def _write_all_config_status(self, db_nsr: dict, status: str):
1977 try:
1978 nsr_id = db_nsr["_id"]
1979 # configurationStatus
1980 config_status = db_nsr.get('configurationStatus')
1981 if config_status:
1982 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1983 enumerate(config_status) if v}
1984 # update status
1985 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1986
1987 except DbException as e:
1988 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1989
1990 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
1991 element_under_configuration: str = None, element_type: str = None,
1992 other_update: dict = None):
1993
1994 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1995 # .format(vca_index, status))
1996
1997 try:
1998 db_path = 'configurationStatus.{}.'.format(vca_index)
1999 db_dict = other_update or {}
2000 if status:
2001 db_dict[db_path + 'status'] = status
2002 if element_under_configuration:
2003 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
2004 if element_type:
2005 db_dict[db_path + 'elementType'] = element_type
2006 self.update_db_2("nsrs", nsr_id, db_dict)
2007 except DbException as e:
2008 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
2009 .format(status, nsr_id, vca_index, e))
2010
2011 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
2012 """
2013 Checks and computes the placement (vim account where to deploy). If it is decided by an external tool, it
2014 sends the request via kafka and waits until the result is written at database (nslcmops _admin.pla).
2015 Database is used because the result can be obtained from a different LCM worker in case of HA.
2016 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
2017 :param db_nslcmop: database content of nslcmop
2018 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
2019 :return: True if some modification is done. Modifies database vnfrs and the db_vnfrs parameter with the
2020 computed 'vim-account-id'
2021 """
2022 modified = False
2023 nslcmop_id = db_nslcmop['_id']
2024 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
2025 if placement_engine == "PLA":
2026 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
2027 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
2028 db_poll_interval = 5
2029 wait = db_poll_interval * 10
2030 pla_result = None
2031 while not pla_result and wait >= 0:
2032 await asyncio.sleep(db_poll_interval)
2033 wait -= db_poll_interval
2034 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2035 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
2036
2037 if not pla_result:
2038 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
2039
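# illustrative, assumed shape of pla_result: {"vnf": [{"member-vnf-index": "1", "vimAccountId": "<vim-account-uuid>"}, ...]}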
2040 for pla_vnf in pla_result['vnf']:
2041 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
2042 if not pla_vnf.get('vimAccountId') or not vnfr:
2043 continue
2044 modified = True
2045 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
2046 # Modifies db_vnfrs
2047 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
2048 return modified
2049
2050 def update_nsrs_with_pla_result(self, params):
2051 try:
2052 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
2053 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
2054 except Exception as e:
2055 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
2056
2057 async def instantiate(self, nsr_id, nslcmop_id):
2058 """
2059
2060 :param nsr_id: ns instance to deploy
2061 :param nslcmop_id: operation to run
2062 :return:
2063 """
2064
2065 # Try to lock HA task here
2066 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
2067 if not task_is_locked_by_me:
2068 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
2069 return
2070
2071 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
2072 self.logger.debug(logging_text + "Enter")
2073
2074 # get all needed from database
2075
2076 # database nsrs record
2077 db_nsr = None
2078
2079 # database nslcmops record
2080 db_nslcmop = None
2081
2082 # update operation on nsrs
2083 db_nsr_update = {}
2084 # update operation on nslcmops
2085 db_nslcmop_update = {}
2086
2087 nslcmop_operation_state = None
2088 db_vnfrs = {} # vnf's info indexed by member-index
2089 # n2vc_info = {}
2090 tasks_dict_info = {} # from task to info text
2091 exc = None
2092 error_list = []
2093 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
2094 # ^ stage, step, VIM progress
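# e.g. (illustrative): ["Stage 2/5: deployment of KDUs, VMs and execution environments.", "Deploying NS at VIM.", "VIM: (building)"]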
2095 try:
2096 # wait for any previous tasks in process
2097 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
2098
2099 stage[1] = "Sync filesystem from database."
2100 self.fs.sync() # TODO, make use of partial sync, only for the needed packages
2101
2102 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
2103 stage[1] = "Reading from database."
2104 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
2105 db_nsr_update["detailed-status"] = "creating"
2106 db_nsr_update["operational-status"] = "init"
2107 self._write_ns_status(
2108 nsr_id=nsr_id,
2109 ns_state="BUILDING",
2110 current_operation="INSTANTIATING",
2111 current_operation_id=nslcmop_id,
2112 other_update=db_nsr_update
2113 )
2114 self._write_op_status(
2115 op_id=nslcmop_id,
2116 stage=stage,
2117 queuePosition=0
2118 )
2119
2120 # read from db: operation
2121 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
2122 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
2123 ns_params = db_nslcmop.get("operationParams")
2124 if ns_params and ns_params.get("timeout_ns_deploy"):
2125 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
2126 else:
2127 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
2128
2129 # read from db: ns
2130 stage[1] = "Getting nsr={} from db.".format(nsr_id)
2131 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2132 stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
2133 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2134 db_nsr["nsd"] = nsd
2135 # nsr_name = db_nsr["name"] # TODO short-name??
2136
2137 # read from db: vnf's of this ns
2138 stage[1] = "Getting vnfrs from db."
2139 self.logger.debug(logging_text + stage[1])
2140 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
2141
2142 # read from db: vnfd's for every vnf
2143 db_vnfds_ref = {} # every vnfd data indexed by vnf name
2144 db_vnfds = {} # every vnfd data indexed by vnf id
2145 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
2146
2147 # for each vnf in ns, read vnfd
2148 for vnfr in db_vnfrs_list:
2149 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
2150 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
2151 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
2152
2153 # if we do not have this vnfd yet, read it from db
2154 if vnfd_id not in db_vnfds:
2155 # read from db
2156 stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
2157 self.logger.debug(logging_text + stage[1])
2158 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
2159
2160 # store vnfd
2161 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
2162 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
2163 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
2164
2165 # Get or generates the _admin.deployed.VCA list
2166 vca_deployed_list = None
2167 if db_nsr["_admin"].get("deployed"):
2168 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
2169 if vca_deployed_list is None:
2170 vca_deployed_list = []
2171 configuration_status_list = []
2172 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2173 db_nsr_update["configurationStatus"] = configuration_status_list
2174 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
2175 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2176 elif isinstance(vca_deployed_list, dict):
2177 # maintain backward compatibility. Change a dict to list at database
2178 vca_deployed_list = list(vca_deployed_list.values())
2179 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
2180 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
2181
2182 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
2183 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
2184 db_nsr_update["_admin.deployed.RO.vnfd"] = []
2185
2186 # set state to INSTANTIATED. Once instantiated, NBI will not delete it directly
2187 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
2188 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2189 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})
2190
2191 # n2vc_redesign STEP 2 Deploy Network Scenario
2192 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
2193 self._write_op_status(
2194 op_id=nslcmop_id,
2195 stage=stage
2196 )
2197
2198 stage[1] = "Deploying KDUs."
2199 # self.logger.debug(logging_text + "Before deploy_kdus")
2200 # Call deploy_kdus in case any "kdu" element exists in the descriptors
2201 await self.deploy_kdus(
2202 logging_text=logging_text,
2203 nsr_id=nsr_id,
2204 nslcmop_id=nslcmop_id,
2205 db_vnfrs=db_vnfrs,
2206 db_vnfds=db_vnfds,
2207 task_instantiation_info=tasks_dict_info,
2208 )
2209
2210 stage[1] = "Getting VCA public key."
2211 # n2vc_redesign STEP 1 Get VCA public ssh-key
2212 # feature 1429. Add n2vc public key to needed VMs
2213 n2vc_key = self.n2vc.get_public_key()
2214 n2vc_key_list = [n2vc_key]
2215 if self.vca_config.get("public_key"):
2216 n2vc_key_list.append(self.vca_config["public_key"])
2217
2218 stage[1] = "Deploying NS at VIM."
2219 task_ro = asyncio.ensure_future(
2220 self.instantiate_RO(
2221 logging_text=logging_text,
2222 nsr_id=nsr_id,
2223 nsd=nsd,
2224 db_nsr=db_nsr,
2225 db_nslcmop=db_nslcmop,
2226 db_vnfrs=db_vnfrs,
2227 db_vnfds_ref=db_vnfds_ref,
2228 n2vc_key_list=n2vc_key_list,
2229 stage=stage
2230 )
2231 )
2232 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
2233 tasks_dict_info[task_ro] = "Deploying at VIM"
2234
2235 # n2vc_redesign STEP 3 to 6 Deploy N2VC
2236 stage[1] = "Deploying Execution Environments."
2237 self.logger.debug(logging_text + stage[1])
2238
2239 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
2240 # get_iterable() returns a value from a dict or empty tuple if key does not exist
2241 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
2242 vnfd_id = c_vnf["vnfd-id-ref"]
2243 vnfd = db_vnfds_ref[vnfd_id]
2244 member_vnf_index = str(c_vnf["member-vnf-index"])
2245 db_vnfr = db_vnfrs[member_vnf_index]
2246 base_folder = vnfd["_admin"]["storage"]
2247 vdu_id = None
2248 vdu_index = 0
2249 vdu_name = None
2250 kdu_name = None
2251
2252 # Get additional parameters
2253 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
2254 if db_vnfr.get("additionalParamsForVnf"):
2255 deploy_params.update(self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy()))
2256
2257 descriptor_config = vnfd.get("vnf-configuration")
2258 if descriptor_config:
2259 self._deploy_n2vc(
2260 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
2261 db_nsr=db_nsr,
2262 db_vnfr=db_vnfr,
2263 nslcmop_id=nslcmop_id,
2264 nsr_id=nsr_id,
2265 nsi_id=nsi_id,
2266 vnfd_id=vnfd_id,
2267 vdu_id=vdu_id,
2268 kdu_name=kdu_name,
2269 member_vnf_index=member_vnf_index,
2270 vdu_index=vdu_index,
2271 vdu_name=vdu_name,
2272 deploy_params=deploy_params,
2273 descriptor_config=descriptor_config,
2274 base_folder=base_folder,
2275 task_instantiation_info=tasks_dict_info,
2276 stage=stage
2277 )
2278
2279 # Deploy charms for each VDU that supports one.
2280 for vdud in get_iterable(vnfd, 'vdu'):
2281 vdu_id = vdud["id"]
2282 descriptor_config = vdud.get('vdu-configuration')
2283 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
2284 if vdur.get("additionalParams"):
2285 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
2286 else:
2287 deploy_params_vdu = deploy_params
2288 deploy_params_vdu["OSM"] = self._get_osm_params(db_vnfr, vdu_id, vdu_count_index=0)
2289 if descriptor_config:
2290 vdu_name = None
2291 kdu_name = None
2292 for vdu_index in range(int(vdud.get("count", 1))):
2293 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
2294 self._deploy_n2vc(
2295 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
2296 member_vnf_index, vdu_id, vdu_index),
2297 db_nsr=db_nsr,
2298 db_vnfr=db_vnfr,
2299 nslcmop_id=nslcmop_id,
2300 nsr_id=nsr_id,
2301 nsi_id=nsi_id,
2302 vnfd_id=vnfd_id,
2303 vdu_id=vdu_id,
2304 kdu_name=kdu_name,
2305 member_vnf_index=member_vnf_index,
2306 vdu_index=vdu_index,
2307 vdu_name=vdu_name,
2308 deploy_params=deploy_params_vdu,
2309 descriptor_config=descriptor_config,
2310 base_folder=base_folder,
2311 task_instantiation_info=tasks_dict_info,
2312 stage=stage
2313 )
2314 for kdud in get_iterable(vnfd, 'kdu'):
2315 kdu_name = kdud["name"]
2316 descriptor_config = kdud.get('kdu-configuration')
2317 if descriptor_config:
2318 vdu_id = None
2319 vdu_index = 0
2320 vdu_name = None
2321 kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
2322 deploy_params_kdu = {"OSM": self._get_osm_params(db_vnfr)}
2323 if kdur.get("additionalParams"):
2324 deploy_params_kdu = self._format_additional_params(kdur["additionalParams"])
2325
2326 self._deploy_n2vc(
2327 logging_text=logging_text,
2328 db_nsr=db_nsr,
2329 db_vnfr=db_vnfr,
2330 nslcmop_id=nslcmop_id,
2331 nsr_id=nsr_id,
2332 nsi_id=nsi_id,
2333 vnfd_id=vnfd_id,
2334 vdu_id=vdu_id,
2335 kdu_name=kdu_name,
2336 member_vnf_index=member_vnf_index,
2337 vdu_index=vdu_index,
2338 vdu_name=vdu_name,
2339 deploy_params=deploy_params_kdu,
2340 descriptor_config=descriptor_config,
2341 base_folder=base_folder,
2342 task_instantiation_info=tasks_dict_info,
2343 stage=stage
2344 )
2345
2346 # Check if this NS has a charm configuration
2347 descriptor_config = nsd.get("ns-configuration")
2348 if descriptor_config and descriptor_config.get("juju"):
2349 vnfd_id = None
2350 db_vnfr = None
2351 member_vnf_index = None
2352 vdu_id = None
2353 kdu_name = None
2354 vdu_index = 0
2355 vdu_name = None
2356
2357 # Get additional parameters
2358 deploy_params = {"OSM": self._get_osm_params(db_vnfr)}
2359 if db_nsr.get("additionalParamsForNs"):
2360 deploy_params.update(self._format_additional_params(db_nsr["additionalParamsForNs"].copy()))
2361 base_folder = nsd["_admin"]["storage"]
2362 self._deploy_n2vc(
2363 logging_text=logging_text,
2364 db_nsr=db_nsr,
2365 db_vnfr=db_vnfr,
2366 nslcmop_id=nslcmop_id,
2367 nsr_id=nsr_id,
2368 nsi_id=nsi_id,
2369 vnfd_id=vnfd_id,
2370 vdu_id=vdu_id,
2371 kdu_name=kdu_name,
2372 member_vnf_index=member_vnf_index,
2373 vdu_index=vdu_index,
2374 vdu_name=vdu_name,
2375 deploy_params=deploy_params,
2376 descriptor_config=descriptor_config,
2377 base_folder=base_folder,
2378 task_instantiation_info=tasks_dict_info,
2379 stage=stage
2380 )
2381
2382 # the rest of the work is done in the finally block
2383
2384 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
2385 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
2386 exc = e
2387 except asyncio.CancelledError:
2388 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
2389 exc = "Operation was cancelled"
2390 except Exception as e:
2391 exc = traceback.format_exc()
2392 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
2393 finally:
2394 if exc:
2395 error_list.append(str(exc))
2396 try:
2397 # wait for pending tasks
2398 if tasks_dict_info:
2399 stage[1] = "Waiting for instantiate pending tasks."
2400 self.logger.debug(logging_text + stage[1])
2401 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
2402 stage, nslcmop_id, nsr_id=nsr_id)
2403 stage[1] = stage[2] = ""
2404 except asyncio.CancelledError:
2405 error_list.append("Cancelled")
2406 # TODO cancel all tasks
2407 except Exception as exc:
2408 error_list.append(str(exc))
2409
2410 # update operation-status
2411 db_nsr_update["operational-status"] = "running"
2412 # let's begin with VCA 'configured' status (later we can change it)
2413 db_nsr_update["config-status"] = "configured"
2414 for task, task_name in tasks_dict_info.items():
2415 if not task.done() or task.cancelled() or task.exception():
2416 if task_name.startswith(self.task_name_deploy_vca):
2417 # A N2VC task is pending
2418 db_nsr_update["config-status"] = "failed"
2419 else:
2420 # RO or KDU task is pending
2421 db_nsr_update["operational-status"] = "failed"
2422
2423 # update status at database
2424 if error_list:
2425 error_detail = ". ".join(error_list)
2426 self.logger.error(logging_text + error_detail)
2427 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
2428 error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])
2429
2430 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
2431 db_nslcmop_update["detailed-status"] = error_detail
2432 nslcmop_operation_state = "FAILED"
2433 ns_state = "BROKEN"
2434 else:
2435 error_detail = None
2436 error_description_nsr = error_description_nslcmop = None
2437 ns_state = "READY"
2438 db_nsr_update["detailed-status"] = "Done"
2439 db_nslcmop_update["detailed-status"] = "Done"
2440 nslcmop_operation_state = "COMPLETED"
2441
2442 if db_nsr:
2443 self._write_ns_status(
2444 nsr_id=nsr_id,
2445 ns_state=ns_state,
2446 current_operation="IDLE",
2447 current_operation_id=None,
2448 error_description=error_description_nsr,
2449 error_detail=error_detail,
2450 other_update=db_nsr_update
2451 )
2452 self._write_op_status(
2453 op_id=nslcmop_id,
2454 stage="",
2455 error_message=error_description_nslcmop,
2456 operation_state=nslcmop_operation_state,
2457 other_update=db_nslcmop_update,
2458 )
2459
2460 if nslcmop_operation_state:
2461 try:
2462 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
2463 "operationState": nslcmop_operation_state},
2464 loop=self.loop)
2465 except Exception as e:
2466 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
2467
2468 self.logger.debug(logging_text + "Exit")
2469 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2470
2471 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
2472 timeout: int = 3600, vca_type: str = None) -> bool:
2473
2474 # steps:
2475 # 1. find all relations for this VCA
2476 # 2. wait for other peers related
2477 # 3. add relations
2478
2479 try:
2480 vca_type = vca_type or "lxc_proxy_charm"
2481
2482 # STEP 1: find all relations for this VCA
2483
2484 # read nsr record
2485 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2486 nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
2487
2488 # this VCA data
2489 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
2490
2491 # read all ns-configuration relations
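# each relation entry (assumed shape): {"name": ..., "entities": [{"id": <member-vnf-index or vdu id>, "endpoint": <ep>}, {"id": ..., "endpoint": ...}]}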
2492 ns_relations = list()
2493 db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
2494 if db_ns_relations:
2495 for r in db_ns_relations:
2496 # check if this VCA is in the relation
2497 if my_vca.get('member-vnf-index') in\
2498 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2499 ns_relations.append(r)
2500
2501 # read all vnf-configuration relations
2502 vnf_relations = list()
2503 db_vnfd_list = db_nsr.get('vnfd-id')
2504 if db_vnfd_list:
2505 for vnfd in db_vnfd_list:
2506 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
2507 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
2508 if db_vnf_relations:
2509 for r in db_vnf_relations:
2510 # check if this VCA is in the relation
2511 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
2512 vnf_relations.append(r)
2513
2514 # if no relations, terminate
2515 if not ns_relations and not vnf_relations:
2516 self.logger.debug(logging_text + ' No relations')
2517 return True
2518
2519 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
2520
2521 # add all relations
2522 start = time()
2523 while True:
2524 # check timeout
2525 now = time()
2526 if now - start >= timeout:
2527 self.logger.error(logging_text + ' : timeout adding relations')
2528 return False
2529
2530 # reload nsr from database (we need the updated record: _admin.deployed.VCA)
2531 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
2532
2533 # for each defined NS relation, find the related VCAs
2534 for r in ns_relations.copy():
2535 from_vca_ee_id = None
2536 to_vca_ee_id = None
2537 from_vca_endpoint = None
2538 to_vca_endpoint = None
2539 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2540 for vca in vca_list:
2541 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
2542 and vca.get('config_sw_installed'):
2543 from_vca_ee_id = vca.get('ee_id')
2544 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2545 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
2546 and vca.get('config_sw_installed'):
2547 to_vca_ee_id = vca.get('ee_id')
2548 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2549 if from_vca_ee_id and to_vca_ee_id:
2550 # add relation
2551 await self.vca_map[vca_type].add_relation(
2552 ee_id_1=from_vca_ee_id,
2553 ee_id_2=to_vca_ee_id,
2554 endpoint_1=from_vca_endpoint,
2555 endpoint_2=to_vca_endpoint)
2556 # remove entry from relations list
2557 ns_relations.remove(r)
2558 else:
2559 # check failed peers
2560 try:
2561 vca_status_list = db_nsr.get('configurationStatus')
2562 if vca_status_list:
2563 for i in range(len(vca_list)):
2564 vca = vca_list[i]
2565 vca_status = vca_status_list[i]
2566 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2567 if vca_status.get('status') == 'BROKEN':
2568 # peer broken: remove relation from list
2569 ns_relations.remove(r)
2570 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2571 if vca_status.get('status') == 'BROKEN':
2572 # peer broken: remove relation from list
2573 ns_relations.remove(r)
2574 except Exception:
2575 # ignore
2576 pass
2577
2578 # for each defined VNF relation, find the related VCAs
2579 for r in vnf_relations.copy():
2580 from_vca_ee_id = None
2581 to_vca_ee_id = None
2582 from_vca_endpoint = None
2583 to_vca_endpoint = None
2584 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2585 for vca in vca_list:
2586 key_to_check = "vdu_id"
2587 if vca.get("vdu_id") is None:
2588 key_to_check = "vnfd_id"
2589 if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2590 from_vca_ee_id = vca.get('ee_id')
2591 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2592 if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2593 to_vca_ee_id = vca.get('ee_id')
2594 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2595 if from_vca_ee_id and to_vca_ee_id:
2596 # add relation
2597 await self.vca_map[vca_type].add_relation(
2598 ee_id_1=from_vca_ee_id,
2599 ee_id_2=to_vca_ee_id,
2600 endpoint_1=from_vca_endpoint,
2601 endpoint_2=to_vca_endpoint)
2602 # remove entry from relations list
2603 vnf_relations.remove(r)
2604 else:
2605 # check failed peers
2606 try:
2607 vca_status_list = db_nsr.get('configurationStatus')
2608 if vca_status_list:
2609 for i in range(len(vca_list)):
2610 vca = vca_list[i]
2611 vca_status = vca_status_list[i]
2612 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2613 if vca_status.get('status') == 'BROKEN':
2614 # peer broken: remove relation from list
2615 vnf_relations.remove(r)
2616 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2617 if vca_status.get('status') == 'BROKEN':
2618 # peer broken: remove relation from list
2619 vnf_relations.remove(r)
2620 except Exception:
2621 # ignore
2622 pass
2623
2624 # wait for next try
2625 await asyncio.sleep(5.0)
2626
2627 if not ns_relations and not vnf_relations:
2628 self.logger.debug('Relations added')
2629 break
2630
2631 return True
2632
2633 except Exception as e:
2634 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2635 return False
2636
2637 async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
2638 vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
2639
2640 try:
2641 k8sclustertype = k8s_instance_info["k8scluster-type"]
2642 # Instantiate kdu
2643 db_dict_install = {"collection": "nsrs",
2644 "filter": {"_id": nsr_id},
2645 "path": nsr_db_path}
2646
2647 kdu_instance = await self.k8scluster_map[k8sclustertype].install(
2648 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2649 kdu_model=k8s_instance_info["kdu-model"],
2650 atomic=True,
2651 params=k8params,
2652 db_dict=db_dict_install,
2653 timeout=timeout,
2654 kdu_name=k8s_instance_info["kdu-name"],
2655 namespace=k8s_instance_info["namespace"])
2656 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})
2657
2658 # Obtain services in order to get the management service ip
2659 services = await self.k8scluster_map[k8sclustertype].get_services(
2660 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2661 kdu_instance=kdu_instance,
2662 namespace=k8s_instance_info["namespace"])
2663
2664 # Obtain management service info (if exists)
2665 vnfr_update_dict = {}
2666 if services:
2667 vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
2668 mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
2669 for mgmt_service in mgmt_services:
2670 for service in services:
2671 if service["name"].startswith(mgmt_service["name"]):
2672 # Mgmt service found, Obtain service ip
2673 ip = service.get("external_ip", service.get("cluster_ip"))
2674 if isinstance(ip, list) and len(ip) == 1:
2675 ip = ip[0]
2676
2677 vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip
2678
2679 # Check whether the mgmt ip at the vnf must also be updated
2680 service_external_cp = mgmt_service.get("external-connection-point-ref")
2681 if service_external_cp:
2682 if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
2683 vnfr_update_dict["ip-address"] = ip
2684
2685 break
2686 else:
2687 self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))
2688
2689 vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
2690 self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)
2691
2692 kdu_config = kdud.get("kdu-configuration")
2693 if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
2694 initial_config_primitive_list = kdu_config.get("initial-config-primitive")
2695 initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))
2696
2697 for initial_config_primitive in initial_config_primitive_list:
2698 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})
2699
2700 await asyncio.wait_for(
2701 self.k8scluster_map[k8sclustertype].exec_primitive(
2702 cluster_uuid=k8s_instance_info["k8scluster-uuid"],
2703 kdu_instance=kdu_instance,
2704 primitive_name=initial_config_primitive["name"],
2705 params=primitive_params_, db_dict={}),
2706 timeout=timeout)
2707
2708 except Exception as e:
2709 # Prepare update db with error and raise exception
2710 try:
2711 self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
2712 self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
2713 except Exception:
2714 # ignore to keep original exception
2715 pass
2716 # reraise original error
2717 raise
2718
2719 return kdu_instance
2720
2721 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
2722 # Launch kdus if present in the descriptor
2723
2724 k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}}
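# cache of cluster internal ids, indexed by cluster type and OSM cluster id, filled by _get_cluster_id() below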
2725
2726 async def _get_cluster_id(cluster_id, cluster_type):
2727 nonlocal k8scluster_id_2_uuic
2728 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2729 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2730
2731 # check if the K8s cluster is being created and wait for related previous tasks in process
2732 task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
2733 if task_dependency:
2734 text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
2735 self.logger.debug(logging_text + text)
2736 await asyncio.wait(task_dependency, timeout=3600)
2737
2738 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2739 if not db_k8scluster:
2740 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
2741
2742 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2743 if not k8s_id:
2744 if cluster_type == "helm-chart-v3":
2745 try:
2746 # backward compatibility for existing clusters that have not been initialized for helm v3
2747 k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials"))
2748 k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials,
2749 reuse_cluster_uuid=cluster_id)
2750 db_k8scluster_update = {}
2751 db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
2752 db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
2753 db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw
2754 db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED"
2755 self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
2756 except Exception as e:
2757 self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e)))
2758 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
2759 cluster_type))
2760 else:
2761 raise LcmException("K8s cluster '{}' has not been initialized for '{}'".
2762 format(cluster_id, cluster_type))
2763 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2764 return k8s_id
2765
2766 logging_text += "Deploy kdus: "
2767 step = ""
2768 try:
2769 db_nsr_update = {"_admin.deployed.K8s": []}
2770 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2771
2772 index = 0
2773 updated_cluster_list = []
2774 updated_v3_cluster_list = []
2775
2776 for vnfr_data in db_vnfrs.values():
2777 for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
2778 # Step 0: Prepare and set parameters
2779 desc_params = self._format_additional_params(kdur.get("additionalParams"))
2780 vnfd_id = vnfr_data.get('vnfd-id')
2781 kdud = next(kdud for kdud in db_vnfds[vnfd_id]["kdu"] if kdud["name"] == kdur["kdu-name"])
2782 namespace = kdur.get("k8s-namespace")
2783 if kdur.get("helm-chart"):
2784 kdumodel = kdur["helm-chart"]
2785 # Default version: helm3, if helm-version is v2 assign v2
2786 k8sclustertype = "helm-chart-v3"
2787 self.logger.debug("kdur: {}".format(kdur))
2788 if kdur.get("helm-version") and kdur.get("helm-version") == "v2":
2789 k8sclustertype = "helm-chart"
2790 elif kdur.get("juju-bundle"):
2791 kdumodel = kdur["juju-bundle"]
2792 k8sclustertype = "juju-bundle"
2793 else:
2794 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2795 "juju-bundle. Maybe an old NBI version is running".
2796 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
2797 # check if kdumodel is a file and exists
2798 try:
2799 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2800 if storage and storage.get('pkg-dir'): # may not be present if the vnfd has no artifacts
2801 # path format: /vnfdid/pkgdir/helm-charts|juju-bundles/kdumodel
2802 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
2803 kdumodel)
2804 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2805 kdumodel = self.fs.path + filename
2806 except (asyncio.TimeoutError, asyncio.CancelledError):
2807 raise
2808 except Exception: # it is not a file
2809 pass
2810
2811 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2812 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
2813 cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)
2814
2815 # Synchronize repos
2816 if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\
2817 or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list):
2818 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2819 self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid))
2820 if del_repo_list or added_repo_dict:
2821 if k8sclustertype == "helm-chart":
2822 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2823 updated = {'_admin.helm_charts_added.' +
2824 item: name for item, name in added_repo_dict.items()}
2825 updated_cluster_list.append(cluster_uuid)
2826 elif k8sclustertype == "helm-chart-v3":
2827 unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list}
2828 updated = {'_admin.helm_charts_v3_added.' +
2829 item: name for item, name in added_repo_dict.items()}
2830 updated_v3_cluster_list.append(cluster_uuid)
2831 self.logger.debug(logging_text + "repos synchronized on k8s cluster "
2832 "'{}' to_delete: {}, to_add: {}".
2833 format(k8s_cluster_id, del_repo_list, added_repo_dict))
2834 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2835
2836 # Instantiate kdu
2837 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2838 kdur["kdu-name"], k8s_cluster_id)
2839 k8s_instance_info = {"kdu-instance": None,
2840 "k8scluster-uuid": cluster_uuid,
2841 "k8scluster-type": k8sclustertype,
2842 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2843 "kdu-name": kdur["kdu-name"],
2844 "kdu-model": kdumodel,
2845 "namespace": namespace}
2846 db_path = "_admin.deployed.K8s.{}".format(index)
2847 db_nsr_update[db_path] = k8s_instance_info
2848 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2849
2850 task = asyncio.ensure_future(
2851 self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, db_vnfds[vnfd_id],
2852 k8s_instance_info, k8params=desc_params, timeout=600))
2853 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
2854 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
2855
2856 index += 1
2857
2858 except (LcmException, asyncio.CancelledError):
2859 raise
2860 except Exception as e:
2861 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2862 if isinstance(e, (N2VCException, DbException)):
2863 self.logger.error(logging_text + msg)
2864 else:
2865 self.logger.critical(logging_text + msg, exc_info=True)
2866 raise LcmException(msg)
2867 finally:
2868 if db_nsr_update:
2869 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2870
2871 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
2872 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
2873 base_folder, task_instantiation_info, stage):
2874 # launch instantiate_N2VC in a asyncio task and register task object
2875 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2876 # if not found, create one entry and update database
2877 # fill db_nsr._admin.deployed.VCA.<index>
2878
2879 self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
2880 if descriptor_config.get("juju"): # There is one execution environment of type juju
2881 ee_list = [descriptor_config]
2882 elif descriptor_config.get("execution-environment-list"):
2883 ee_list = descriptor_config.get("execution-environment-list")
2884 else: # other types such as script are not supported
2885 ee_list = []
2886
2887 for ee_item in ee_list:
2888 self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
2889 ee_item.get("helm-chart")))
2890 ee_descriptor_id = ee_item.get("id")
2891 if ee_item.get("juju"):
2892 vca_name = ee_item['juju'].get('charm')
2893 vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
2894 if ee_item['juju'].get('cloud') == "k8s":
2895 vca_type = "k8s_proxy_charm"
2896 elif ee_item['juju'].get('proxy') is False:
2897 vca_type = "native_charm"
2898 elif ee_item.get("helm-chart"):
2899 vca_name = ee_item['helm-chart']
2900 if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
2901 vca_type = "helm"
2902 else:
2903 vca_type = "helm-v3"
2904 else:
2905 self.logger.debug(logging_text + "skipping, configuration is neither a juju charm nor a helm chart")
2906 continue
2907
2908 vca_index = -1
2909 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2910 if not vca_deployed:
2911 continue
2912 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2913 vca_deployed.get("vdu_id") == vdu_id and \
2914 vca_deployed.get("kdu_name") == kdu_name and \
2915 vca_deployed.get("vdu_count_index", 0) == vdu_index and \
2916 vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
2917 break
2918 else:
2919 # not found, create one.
2920 target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
2921 if vdu_id:
2922 target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
2923 elif kdu_name:
2924 target += "/kdu/{}".format(kdu_name)
2925 vca_deployed = {
2926 "target_element": target,
2927 # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
2928 "member-vnf-index": member_vnf_index,
2929 "vdu_id": vdu_id,
2930 "kdu_name": kdu_name,
2931 "vdu_count_index": vdu_index,
2932 "operational-status": "init", # TODO revise
2933 "detailed-status": "", # TODO revise
2934 "step": "initial-deploy", # TODO revise
2935 "vnfd_id": vnfd_id,
2936 "vdu_name": vdu_name,
2937 "type": vca_type,
2938 "ee_descriptor_id": ee_descriptor_id
2939 }
2940 vca_index += 1
2941
2942 # create VCA and configurationStatus in db
2943 db_dict = {
2944 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2945 "configurationStatus.{}".format(vca_index): dict()
2946 }
2947 self.update_db_2("nsrs", nsr_id, db_dict)
2948
2949 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2950
2951 # Launch task
2952 task_n2vc = asyncio.ensure_future(
2953 self.instantiate_N2VC(
2954 logging_text=logging_text,
2955 vca_index=vca_index,
2956 nsi_id=nsi_id,
2957 db_nsr=db_nsr,
2958 db_vnfr=db_vnfr,
2959 vdu_id=vdu_id,
2960 kdu_name=kdu_name,
2961 vdu_index=vdu_index,
2962 deploy_params=deploy_params,
2963 config_descriptor=descriptor_config,
2964 base_folder=base_folder,
2965 nslcmop_id=nslcmop_id,
2966 stage=stage,
2967 vca_type=vca_type,
2968 vca_name=vca_name,
2969 ee_config_descriptor=ee_item
2970 )
2971 )
2972 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
2973 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2974 member_vnf_index or "", vdu_id or "")
2975
2976 @staticmethod
2977 def _get_terminate_config_primitive(primitive_list, vca_deployed):
2978 """ Get a sorted terminate config primitive list. In case ee_descriptor_id is present at vca_deployed,
2979 it gets only those primitives for this execution environment"""
2980
2981 primitive_list = primitive_list or []
2982 # filter primitives by ee_descriptor_id
2983 ee_descriptor_id = vca_deployed.get("ee_descriptor_id")
2984 primitive_list = [p for p in primitive_list if p.get("execution-environment-ref") == ee_descriptor_id]
2985
2986 if primitive_list:
2987 primitive_list.sort(key=lambda val: int(val['seq']))
2988
2989 return primitive_list
2990
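# Illustrative note (comments only, hypothetical data): _get_terminate_config_primitive keeps only the
# primitives whose "execution-environment-ref" matches the ee_descriptor_id of the given vca_deployed
# entry and returns them sorted by "seq". For example:
#     primitives = [{"seq": "2", "name": "cleanup", "execution-environment-ref": "ee-1"},
#                   {"seq": "1", "name": "stop", "execution-environment-ref": "ee-1"},
#                   {"seq": "1", "name": "other", "execution-environment-ref": "ee-2"}]
#     NsLcm._get_terminate_config_primitive(primitives, {"ee_descriptor_id": "ee-1"})
#     # -> [{"seq": "1", "name": "stop", ...}, {"seq": "2", "name": "cleanup", ...}]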
2991 @staticmethod
2992 def _create_nslcmop(nsr_id, operation, params):
2993 """
2994 Creates an ns-lcm-op content to be stored at the database.
2995 :param nsr_id: internal id of the instance
2996 :param operation: instantiate, terminate, scale, action, ...
2997 :param params: user parameters for the operation
2998 :return: dictionary following SOL005 format
2999 """
3000 # Raise exception if invalid arguments
3001 if not (nsr_id and operation and params):
3002 raise LcmException(
3003 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
3004 now = time()
3005 _id = str(uuid4())
3006 nslcmop = {
3007 "id": _id,
3008 "_id": _id,
3009 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
3010 "operationState": "PROCESSING",
3011 "statusEnteredTime": now,
3012 "nsInstanceId": nsr_id,
3013 "lcmOperationType": operation,
3014 "startTime": now,
3015 "isAutomaticInvocation": False,
3016 "operationParams": params,
3017 "isCancelPending": False,
3018 "links": {
3019 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
3020 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
3021 }
3022 }
3023 return nslcmop
3024
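# Illustrative note: _create_nslcmop returns a minimal SOL005-like operation record. For example,
# NsLcm._create_nslcmop(nsr_id, "action", {"member_vnf_index": "1"}) yields a dict whose "id" and "_id"
# hold the same generated UUID, with operationState "PROCESSING", startTime/statusEnteredTime set to now,
# the given nsInstanceId, lcmOperationType and operationParams, and "links" pointing to
# /osm/nslcm/v1/ns_lcm_op_occs/<id> and /osm/nslcm/v1/ns_instances/<nsr_id>.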
3025 def _format_additional_params(self, params):
3026 params = params or {}
3027 for key, value in params.items():
3028 if str(value).startswith("!!yaml "):
3029 params[key] = yaml.safe_load(value[7:])
3030 return params
3031
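# Illustrative note (hypothetical values): _format_additional_params parses "!!yaml "-prefixed strings
# into structured values and leaves everything else untouched. For example:
#     self._format_additional_params({"replicas": 3, "config": "!!yaml {a: 1, b: [x, y]}"})
#     # -> {"replicas": 3, "config": {"a": 1, "b": ["x", "y"]}}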
3032 def _get_terminate_primitive_params(self, seq, vnf_index):
3033 primitive = seq.get('name')
3034 primitive_params = {}
3035 params = {
3036 "member_vnf_index": vnf_index,
3037 "primitive": primitive,
3038 "primitive_params": primitive_params,
3039 }
3040 desc_params = {}
3041 return self._map_primitive_params(seq, params, desc_params)
3042
3043 # sub-operations
3044
3045 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
3046 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
3047 if op.get('operationState') == 'COMPLETED':
3048 # b. Skip sub-operation
3049 # _ns_execute_primitive() or RO.create_action() will NOT be executed
3050 return self.SUBOPERATION_STATUS_SKIP
3051 else:
3052 # c. retry executing sub-operation
3053 # The sub-operation exists, and operationState != 'COMPLETED'
3054 # Update operationState = 'PROCESSING' to indicate a retry.
3055 operationState = 'PROCESSING'
3056 detailed_status = 'In progress'
3057 self._update_suboperation_status(
3058 db_nslcmop, op_index, operationState, detailed_status)
3059 # Return the sub-operation index
3060 # _ns_execute_primitive() or RO.create_action() will be called from scale()
3061 # with arguments extracted from the sub-operation
3062 return op_index
3063
3064 # Find a sub-operation where all keys in a matching dictionary must match
3065 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
3066 def _find_suboperation(self, db_nslcmop, match):
3067 if db_nslcmop and match:
3068 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
3069 for i, op in enumerate(op_list):
3070 if all(op.get(k) == match[k] for k in match):
3071 return i
3072 return self.SUBOPERATION_STATUS_NOT_FOUND
3073
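# Illustrative note (hypothetical match): _find_suboperation returns the index of the first entry in
# db_nslcmop["_admin"]["operations"] whose fields all equal the given match dictionary, e.g.:
#     match = {'member_vnf_index': '1', 'primitive': 'touch', 'lcmOperationType': 'PRE-SCALE'}
#     op_index = self._find_suboperation(db_nslcmop, match)
#     # op_index >= 0 if found, otherwise SUBOPERATION_STATUS_NOT_FOUND (-1)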
3074 # Update status for a sub-operation given its index
3075 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
3076 # Update DB for HA tasks
3077 q_filter = {'_id': db_nslcmop['_id']}
3078 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
3079 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
3080 self.db.set_one("nslcmops",
3081 q_filter=q_filter,
3082 update_dict=update_dict,
3083 fail_on_empty=False)
3084
3085 # Add sub-operation, return the index of the added sub-operation
3086 # Optionally, set operationState, detailed-status, and operationType
3087 # Status and type are currently set for 'scale' sub-operations:
3088 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
3089 # 'detailed-status' : status message
3090 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
3091 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
3092 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
3093 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
3094 RO_nsr_id=None, RO_scaling_info=None):
3095 if not db_nslcmop:
3096 return self.SUBOPERATION_STATUS_NOT_FOUND
3097 # Get the "_admin.operations" list, if it exists
3098 db_nslcmop_admin = db_nslcmop.get('_admin', {})
3099 op_list = db_nslcmop_admin.get('operations')
3100 # Create or append to the "_admin.operations" list
3101 new_op = {'member_vnf_index': vnf_index,
3102 'vdu_id': vdu_id,
3103 'vdu_count_index': vdu_count_index,
3104 'primitive': primitive,
3105 'primitive_params': mapped_primitive_params}
3106 if operationState:
3107 new_op['operationState'] = operationState
3108 if detailed_status:
3109 new_op['detailed-status'] = detailed_status
3110 if operationType:
3111 new_op['lcmOperationType'] = operationType
3112 if RO_nsr_id:
3113 new_op['RO_nsr_id'] = RO_nsr_id
3114 if RO_scaling_info:
3115 new_op['RO_scaling_info'] = RO_scaling_info
3116 if not op_list:
3117 # No existing operations, create key 'operations' with current operation as first list element
3118 db_nslcmop_admin.update({'operations': [new_op]})
3119 op_list = db_nslcmop_admin.get('operations')
3120 else:
3121 # Existing operations, append operation to list
3122 op_list.append(new_op)
3123
3124 db_nslcmop_update = {'_admin.operations': op_list}
3125 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
3126 op_index = len(op_list) - 1
3127 return op_index
3128
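# Illustrative note (hypothetical values): _add_suboperation appends a new entry to
# db_nslcmop["_admin"]["operations"], persists the list in the "nslcmops" collection and returns the
# index of the added entry; e.g. the first sub-operation added to an nslcmop without any previous
# operations gets index 0:
#     op_index = self._add_suboperation(db_nslcmop, vnf_index="1", vdu_id=None, vdu_count_index=None,
#                                       vdu_name=None, primitive="touch", mapped_primitive_params={})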
3129 # Helper methods for scale() sub-operations
3130
3131 # pre-scale/post-scale:
3132 # Check for 3 different cases:
3133 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
3134 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
3135 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
3136 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
3137 operationType, RO_nsr_id=None, RO_scaling_info=None):
3138 # Find this sub-operation
3139 if RO_nsr_id and RO_scaling_info:
3140 operationType = 'SCALE-RO'
3141 match = {
3142 'member_vnf_index': vnf_index,
3143 'RO_nsr_id': RO_nsr_id,
3144 'RO_scaling_info': RO_scaling_info,
3145 }
3146 else:
3147 match = {
3148 'member_vnf_index': vnf_index,
3149 'primitive': vnf_config_primitive,
3150 'primitive_params': primitive_params,
3151 'lcmOperationType': operationType
3152 }
3153 op_index = self._find_suboperation(db_nslcmop, match)
3154 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
3155 # a. New sub-operation
3156 # The sub-operation does not exist, add it.
3157 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
3158 # The following parameters are set to None for all kinds of scaling:
3159 vdu_id = None
3160 vdu_count_index = None
3161 vdu_name = None
3162 if RO_nsr_id and RO_scaling_info:
3163 vnf_config_primitive = None
3164 primitive_params = None
3165 else:
3166 RO_nsr_id = None
3167 RO_scaling_info = None
3168 # Initial status for sub-operation
3169 operationState = 'PROCESSING'
3170 detailed_status = 'In progress'
3171 # Add sub-operation for pre/post-scaling (zero or more operations)
3172 self._add_suboperation(db_nslcmop,
3173 vnf_index,
3174 vdu_id,
3175 vdu_count_index,
3176 vdu_name,
3177 vnf_config_primitive,
3178 primitive_params,
3179 operationState,
3180 detailed_status,
3181 operationType,
3182 RO_nsr_id,
3183 RO_scaling_info)
3184 return self.SUBOPERATION_STATUS_NEW
3185 else:
3186 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
3187 # or op_index (operationState != 'COMPLETED')
3188 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
3189
3190 # Function to return execution_environment id
3191
3192 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
3193 # TODO vdu_index_count
3194 for vca in vca_deployed_list:
3195 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
3196 return vca["ee_id"]
3197
3198 async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
3199 vca_index, destroy_ee=True, exec_primitives=True):
3200 """
3201 Execute the terminate primitives and destroy the execution environment (only if destroy_ee=True)
3202 :param logging_text:
3203 :param db_nslcmop:
3204 :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
3205 :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
3206 :param vca_index: index in the database _admin.deployed.VCA
3207 :param destroy_ee: False to skip destroying the execution environment here, because all of them will be destroyed at once later
3208 :param exec_primitives: False to skip executing terminate primitives, because the configuration was not completed or did
3209 not execute properly
3210 :return: None or exception
3211 """
3212
3213 self.logger.debug(
3214 logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
3215 vca_index, vca_deployed, config_descriptor, destroy_ee
3216 )
3217 )
3218
3219 vca_type = vca_deployed.get("type", "lxc_proxy_charm")
3220
3221 # execute terminate_primitives
3222 if exec_primitives:
3223 terminate_primitives = self._get_terminate_config_primitive(
3224 config_descriptor.get("terminate-config-primitive"), vca_deployed)
3225 vdu_id = vca_deployed.get("vdu_id")
3226 vdu_count_index = vca_deployed.get("vdu_count_index")
3227 vdu_name = vca_deployed.get("vdu_name")
3228 vnf_index = vca_deployed.get("member-vnf-index")
3229 if terminate_primitives and vca_deployed.get("needed_terminate"):
3230 for seq in terminate_primitives:
3231 # For each sequence in list, get primitive and call _ns_execute_primitive()
3232 step = "Calling terminate action for vnf_member_index={} primitive={}".format(
3233 vnf_index, seq.get("name"))
3234 self.logger.debug(logging_text + step)
3235 # Create the primitive for each sequence, i.e. "primitive": "touch"
3236 primitive = seq.get('name')
3237 mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
3238
3239 # Add sub-operation
3240 self._add_suboperation(db_nslcmop,
3241 vnf_index,
3242 vdu_id,
3243 vdu_count_index,
3244 vdu_name,
3245 primitive,
3246 mapped_primitive_params)
3247 # Sub-operations: Call _ns_execute_primitive() instead of action()
3248 try:
3249 result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
3250 mapped_primitive_params,
3251 vca_type=vca_type)
3252 except LcmException:
3253 # this happens when the VCA is not deployed. In this case there is nothing to terminate
3254 continue
3255 result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
3256 if result not in result_ok:
3257 raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
3258 "error {}".format(seq.get("name"), vnf_index, result_detail))
3259 # mark that this VCA no longer needs to be terminated
3260 db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
3261 self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})
3262
3263 if vca_deployed.get("prometheus_jobs") and self.prometheus:
3264 await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])
3265
3266 if destroy_ee:
3267 await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
3268
3269 async def _delete_all_N2VC(self, db_nsr: dict):
3270 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
3271 namespace = "." + db_nsr["_id"]
3272 try:
3273 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
3274 except N2VCNotFound: # already deleted. Skip
3275 pass
3276 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
3277
3278 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
3279 """
3280 Terminates a deployment from RO
3281 :param logging_text:
3282 :param nsr_deployed: db_nsr._admin.deployed
3283 :param nsr_id:
3284 :param nslcmop_id:
3285 :param stage: list of strings with the content to write on db_nslcmop.detailed-status.
3286 this method updates only index 2, but it writes the concatenated content of the list to the database
3287 :return:
3288 """
3289 db_nsr_update = {}
3290 failed_detail = []
3291 ro_nsr_id = ro_delete_action = None
3292 if nsr_deployed and nsr_deployed.get("RO"):
3293 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
3294 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
3295 try:
3296 if ro_nsr_id:
3297 stage[2] = "Deleting ns from VIM."
3298 db_nsr_update["detailed-status"] = " ".join(stage)
3299 self._write_op_status(nslcmop_id, stage)
3300 self.logger.debug(logging_text + stage[2])
3301 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3302 self._write_op_status(nslcmop_id, stage)
3303 desc = await self.RO.delete("ns", ro_nsr_id)
3304 ro_delete_action = desc["action_id"]
3305 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
3306 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3307 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3308 if ro_delete_action:
3309 # wait until NS is deleted from VIM
3310 stage[2] = "Waiting for ns to be deleted from VIM."
3311 detailed_status_old = None
3312 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
3313 ro_delete_action))
3314 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3315 self._write_op_status(nslcmop_id, stage)
3316
3317 delete_timeout = 20 * 60 # 20 minutes
3318 while delete_timeout > 0:
3319 desc = await self.RO.show(
3320 "ns",
3321 item_id_name=ro_nsr_id,
3322 extra_item="action",
3323 extra_item_id=ro_delete_action)
3324
3325 # deploymentStatus
3326 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3327
3328 ns_status, ns_status_info = self.RO.check_action_status(desc)
3329 if ns_status == "ERROR":
3330 raise ROclient.ROClientException(ns_status_info)
3331 elif ns_status == "BUILD":
3332 stage[2] = "Deleting from VIM {}".format(ns_status_info)
3333 elif ns_status == "ACTIVE":
3334 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3335 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3336 break
3337 else:
3338 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
3339 if stage[2] != detailed_status_old:
3340 detailed_status_old = stage[2]
3341 db_nsr_update["detailed-status"] = " ".join(stage)
3342 self._write_op_status(nslcmop_id, stage)
3343 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3344 await asyncio.sleep(5, loop=self.loop)
3345 delete_timeout -= 5
3346 else: # delete_timeout <= 0:
3347 raise ROclient.ROClientException("Timeout waiting for ns to be deleted from VIM")
3348
3349 except Exception as e:
3350 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3351 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3352 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
3353 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
3354 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
3355 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
3356 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3357 failed_detail.append("delete conflict: {}".format(e))
3358 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
3359 else:
3360 failed_detail.append("delete error: {}".format(e))
3361 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
3362
3363 # Delete nsd
3364 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
3365 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
3366 try:
3367 stage[2] = "Deleting nsd from RO."
3368 db_nsr_update["detailed-status"] = " ".join(stage)
3369 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3370 self._write_op_status(nslcmop_id, stage)
3371 await self.RO.delete("nsd", ro_nsd_id)
3372 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
3373 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3374 except Exception as e:
3375 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3376 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
3377 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
3378 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3379 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
3380 self.logger.debug(logging_text + failed_detail[-1])
3381 else:
3382 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
3383 self.logger.error(logging_text + failed_detail[-1])
3384
3385 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
3386 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
3387 if not vnf_deployed or not vnf_deployed["id"]:
3388 continue
3389 try:
3390 ro_vnfd_id = vnf_deployed["id"]
3391 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
3392 vnf_deployed["member-vnf-index"], ro_vnfd_id)
3393 db_nsr_update["detailed-status"] = " ".join(stage)
3394 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3395 self._write_op_status(nslcmop_id, stage)
3396 await self.RO.delete("vnfd", ro_vnfd_id)
3397 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
3398 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3399 except Exception as e:
3400 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
3401 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
3402 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
3403 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
3404 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
3405 self.logger.debug(logging_text + failed_detail[-1])
3406 else:
3407 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
3408 self.logger.error(logging_text + failed_detail[-1])
3409
3410 if failed_detail:
3411 stage[2] = "Error deleting from VIM"
3412 else:
3413 stage[2] = "Deleted from VIM"
3414 db_nsr_update["detailed-status"] = " ".join(stage)
3415 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3416 self._write_op_status(nslcmop_id, stage)
3417
3418 if failed_detail:
3419 raise LcmException("; ".join(failed_detail))
3420
3421 async def terminate(self, nsr_id, nslcmop_id):
3422 # Try to lock HA task here
3423 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3424 if not task_is_locked_by_me:
3425 return
3426
3427 logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
3428 self.logger.debug(logging_text + "Enter")
3429 timeout_ns_terminate = self.timeout_ns_terminate
3430 db_nsr = None
3431 db_nslcmop = None
3432 operation_params = None
3433 exc = None
3434 error_list = [] # collects all error messages
3435 db_nslcmop_update = {}
3436 autoremove = False # autoremove after terminated
3437 tasks_dict_info = {}
3438 db_nsr_update = {}
3439 stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
3440 # ^ contains [stage, step, VIM-status]
3441 try:
3442 # wait for any previous tasks in process
3443 await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)
3444
3445 stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
3446 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3447 operation_params = db_nslcmop.get("operationParams") or {}
3448 if operation_params.get("timeout_ns_terminate"):
3449 timeout_ns_terminate = operation_params["timeout_ns_terminate"]
3450 stage[1] = "Getting nsr={} from db.".format(nsr_id)
3451 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3452
3453 db_nsr_update["operational-status"] = "terminating"
3454 db_nsr_update["config-status"] = "terminating"
3455 self._write_ns_status(
3456 nsr_id=nsr_id,
3457 ns_state="TERMINATING",
3458 current_operation="TERMINATING",
3459 current_operation_id=nslcmop_id,
3460 other_update=db_nsr_update
3461 )
3462 self._write_op_status(
3463 op_id=nslcmop_id,
3464 queuePosition=0,
3465 stage=stage
3466 )
3467 nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
3468 if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
3469 return
3470
3471 stage[1] = "Getting vnf descriptors from db."
3472 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
3473 db_vnfds_from_id = {}
3474 db_vnfds_from_member_index = {}
3475 # Loop over VNFRs
3476 for vnfr in db_vnfrs_list:
3477 vnfd_id = vnfr["vnfd-id"]
3478 if vnfd_id not in db_vnfds_from_id:
3479 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
3480 db_vnfds_from_id[vnfd_id] = vnfd
3481 db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]
3482
3483 # Destroy individual execution environments when there are terminate primitives.
3484 # The rest of the EEs will be deleted at once
3485 # TODO - check before calling _destroy_N2VC
3486 # if not operation_params.get("skip_terminate_primitives"):#
3487 # or not vca.get("needed_terminate"):
3488 stage[0] = "Stage 2/3 execute terminating primitives."
3489 self.logger.debug(logging_text + stage[0])
3490 stage[1] = "Looking for execution environments that need to be terminated."
3491 self.logger.debug(logging_text + stage[1])
3492 # self.logger.debug("nsr_deployed: {}".format(nsr_deployed))
3493 for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
3494 config_descriptor = None
3495 if not vca or not vca.get("ee_id"):
3496 continue
3497 if not vca.get("member-vnf-index"):
3498 # ns
3499 config_descriptor = db_nsr.get("ns-configuration")
3500 elif vca.get("vdu_id"):
3501 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3502 vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
3503 if vdud:
3504 config_descriptor = vdud.get("vdu-configuration")
3505 elif vca.get("kdu_name"):
3506 db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
3507 kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
3508 if kdud:
3509 config_descriptor = kdud.get("kdu-configuration")
3510 else:
3511 config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
3512 vca_type = vca.get("type")
3513 exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
3514 vca.get("needed_terminate"))
3515 # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
3516 # pending native charms
3517 destroy_ee = vca_type in ("helm", "helm-v3", "native_charm")
3518 # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
3519 # vca_index, vca.get("ee_id"), vca_type, destroy_ee))
3520 task = asyncio.ensure_future(
3521 self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
3522 destroy_ee, exec_terminate_primitives))
3523 tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))
3524
3525 # wait for pending tasks of terminate primitives
3526 if tasks_dict_info:
3527 self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
3528 error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
3529 min(self.timeout_charm_delete, timeout_ns_terminate),
3530 stage, nslcmop_id)
3531 tasks_dict_info.clear()
3532 if error_list:
3533 return # raise LcmException("; ".join(error_list))
3534
3535 # remove All execution environments at once
3536 stage[0] = "Stage 3/3 delete all."
3537
3538 if nsr_deployed.get("VCA"):
3539 stage[1] = "Deleting all execution environments."
3540 self.logger.debug(logging_text + stage[1])
3541 task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
3542 timeout=self.timeout_charm_delete))
3543 # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
3544 tasks_dict_info[task_delete_ee] = "Terminating all VCA"
3545
3546 # Delete from k8scluster
3547 stage[1] = "Deleting KDUs."
3548 self.logger.debug(logging_text + stage[1])
3549 # print(nsr_deployed)
3550 for kdu in get_iterable(nsr_deployed, "K8s"):
3551 if not kdu or not kdu.get("kdu-instance"):
3552 continue
3553 kdu_instance = kdu.get("kdu-instance")
3554 if kdu.get("k8scluster-type") in self.k8scluster_map:
3555 task_delete_kdu_instance = asyncio.ensure_future(
3556 self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
3557 cluster_uuid=kdu.get("k8scluster-uuid"),
3558 kdu_instance=kdu_instance))
3559 else:
3560 self.logger.error(logging_text + "Unknown k8s deployment type {}".
3561 format(kdu.get("k8scluster-type")))
3562 continue
3563 tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))
3564
3565 # remove from RO
3566 stage[1] = "Deleting ns from VIM."
3567 if self.ng_ro:
3568 task_delete_ro = asyncio.ensure_future(
3569 self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3570 else:
3571 task_delete_ro = asyncio.ensure_future(
3572 self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
3573 tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"
3574
3575 # the rest of the work is done in the finally block
3576
3577 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
3578 self.logger.error(logging_text + "Exit Exception {}".format(e))
3579 exc = e
3580 except asyncio.CancelledError:
3581 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
3582 exc = "Operation was cancelled"
3583 except Exception as e:
3584 exc = traceback.format_exc()
3585 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
3586 finally:
3587 if exc:
3588 error_list.append(str(exc))
3589 try:
3590 # wait for pending tasks
3591 if tasks_dict_info:
3592 stage[1] = "Waiting for terminate pending tasks."
3593 self.logger.debug(logging_text + stage[1])
3594 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
3595 stage, nslcmop_id)
3596 stage[1] = stage[2] = ""
3597 except asyncio.CancelledError:
3598 error_list.append("Cancelled")
3599 # TODO cancel all tasks
3600 except Exception as exc:
3601 error_list.append(str(exc))
3602 # update status at database
3603 if error_list:
3604 error_detail = "; ".join(error_list)
3605 # self.logger.error(logging_text + error_detail)
3606 error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
3607 error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])
3608
3609 db_nsr_update["operational-status"] = "failed"
3610 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
3611 db_nslcmop_update["detailed-status"] = error_detail
3612 nslcmop_operation_state = "FAILED"
3613 ns_state = "BROKEN"
3614 else:
3615 error_detail = None
3616 error_description_nsr = error_description_nslcmop = None
3617 ns_state = "NOT_INSTANTIATED"
3618 db_nsr_update["operational-status"] = "terminated"
3619 db_nsr_update["detailed-status"] = "Done"
3620 db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
3621 db_nslcmop_update["detailed-status"] = "Done"
3622 nslcmop_operation_state = "COMPLETED"
3623
3624 if db_nsr:
3625 self._write_ns_status(
3626 nsr_id=nsr_id,
3627 ns_state=ns_state,
3628 current_operation="IDLE",
3629 current_operation_id=None,
3630 error_description=error_description_nsr,
3631 error_detail=error_detail,
3632 other_update=db_nsr_update
3633 )
3634 self._write_op_status(
3635 op_id=nslcmop_id,
3636 stage="",
3637 error_message=error_description_nslcmop,
3638 operation_state=nslcmop_operation_state,
3639 other_update=db_nslcmop_update,
3640 )
3641 if ns_state == "NOT_INSTANTIATED":
3642 try:
3643 self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
3644 except DbException as e:
3645 self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
3646 format(nsr_id, e))
3647 if operation_params:
3648 autoremove = operation_params.get("autoremove", False)
3649 if nslcmop_operation_state:
3650 try:
3651 await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
3652 "operationState": nslcmop_operation_state,
3653 "autoremove": autoremove},
3654 loop=self.loop)
3655 except Exception as e:
3656 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3657
3658 self.logger.debug(logging_text + "Exit")
3659 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3660
3661 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
3662 time_start = time()
3663 error_detail_list = []
3664 error_list = []
3665 pending_tasks = list(created_tasks_info.keys())
3666 num_tasks = len(pending_tasks)
3667 num_done = 0
3668 stage[1] = "{}/{}.".format(num_done, num_tasks)
3669 self._write_op_status(nslcmop_id, stage)
3670 while pending_tasks:
3671 new_error = None
3672 _timeout = timeout + time_start - time()
3673 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3674 return_when=asyncio.FIRST_COMPLETED)
3675 num_done += len(done)
3676 if not done: # Timeout
3677 for task in pending_tasks:
3678 new_error = created_tasks_info[task] + ": Timeout"
3679 error_detail_list.append(new_error)
3680 error_list.append(new_error)
3681 break
3682 for task in done:
3683 if task.cancelled():
3684 exc = "Cancelled"
3685 else:
3686 exc = task.exception()
3687 if exc:
3688 if isinstance(exc, asyncio.TimeoutError):
3689 exc = "Timeout"
3690 new_error = created_tasks_info[task] + ": {}".format(exc)
3691 error_list.append(created_tasks_info[task])
3692 error_detail_list.append(new_error)
3693 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3694 K8sException, NgRoException)):
3695 self.logger.error(logging_text + new_error)
3696 else:
3697 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3698 self.logger.error(logging_text + created_tasks_info[task] + " " + exc_traceback)
3699 else:
3700 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
3701 stage[1] = "{}/{}.".format(num_done, num_tasks)
3702 if new_error:
3703 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
3704 if nsr_id: # update also nsr
3705 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3706 "errorDetail": ". ".join(error_detail_list)})
3707 self._write_op_status(nslcmop_id, stage)
3708 return error_detail_list
3709
3710 @staticmethod
3711 def _map_primitive_params(primitive_desc, params, instantiation_params):
3712 """
3713 Generates the params to be provided to the charm before executing a primitive. If the user does not provide a parameter,
3714 the default-value is used. If the value is enclosed in < >, it is looked up in instantiation_params
3715 :param primitive_desc: portion of VNFD/NSD that describes primitive
3716 :param params: Params provided by user
3717 :param instantiation_params: Instantiation params provided by user
3718 :return: a dictionary with the calculated params
3719 """
3720 calculated_params = {}
3721 for parameter in primitive_desc.get("parameter", ()):
3722 param_name = parameter["name"]
3723 if param_name in params:
3724 calculated_params[param_name] = params[param_name]
3725 elif "default-value" in parameter or "value" in parameter:
3726 if "value" in parameter:
3727 calculated_params[param_name] = parameter["value"]
3728 else:
3729 calculated_params[param_name] = parameter["default-value"]
3730 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3731 and calculated_params[param_name].endswith(">"):
3732 if calculated_params[param_name][1:-1] in instantiation_params:
3733 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
3734 else:
3735 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3736 format(calculated_params[param_name], primitive_desc["name"]))
3737 else:
3738 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3739 format(param_name, primitive_desc["name"]))
3740
3741 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3742 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
3743 width=256)
3744 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3745 calculated_params[param_name] = calculated_params[param_name][7:]
3746 if parameter.get("data-type") == "INTEGER":
3747 try:
3748 calculated_params[param_name] = int(calculated_params[param_name])
3749 except ValueError: # error converting string to int
3750 raise LcmException(
3751 "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"]))
3752 elif parameter.get("data-type") == "BOOLEAN":
3753 calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false')
3754
3755 # always add ns_config_info if the primitive name is config
3756 if primitive_desc["name"] == "config":
3757 if "ns_config_info" in instantiation_params:
3758 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
3759 return calculated_params
3760
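# Illustrative note (hypothetical descriptor): _map_primitive_params resolves each declared parameter from
# the user params, the descriptor default-value/value, or (when wrapped in < >) from instantiation_params,
# and applies the declared data-type. For example:
#     primitive_desc = {"name": "touch",
#                       "parameter": [{"name": "filename", "default-value": "<touch_filename>"},
#                                     {"name": "count", "data-type": "INTEGER", "default-value": "3"}]}
#     NsLcm._map_primitive_params(primitive_desc, {}, {"touch_filename": "/home/ubuntu/first-touch"})
#     # -> {"filename": "/home/ubuntu/first-touch", "count": 3}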
3761 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3762 ee_descriptor_id=None):
3763 # find the vca_deployed record for this action. Raise LcmException if not found or there is no ee_id.
3764 for vca in deployed_vca:
3765 if not vca:
3766 continue
3767 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3768 continue
3769 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3770 continue
3771 if kdu_name and kdu_name != vca["kdu_name"]:
3772 continue
3773 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3774 continue
3775 break
3776 else:
3777 # vca_deployed not found
3778 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3779 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3780 ee_descriptor_id))
3781
3782 # get ee_id
3783 ee_id = vca.get("ee_id")
3784 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
3785 if not ee_id:
3786 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has no "
3787 "execution environment"
3788 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
3789 return ee_id, vca_type
3790
3791 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
3792 retries_interval=30, timeout=None,
3793 vca_type=None, db_dict=None) -> (str, str):
3794 try:
3795 if primitive == "config":
3796 primitive_params = {"params": primitive_params}
3797
3798 vca_type = vca_type or "lxc_proxy_charm"
3799
3800 while retries >= 0:
3801 try:
3802 output = await asyncio.wait_for(
3803 self.vca_map[vca_type].exec_primitive(
3804 ee_id=ee_id,
3805 primitive_name=primitive,
3806 params_dict=primitive_params,
3807 progress_timeout=self.timeout_progress_primitive,
3808 total_timeout=self.timeout_primitive,
3809 db_dict=db_dict),
3810 timeout=timeout or self.timeout_primitive)
3811 # execution was OK
3812 break
3813 except asyncio.CancelledError:
3814 raise
3815 except Exception as e: # asyncio.TimeoutError
3816 if isinstance(e, asyncio.TimeoutError):
3817 e = "Timeout"
3818 retries -= 1
3819 if retries >= 0:
3820 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
3821 # wait and retry
3822 await asyncio.sleep(retries_interval, loop=self.loop)
3823 else:
3824 return 'FAILED', str(e)
3825
3826 return 'COMPLETED', output
3827
3828 except (LcmException, asyncio.CancelledError):
3829 raise
3830 except Exception as e:
3831 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
3832
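# Illustrative note (hypothetical values): _ns_execute_primitive returns a (state, detail) tuple. With
# retries=2 and retries_interval=30 the primitive is attempted up to 3 times, waiting 30s between
# attempts, e.g.:
#     state, detail = await self._ns_execute_primitive(ee_id, "touch", {"filename": "/tmp/f"}, retries=2)
#     # state is 'COMPLETED' (detail holds the primitive output) or 'FAILED'/'FAIL' (detail holds the error)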
3833 async def action(self, nsr_id, nslcmop_id):
3834
3835 # Try to lock HA task here
3836 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3837 if not task_is_locked_by_me:
3838 return
3839
3840 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3841 self.logger.debug(logging_text + "Enter")
3842 # get all needed from database
3843 db_nsr = None
3844 db_nslcmop = None
3845 db_nsr_update = {}
3846 db_nslcmop_update = {}
3847 nslcmop_operation_state = None
3848 error_description_nslcmop = None
3849 exc = None
3850 try:
3851 # wait for any previous tasks in process
3852 step = "Waiting for previous operations to terminate"
3853 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3854
3855 self._write_ns_status(
3856 nsr_id=nsr_id,
3857 ns_state=None,
3858 current_operation="RUNNING ACTION",
3859 current_operation_id=nslcmop_id
3860 )
3861
3862 step = "Getting information from database"
3863 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3864 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3865
3866 nsr_deployed = db_nsr["_admin"].get("deployed")
3867 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
3868 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3869 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
3870 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3871 primitive = db_nslcmop["operationParams"]["primitive"]
3872 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3873 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
3874
3875 if vnf_index:
3876 step = "Getting vnfr from database"
3877 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3878 step = "Getting vnfd from database"
3879 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3880 else:
3881 step = "Getting nsd from database"
3882 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
3883
3884 # for backward compatibility
3885 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3886 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3887 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3888 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3889
3890 # look for primitive
3891 config_primitive_desc = descriptor_configuration = None
3892 if vdu_id:
3893 for vdu in get_iterable(db_vnfd, "vdu"):
3894 if vdu_id == vdu["id"]:
3895 descriptor_configuration = vdu.get("vdu-configuration")
3896 break
3897 elif kdu_name:
3898 for kdu in get_iterable(db_vnfd, "kdu"):
3899 if kdu_name == kdu["name"]:
3900 descriptor_configuration = kdu.get("kdu-configuration")
3901 break
3902 elif vnf_index:
3903 descriptor_configuration = db_vnfd.get("vnf-configuration")
3904 else:
3905 descriptor_configuration = db_nsd.get("ns-configuration")
3906
3907 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3908 for config_primitive in descriptor_configuration["config-primitive"]:
3909 if config_primitive["name"] == primitive:
3910 config_primitive_desc = config_primitive
3911 break
3912
3913 if not config_primitive_desc:
3914 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3915 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3916 format(primitive))
3917 primitive_name = primitive
3918 ee_descriptor_id = None
3919 else:
3920 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3921 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
3922
3923 if vnf_index:
3924 if vdu_id:
3925 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
3926 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3927 elif kdu_name:
3928 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3929 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3930 else:
3931 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
3932 else:
3933 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
3934
3935 if kdu_name:
3936 kdu_action = True if not deep_get(kdu, ("kdu-configuration", "juju")) else False
3937
3938 # TODO check if ns is in a proper status
3939 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
3940 # kdur and desc_params already set from before
3941 if primitive_params:
3942 desc_params.update(primitive_params)
3943 # TODO Check if we will need something at vnf level
3944 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3945 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3946 break
3947 else:
3948 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
3949
3950 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3951 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3952 raise LcmException(msg)
3953
3954 db_dict = {"collection": "nsrs",
3955 "filter": {"_id": nsr_id},
3956 "path": "_admin.deployed.K8s.{}".format(index)}
3957 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3958 step = "Executing kdu {}".format(primitive_name)
3959 if primitive_name == "upgrade":
3960 if desc_params.get("kdu_model"):
3961 kdu_model = desc_params.get("kdu_model")
3962 del desc_params["kdu_model"]
3963 else:
3964 kdu_model = kdu.get("kdu-model")
3965 parts = kdu_model.split(sep=":")
3966 if len(parts) == 2:
3967 kdu_model = parts[0]
3968
3969 detailed_status = await asyncio.wait_for(
3970 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3971 cluster_uuid=kdu.get("k8scluster-uuid"),
3972 kdu_instance=kdu.get("kdu-instance"),
3973 atomic=True, kdu_model=kdu_model,
3974 params=desc_params, db_dict=db_dict,
3975 timeout=timeout_ns_action),
3976 timeout=timeout_ns_action + 10)
3977 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
3978 elif primitive_name == "rollback":
3979 detailed_status = await asyncio.wait_for(
3980 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3981 cluster_uuid=kdu.get("k8scluster-uuid"),
3982 kdu_instance=kdu.get("kdu-instance"),
3983 db_dict=db_dict),
3984 timeout=timeout_ns_action)
3985 elif primitive_name == "status":
3986 detailed_status = await asyncio.wait_for(
3987 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3988 cluster_uuid=kdu.get("k8scluster-uuid"),
3989 kdu_instance=kdu.get("kdu-instance")),
3990 timeout=timeout_ns_action)
3991 else:
3992 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3993 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3994
3995 detailed_status = await asyncio.wait_for(
3996 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3997 cluster_uuid=kdu.get("k8scluster-uuid"),
3998 kdu_instance=kdu_instance,
3999 primitive_name=primitive_name,
4000 params=params, db_dict=db_dict,
4001 timeout=timeout_ns_action),
4002 timeout=timeout_ns_action)
4003
4004 if detailed_status:
4005 nslcmop_operation_state = 'COMPLETED'
4006 else:
4007 detailed_status = ''
4008 nslcmop_operation_state = 'FAILED'
4009 else:
4010 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4011 member_vnf_index=vnf_index,
4012 vdu_id=vdu_id,
4013 vdu_count_index=vdu_count_index,
4014 ee_descriptor_id=ee_descriptor_id)
4015 db_nslcmop_notif = {"collection": "nslcmops",
4016 "filter": {"_id": nslcmop_id},
4017 "path": "admin.VCA"}
4018 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
4019 ee_id,
4020 primitive=primitive_name,
4021 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
4022 timeout=timeout_ns_action,
4023 vca_type=vca_type,
4024 db_dict=db_nslcmop_notif)
4025
4026 db_nslcmop_update["detailed-status"] = detailed_status
4027 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
4028 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
4029 detailed_status))
4030 return # database update is called inside finally
4031
4032 except (DbException, LcmException, N2VCException, K8sException) as e:
4033 self.logger.error(logging_text + "Exit Exception {}".format(e))
4034 exc = e
4035 except asyncio.CancelledError:
4036 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4037 exc = "Operation was cancelled"
4038 except asyncio.TimeoutError:
4039 self.logger.error(logging_text + "Timeout while '{}'".format(step))
4040 exc = "Timeout"
4041 except Exception as e:
4042 exc = traceback.format_exc()
4043 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4044 finally:
4045 if exc:
4046 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
4047 "FAILED {}: {}".format(step, exc)
4048 nslcmop_operation_state = "FAILED"
4049 if db_nsr:
4050 self._write_ns_status(
4051 nsr_id=nsr_id,
4052 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
4053 current_operation="IDLE",
4054 current_operation_id=None,
4055 # error_description=error_description_nsr,
4056 # error_detail=error_detail,
4057 other_update=db_nsr_update
4058 )
4059
4060 self._write_op_status(
4061 op_id=nslcmop_id,
4062 stage="",
4063 error_message=error_description_nslcmop,
4064 operation_state=nslcmop_operation_state,
4065 other_update=db_nslcmop_update,
4066 )
4067
4068 if nslcmop_operation_state:
4069 try:
4070 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
4071 "operationState": nslcmop_operation_state},
4072 loop=self.loop)
4073 except Exception as e:
4074 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4075 self.logger.debug(logging_text + "Exit")
4076 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
4077 return nslcmop_operation_state, detailed_status
4078
4079 async def scale(self, nsr_id, nslcmop_id):
4080
4081 # Try to lock HA task here
4082 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
4083 if not task_is_locked_by_me:
4084 return
4085
4086 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
4087 stage = ['', '', '']
4088 # ^ stage, step, VIM progress
4089 self.logger.debug(logging_text + "Enter")
4090 # get all needed from database
4091 db_nsr = None
4092 db_nslcmop = None
4093 db_nslcmop_update = {}
4094 nslcmop_operation_state = None
4095 db_nsr_update = {}
4096 exc = None
4097 # in case of error, indicates which part of the scale operation failed, in order to set the nsr error status
4098 scale_process = None
4099 old_operational_status = ""
4100 old_config_status = ""
4101 try:
4102 # wait for any previous tasks in process
4103 step = "Waiting for previous operations to terminate"
4104 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
4105
4106 self._write_ns_status(
4107 nsr_id=nsr_id,
4108 ns_state=None,
4109 current_operation="SCALING",
4110 current_operation_id=nslcmop_id
4111 )
4112
4113 step = "Getting nslcmop from database"
4114 self.logger.debug(step + " after having waited for previous tasks to be completed")
4115 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
4116 step = "Getting nsr from database"
4117 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
4118
4119 old_operational_status = db_nsr["operational-status"]
4120 old_config_status = db_nsr["config-status"]
4121 step = "Parsing scaling parameters"
4122 # self.logger.debug(step)
4123 db_nsr_update["operational-status"] = "scaling"
4124 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4125 nsr_deployed = db_nsr["_admin"].get("deployed")
4126
4127 #######
4128 nsr_deployed = db_nsr["_admin"].get("deployed")
4129 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
4130 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
4131 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
4132 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
4133 #######
4134
4135 RO_nsr_id = nsr_deployed["RO"].get("nsr_id")
4136 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
4137 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
4138 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
4139 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
4140
4141 # for backward compatibility
4142 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
4143 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
4144 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
4145 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4146
4147 step = "Getting vnfr from database"
4148 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
4149 step = "Getting vnfd from database"
4150 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
4151
4152 step = "Getting scaling-group-descriptor"
4153 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
4154 if scaling_descriptor["name"] == scaling_group:
4155 break
4156 else:
4157 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
4158 "at vnfd:scaling-group-descriptor".format(scaling_group))
4159
4160 # cooldown_time = 0
4161 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
4162 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
4163 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
4164 # break
4165
4166 # TODO check if ns is in a proper status
4167 step = "Sending scale order to VIM"
4168 nb_scale_op = 0
4169 if not db_nsr["_admin"].get("scaling-group"):
4170 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
4171 admin_scale_index = 0
4172 else:
4173 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
4174 if admin_scale_info["name"] == scaling_group:
4175 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
4176 break
4177                 else:  # not found: use the next free index and add a new entry with this scaling-group name
4178 admin_scale_index += 1
4179 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
4180 RO_scaling_info = []
4181 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
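            # Illustrative shape of the structures built below (example values, not taken from a real NS):
            #   RO_scaling_info item: {"osm_vdu_id": "dataVM", "member-vnf-index": "1", "type": "create", "count": 1}
            #   vdu_scaling_info:     {"scaling_group_name": "...", "scaling_direction": "OUT",
            #                          "vdu-create": {"dataVM": 1}, "vdu": [{"name": ..., "vdu_id": ..., "interface": [...]}]}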
4182 if scaling_type == "SCALE_OUT":
4183 # count if max-instance-count is reached
4184 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
4185 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
4186 if nb_scale_op >= max_instance_count:
4187 raise LcmException("reached the limit of {} (max-instance-count) "
4188 "scaling-out operations for the "
4189 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
4190
4191 nb_scale_op += 1
4192 vdu_scaling_info["scaling_direction"] = "OUT"
4193 vdu_scaling_info["vdu-create"] = {}
4194 for vdu_scale_info in scaling_descriptor["vdu"]:
4195 vdud = next(vdu for vdu in db_vnfd.get("vdu") if vdu["id"] == vdu_scale_info["vdu-id-ref"])
4196 vdu_index = len([x for x in db_vnfr.get("vdur", ())
4197 if x.get("vdu-id-ref") == vdu_scale_info["vdu-id-ref"] and
4198 x.get("member-vnf-index-ref") == vnf_index])
4199 cloud_init_text = self._get_cloud_init(vdud, db_vnfd)
4200 if cloud_init_text:
4201 additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
4202 cloud_init_list = []
4203 for x in range(vdu_scale_info.get("count", 1)):
4204 if cloud_init_text:
4205                             # TODO: the VDU's own IP address is not yet available because db_vnfr has not been updated at this point.
4206 additional_params["OSM"] = self._get_osm_params(db_vnfr, vdu_scale_info["vdu-id-ref"],
4207 vdu_index + x)
4208 cloud_init_list.append(self._parse_cloud_init(cloud_init_text, additional_params,
4209 db_vnfd["id"], vdud["id"]))
4210 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4211 "type": "create", "count": vdu_scale_info.get("count", 1)})
4212 if cloud_init_list:
4213 RO_scaling_info[-1]["cloud_init"] = cloud_init_list
4214 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
4215
4216 elif scaling_type == "SCALE_IN":
4217 # count if min-instance-count is reached
4218 min_instance_count = 0
4219 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
4220 min_instance_count = int(scaling_descriptor["min-instance-count"])
4221 if nb_scale_op <= min_instance_count:
4222 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
4223 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
4224 nb_scale_op -= 1
4225 vdu_scaling_info["scaling_direction"] = "IN"
4226 vdu_scaling_info["vdu-delete"] = {}
4227 for vdu_scale_info in scaling_descriptor["vdu"]:
4228 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
4229 "type": "delete", "count": vdu_scale_info.get("count", 1)})
4230 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
4231
4232             # update vdu_scaling_info with the name and IP addresses of the VDUs that are going to be deleted
4233 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
4234 if vdu_scaling_info["scaling_direction"] == "IN":
4235 for vdur in reversed(db_vnfr["vdur"]):
4236 if vdu_delete.get(vdur["vdu-id-ref"]):
4237 vdu_delete[vdur["vdu-id-ref"]] -= 1
4238 vdu_scaling_info["vdu"].append({
4239 "name": vdur.get("name") or vdur.get("vdu-name"),
4240 "vdu_id": vdur["vdu-id-ref"],
4241 "interface": []
4242 })
4243 for interface in vdur["interfaces"]:
4244 vdu_scaling_info["vdu"][-1]["interface"].append({
4245 "name": interface["name"],
4246 "ip_address": interface["ip-address"],
4247 "mac_address": interface.get("mac-address"),
4248 })
4249 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
4250
4251 # PRE-SCALE BEGIN
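            # Pre-scale config primitives declared in the scaling-group "scaling-config-action" (trigger
            # pre-scale-in / pre-scale-out) are executed through the deployed VCA. Each primitive is tracked as a
            # sub-operation so that, if this nslcmop is retried (HA), already completed primitives are skipped and
            # pending ones are re-executed with their registered parameters.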
4252 step = "Executing pre-scale vnf-config-primitive"
4253 if scaling_descriptor.get("scaling-config-action"):
4254 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
4255 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
4256 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
4257 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4258 step = db_nslcmop_update["detailed-status"] = \
4259 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
4260
4261 # look for primitive
4262 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4263 if config_primitive["name"] == vnf_config_primitive:
4264 break
4265 else:
4266 raise LcmException(
4267 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
4268 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
4269 "primitive".format(scaling_group, vnf_config_primitive))
4270
4271 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
4272 if db_vnfr.get("additionalParamsForVnf"):
4273 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4274
4275 scale_process = "VCA"
4276 db_nsr_update["config-status"] = "configuring pre-scaling"
4277 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4278
4279 # Pre-scale retry check: Check if this sub-operation has been executed before
4280 op_index = self._check_or_add_scale_suboperation(
4281 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
4282 if op_index == self.SUBOPERATION_STATUS_SKIP:
4283 # Skip sub-operation
4284 result = 'COMPLETED'
4285 result_detail = 'Done'
4286 self.logger.debug(logging_text +
4287 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
4288 vnf_config_primitive, result, result_detail))
4289 else:
4290 if op_index == self.SUBOPERATION_STATUS_NEW:
4291 # New sub-operation: Get index of this sub-operation
4292 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4293 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4294 format(vnf_config_primitive))
4295 else:
4296 # retry: Get registered params for this existing sub-operation
4297 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4298 vnf_index = op.get('member_vnf_index')
4299 vnf_config_primitive = op.get('primitive')
4300 primitive_params = op.get('primitive_params')
4301 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
4302 format(vnf_config_primitive))
4303                                 # Execute the primitive, either with new (first-time) or registered (retry) args
4304 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4305 primitive_name = config_primitive.get("execution-environment-primitive",
4306 vnf_config_primitive)
4307 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4308 member_vnf_index=vnf_index,
4309 vdu_id=None,
4310 vdu_count_index=None,
4311 ee_descriptor_id=ee_descriptor_id)
4312 result, result_detail = await self._ns_execute_primitive(
4313 ee_id, primitive_name, primitive_params, vca_type)
4314 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4315 vnf_config_primitive, result, result_detail))
4316 # Update operationState = COMPLETED | FAILED
4317 self._update_suboperation_status(
4318 db_nslcmop, op_index, result, result_detail)
4319
4320 if result == "FAILED":
4321 raise LcmException(result_detail)
4322 db_nsr_update["config-status"] = old_config_status
4323 scale_process = None
4324 # PRE-SCALE END
4325
4326 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
4327 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
4328
4329 # SCALE RO - BEGIN
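            # Delegate the actual VDU creation/deletion to RO: the new-generation RO path (_scale_ng_ro) is used
            # when the "ng" flag is present in the RO configuration; otherwise the legacy RO action API is used
            # (_RO_scale), which polls until the scaled VDUs are ready at the VIM.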
4330 if RO_scaling_info:
4331 scale_process = "RO"
4332 if self.ro_config.get("ng"):
4333 await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage)
4334 else:
4335 await self._RO_scale(logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr,
4336 db_nslcmop_update, vdu_scaling_info)
4337 vdu_scaling_info.pop("vdu-create", None)
4338 vdu_scaling_info.pop("vdu-delete", None)
4339
4340 scale_process = None
4341 if db_nsr_update:
4342 self.update_db_2("nsrs", nsr_id, db_nsr_update)
4343
4344 # POST-SCALE BEGIN
4345 # execute primitive service POST-SCALING
4346 step = "Executing post-scale vnf-config-primitive"
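            # The post-scale block mirrors the pre-scale one: scaling-config-actions with trigger post-scale-in /
            # post-scale-out are executed as VCA primitives, with the same sub-operation bookkeeping for HA retries.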
4347 if scaling_descriptor.get("scaling-config-action"):
4348 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
4349 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
4350 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
4351 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
4352 step = db_nslcmop_update["detailed-status"] = \
4353 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
4354
4355 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
4356 if db_vnfr.get("additionalParamsForVnf"):
4357 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
4358
4359 # look for primitive
4360 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
4361 if config_primitive["name"] == vnf_config_primitive:
4362 break
4363 else:
4364 raise LcmException(
4365 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
4366 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
4367 "config-primitive".format(scaling_group, vnf_config_primitive))
4368 scale_process = "VCA"
4369 db_nsr_update["config-status"] = "configuring post-scaling"
4370 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
4371
4372 # Post-scale retry check: Check if this sub-operation has been executed before
4373 op_index = self._check_or_add_scale_suboperation(
4374 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
4375 if op_index == self.SUBOPERATION_STATUS_SKIP:
4376 # Skip sub-operation
4377 result = 'COMPLETED'
4378 result_detail = 'Done'
4379 self.logger.debug(logging_text +
4380 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
4381 format(vnf_config_primitive, result, result_detail))
4382 else:
4383 if op_index == self.SUBOPERATION_STATUS_NEW:
4384 # New sub-operation: Get index of this sub-operation
4385 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4386 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
4387 format(vnf_config_primitive))
4388 else:
4389 # retry: Get registered params for this existing sub-operation
4390 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4391 vnf_index = op.get('member_vnf_index')
4392 vnf_config_primitive = op.get('primitive')
4393 primitive_params = op.get('primitive_params')
4394 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
4395 format(vnf_config_primitive))
4396                                 # Execute the primitive, either with new (first-time) or registered (retry) args
4397 ee_descriptor_id = config_primitive.get("execution-environment-ref")
4398 primitive_name = config_primitive.get("execution-environment-primitive",
4399 vnf_config_primitive)
4400 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
4401 member_vnf_index=vnf_index,
4402 vdu_id=None,
4403 vdu_count_index=None,
4404 ee_descriptor_id=ee_descriptor_id)
4405 result, result_detail = await self._ns_execute_primitive(
4406 ee_id, primitive_name, primitive_params, vca_type)
4407 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
4408 vnf_config_primitive, result, result_detail))
4409 # Update operationState = COMPLETED | FAILED
4410 self._update_suboperation_status(
4411 db_nslcmop, op_index, result, result_detail)
4412
4413 if result == "FAILED":
4414 raise LcmException(result_detail)
4415 db_nsr_update["config-status"] = old_config_status
4416 scale_process = None
4417 # POST-SCALE END
4418
4419 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
4420 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
4421 else old_operational_status
4422 db_nsr_update["config-status"] = old_config_status
4423 return
4424 except (ROclient.ROClientException, DbException, LcmException, NgRoException) as e:
4425 self.logger.error(logging_text + "Exit Exception {}".format(e))
4426 exc = e
4427 except asyncio.CancelledError:
4428 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
4429 exc = "Operation was cancelled"
4430 except Exception as e:
4431 exc = traceback.format_exc()
4432 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
4433 finally:
4434 self._write_ns_status(
4435 nsr_id=nsr_id,
4436 ns_state=None,
4437 current_operation="IDLE",
4438 current_operation_id=None
4439 )
4440 if exc:
4441 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
4442 nslcmop_operation_state = "FAILED"
4443 if db_nsr:
4444 db_nsr_update["operational-status"] = old_operational_status
4445 db_nsr_update["config-status"] = old_config_status
4446 db_nsr_update["detailed-status"] = ""
4447 if scale_process:
4448 if "VCA" in scale_process:
4449 db_nsr_update["config-status"] = "failed"
4450 if "RO" in scale_process:
4451 db_nsr_update["operational-status"] = "failed"
4452 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
4453 exc)
4454 else:
4455 error_description_nslcmop = None
4456 nslcmop_operation_state = "COMPLETED"
4457 db_nslcmop_update["detailed-status"] = "Done"
4458
4459 self._write_op_status(
4460 op_id=nslcmop_id,
4461 stage="",
4462 error_message=error_description_nslcmop,
4463 operation_state=nslcmop_operation_state,
4464 other_update=db_nslcmop_update,
4465 )
4466 if db_nsr:
4467 self._write_ns_status(
4468 nsr_id=nsr_id,
4469 ns_state=None,
4470 current_operation="IDLE",
4471 current_operation_id=None,
4472 other_update=db_nsr_update
4473 )
4474
4475 if nslcmop_operation_state:
4476 try:
4477 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
4478 "operationState": nslcmop_operation_state},
4479 loop=self.loop)
4480 # if cooldown_time:
4481 # await asyncio.sleep(cooldown_time, loop=self.loop)
4482 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
4483 except Exception as e:
4484 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4485 self.logger.debug(logging_text + "Exit")
4486 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
4487
4488 async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage):
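        # Scale through the new-generation RO: mark the VDUs to be created/deleted in the VNF record
        # (scale_vnfr with mark_delete=True), then reuse _instantiate_ng_ro so that NG-RO converges the
        # deployment to the new desired state, and finally clean up the vdur entries that were marked for deletion.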
4489 nsr_id = db_nslcmop["nsInstanceId"]
4490 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4491 db_vnfrs = {}
4492
4493 # read from db: vnfd's for every vnf
4494         db_vnfds = {}  # every vnfd data indexed by vnfd id
4495         db_vnfds_ref = {}  # every vnfd data indexed by vnfd reference (name)
4497
4498 # for each vnf in ns, read vnfd
4499 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
4500 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
4501 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
4502 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
4503             # if this vnfd has not been read yet, read it from db
4504 if vnfd_id not in db_vnfds:
4505 # read from db
4506 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4507 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
4508 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
4509 n2vc_key = self.n2vc.get_public_key()
4510 n2vc_key_list = [n2vc_key]
4511 self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"),
4512 mark_delete=True)
4513 # db_vnfr has been updated, update db_vnfrs to use it
4514 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
4515 await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs,
4516 db_vnfds_ref, n2vc_key_list, stage=stage, start_deploy=time(),
4517 timeout_ns_deploy=self.timeout_ns_deploy)
4518 if vdu_scaling_info.get("vdu-delete"):
4519 self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False)
4520
4521 async def _RO_scale(self, logging_text, RO_nsr_id, RO_scaling_info, db_nslcmop, db_vnfr, db_nslcmop_update,
4522 vdu_scaling_info):
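        # Legacy RO scaling: register (or retrieve, on retry) the SCALE-RO sub-operation, request a "vdu-scaling"
        # action on the RO ns instance and poll the action status every few seconds until it becomes ACTIVE,
        # fails, or the one-hour timeout expires; once the VDUs are ready, the VNF record is updated with the
        # created (or removed) VDUs and their IP addresses.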
4523 nslcmop_id = db_nslcmop["_id"]
4524 nsr_id = db_nslcmop["nsInstanceId"]
4525 vdu_create = vdu_scaling_info.get("vdu-create")
4526 vdu_delete = vdu_scaling_info.get("vdu-delete")
4527 # Scale RO retry check: Check if this sub-operation has been executed before
4528 op_index = self._check_or_add_scale_suboperation(
4529             db_nslcmop, nslcmop_id, db_vnfr["member-vnf-index-ref"], None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
4530 if op_index == self.SUBOPERATION_STATUS_SKIP:
4531 # Skip sub-operation
4532 result = 'COMPLETED'
4533 result_detail = 'Done'
4534 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(result, result_detail))
4535 else:
4536 if op_index == self.SUBOPERATION_STATUS_NEW:
4537 # New sub-operation: Get index of this sub-operation
4538 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
4539 self.logger.debug(logging_text + "New sub-operation RO")
4540 else:
4541 # retry: Get registered params for this existing sub-operation
4542 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
4543 RO_nsr_id = op.get('RO_nsr_id')
4544 RO_scaling_info = op.get('RO_scaling_info')
4545 self.logger.debug(logging_text + "Sub-operation RO retry")
4546
4547 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
4548 # wait until ready
4549 RO_nslcmop_id = RO_desc["instance_action_id"]
4550 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
4551
4552 RO_task_done = False
4553 step = detailed_status = "Waiting for VIM to scale. RO_task_id={}.".format(RO_nslcmop_id)
4554 detailed_status_old = None
4555 self.logger.debug(logging_text + step)
4556
4557 deployment_timeout = 1 * 3600 # One hour
4558 while deployment_timeout > 0:
4559 if not RO_task_done:
4560 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
4561 extra_item_id=RO_nslcmop_id)
4562
4563 # deploymentStatus
4564 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4565
4566 ns_status, ns_status_info = self.RO.check_action_status(desc)
4567 if ns_status == "ERROR":
4568 raise ROclient.ROClientException(ns_status_info)
4569 elif ns_status == "BUILD":
4570 detailed_status = step + "; {}".format(ns_status_info)
4571 elif ns_status == "ACTIVE":
4572 RO_task_done = True
4573 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
4574                         step = detailed_status = "Waiting for ns to be ready at RO. RO_id={}".format(RO_nsr_id)
4575 self.logger.debug(logging_text + step)
4576 else:
4577 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
4578 else:
4579 desc = await self.RO.show("ns", RO_nsr_id)
4580 ns_status, ns_status_info = self.RO.check_ns_status(desc)
4581 # deploymentStatus
4582 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
4583
4584 if ns_status == "ERROR":
4585 raise ROclient.ROClientException(ns_status_info)
4586 elif ns_status == "BUILD":
4587 detailed_status = step + "; {}".format(ns_status_info)
4588 elif ns_status == "ACTIVE":
4589 step = detailed_status = \
4590 "Waiting for management IP address reported by the VIM. Updating VNFRs"
4591 try:
4592 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
4593 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
4594 break
4595 except LcmExceptionNoMgmtIP:
4596 pass
4597 else:
4598 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
4599 if detailed_status != detailed_status_old:
4600 self._update_suboperation_status(
4601 db_nslcmop, op_index, 'COMPLETED', detailed_status)
4602 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
4603 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
4604
4605 await asyncio.sleep(5, loop=self.loop)
4606 deployment_timeout -= 5
4607 if deployment_timeout <= 0:
4608 self._update_suboperation_status(
4609                     db_nslcmop, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
4610 raise ROclient.ROClientException("Timeout waiting ns to be ready")
4611
4612         # update vdu_scaling_info with the name and IP addresses obtained for the newly created VDUs
4613 if vdu_scaling_info["scaling_direction"] == "OUT":
4614 for vdur in reversed(db_vnfr["vdur"]):
4615 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
4616 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
4617 vdu_scaling_info["vdu"].append({
4618                     "name": vdur.get("name") or vdur.get("vdu-name"),
4619 "vdu_id": vdur["vdu-id-ref"],
4620 "interface": []
4621 })
4622 for interface in vdur["interfaces"]:
4623 vdu_scaling_info["vdu"][-1]["interface"].append({
4624 "name": interface["name"],
4625 "ip_address": interface["ip-address"],
4626 "mac_address": interface.get("mac-address"),
4627 })
4628 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
4629
4630 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
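        # Look for a 'prometheus*.j2' job template inside the VNF package artifacts, render it with the execution
        # environment data and register the resulting job(s) through the 'prometheus' helper; returns the list of
        # job names that were added, or None if there is nothing to do.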
4631 if not self.prometheus:
4632 return
4633         # look for a file called 'prometheus*.j2' in the artifact directory and read its content
4634 artifact_content = self.fs.dir_ls(artifact_path)
4635 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4636 if not job_file:
4637 return
4638 with self.fs.file_open((artifact_path, job_file), "r") as f:
4639 job_data = f.read()
4640
4641 # TODO get_service
4642 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4643 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4644 host_port = "80"
4645 vnfr_id = vnfr_id.replace("-", "")
4646 variables = {
4647 "JOB_NAME": vnfr_id,
4648 "TARGET_IP": target_ip,
4649 "EXPORTER_POD_IP": host_name,
4650 "EXPORTER_POD_PORT": host_port,
4651 }
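        # These variables are substituted into the Jinja2 job template; the rendered job(s) are expected to point
        # Prometheus at the exporter service exposed by the execution environment (assumption about the template
        # content, which lives in the VNF package).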
4652 job_list = self.prometheus.parse_job(job_data, variables)
4653         # ensure job_name uses the vnfr_id and add nsr_id as metadata
4654 for job in job_list:
4655 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4656 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4657 job["nsr_id"] = nsr_id
4658 job_dict = {jl["job_name"]: jl for jl in job_list}
4659 if await self.prometheus.update(job_dict):
4660 return list(job_dict.keys())
4661
4662 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4663 """
4664 Get VCA Cloud and VCA Cloud Credentials for the VIM account
4665
4666 :param: vim_account_id: VIM Account ID
4667
4668 :return: (cloud_name, cloud_credential)
4669 """
4670 config = self.get_vim_account_config(vim_account_id)
4671 return config.get("vca_cloud"), config.get("vca_cloud_credential")
4672
4673 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4674 """
4675 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
4676
4677 :param: vim_account_id: VIM Account ID
4678
4679 :return: (cloud_name, cloud_credential)
4680 """
4681 config = self.get_vim_account_config(vim_account_id)
4682 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")
4683
4684 def get_vim_account_config(self, vim_account_id: str) -> dict:
4685 """
4686 Get VIM Account config from the OSM Database
4687
4688 :param: vim_account_id: VIM Account ID
4689
4690 :return: Dictionary with the config of the vim account
4691 """
4692 vim_account = self.db.get_one(table="vim_accounts", q_filter={"_id": vim_account_id}, fail_on_empty=False)
4693 return vim_account.get("config", {}) if vim_account else {}
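
    # Illustrative usage of the three helpers above (hypothetical caller code): when a charm or a KDU has to be
    # deployed on a VIM-bound machine or cluster, the Juju cloud and credential can be resolved from the VIM
    # account configuration, e.g.:
    #   vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
    #   vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)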