Changes for IM change 10095: remove kdu-model
[osm/LCM.git] / osm_lcm / ns.py
1 # -*- coding: utf-8 -*-
2
3 ##
4 # Copyright 2018 Telefonica S.A.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License"); you may
7 # not use this file except in compliance with the License. You may obtain
8 # a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15 # License for the specific language governing permissions and limitations
16 # under the License.
17 ##
18
19 import asyncio
20 import yaml
21 import logging
22 import logging.handlers
23 import traceback
24 import json
25 from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
26
27 from osm_lcm import ROclient
28 from osm_lcm.ng_ro import NgRoClient, NgRoException
29 from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
30 from osm_lcm.data_utils.nsd import get_vnf_profiles
31 from osm_lcm.data_utils.vnfd import get_vnf_configuration, get_vdu_list, get_vdu_profile, \
32 get_ee_sorted_initial_config_primitive_list, get_ee_sorted_terminate_config_primitive_list, \
33 get_kdu_list, get_virtual_link_profiles, get_vdu, get_vdu_configuration, get_kdu_configuration, \
34 get_vdu_index, get_scaling_aspect, get_number_of_instances
35 from osm_lcm.data_utils.list_utils import find_in_list
36 from osm_lcm.data_utils.vnfr import get_osm_params
37 from osm_lcm.data_utils.dict_utils import parse_yaml_strings
38 from osm_lcm.data_utils.database.vim_account import VimAccountDB
39 from n2vc.k8s_helm_conn import K8sHelmConnector
40 from n2vc.k8s_helm3_conn import K8sHelm3Connector
41 from n2vc.k8s_juju_conn import K8sJujuConnector
42
43 from osm_common.dbbase import DbException
44 from osm_common.fsbase import FsException
45
46 from osm_lcm.data_utils.database.database import Database
47 from osm_lcm.data_utils.filesystem.filesystem import Filesystem
48
49 from n2vc.n2vc_juju_conn import N2VCJujuConnector
50 from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
51
52 from osm_lcm.lcm_helm_conn import LCMHelmConn
53
54 from copy import copy, deepcopy
55 from time import time
56 from uuid import uuid4
57
58 from random import randint
59
60 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
61
62
class NsLcm(LcmBase):
    # Seconds a charm may stay in blocked/error status before being marked as failed
    timeout_vca_on_error = 5 * 60
    timeout_ns_deploy = 2 * 3600  # default global timeout for deploying an NS
    timeout_ns_terminate = 1800  # default global timeout for un-deploying an NS
    timeout_charm_delete = 10 * 60
    timeout_primitive = 30 * 60  # timeout for primitive execution
    timeout_progress_primitive = 10 * 60  # timeout for some progress in a primitive execution

    # sentinel values returned when looking up sub-operations
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
    def __init__(self, msg, lcm_tasks, config, loop, prometheus=None):
        """
        Init, Connect to database, filesystem storage, and messaging
        :param msg: message-bus handler, forwarded to LcmBase
        :param lcm_tasks: registry of running LCM tasks
        :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
        :param loop: asyncio event loop shared by all the connectors created here
        :param prometheus: optional prometheus connector, stored for later use
        :return: None
        """
        super().__init__(
            msg=msg,
            logger=logging.getLogger('lcm.ns')
        )

        self.db = Database().instance.db
        self.fs = Filesystem().instance.fs
        self.loop = loop
        self.lcm_tasks = lcm_tasks
        self.timeout = config["timeout"]
        self.ro_config = config["ro_config"]
        self.ng_ro = config["ro_config"].get("ng")
        # copy so later local modifications do not leak into the shared config dict
        self.vca_config = config["VCA"].copy()

        # create N2VC connector (juju client used for charm-based configuration)
        self.n2vc = N2VCJujuConnector(
            log=self.logger,
            loop=self.loop,
            url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
            username=self.vca_config.get('user', None),
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db,
            fs=self.fs,
            db=self.db
        )

        # helm connector for execution environments (url/username resolved per call)
        self.conn_helm_ee = LCMHelmConn(
            log=self.logger,
            loop=self.loop,
            url=None,
            username=None,
            vca_config=self.vca_config,
            on_update_db=self._on_update_n2vc_db
        )

        # K8s connectors: helm v2, helm v3 and juju bundles
        self.k8sclusterhelm2 = K8sHelmConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helmpath"),
            log=self.logger,
            on_update_db=None,
            fs=self.fs,
            db=self.db
        )

        self.k8sclusterhelm3 = K8sHelm3Connector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            helm_command=self.vca_config.get("helm3path"),
            fs=self.fs,
            log=self.logger,
            db=self.db,
            on_update_db=None,
        )

        self.k8sclusterjuju = K8sJujuConnector(
            kubectl_command=self.vca_config.get("kubectlpath"),
            juju_command=self.vca_config.get("jujupath"),
            log=self.logger,
            loop=self.loop,
            on_update_db=None,
            vca_config=self.vca_config,
            fs=self.fs,
            db=self.db
        )

        # kdu deployment-type -> k8s connector
        # NOTE(review): plain "chart" maps to helm v3 — confirm that is the intended default
        self.k8scluster_map = {
            "helm-chart": self.k8sclusterhelm2,
            "helm-chart-v3": self.k8sclusterhelm3,
            "chart": self.k8sclusterhelm3,
            "juju-bundle": self.k8sclusterjuju,
            "juju": self.k8sclusterjuju,
        }

        # vca type -> connector used to deploy/execute it
        self.vca_map = {
            "lxc_proxy_charm": self.n2vc,
            "native_charm": self.n2vc,
            "k8s_proxy_charm": self.n2vc,
            "helm": self.conn_helm_ee,
            "helm-v3": self.conn_helm_ee
        }

        self.prometheus = prometheus

        # create RO client
        self.RO = NgRoClient(self.loop, **self.ro_config)
166
167 @staticmethod
168 def increment_ip_mac(ip_mac, vm_index=1):
169 if not isinstance(ip_mac, str):
170 return ip_mac
171 try:
172 # try with ipv4 look for last dot
173 i = ip_mac.rfind(".")
174 if i > 0:
175 i += 1
176 return "{}{}".format(ip_mac[:i], int(ip_mac[i:]) + vm_index)
177 # try with ipv6 or mac look for last colon. Operate in hex
178 i = ip_mac.rfind(":")
179 if i > 0:
180 i += 1
181 # format in hex, len can be 2 for mac or 4 for ipv6
182 return ("{}{:0" + str(len(ip_mac) - i) + "x}").format(ip_mac[:i], int(ip_mac[i:], 16) + vm_index)
183 except Exception:
184 pass
185 return None
186
187 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
188
189 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
190
191 try:
192 # TODO filter RO descriptor fields...
193
194 # write to database
195 db_dict = dict()
196 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
197 db_dict['deploymentStatus'] = ro_descriptor
198 self.update_db_2("nsrs", nsrs_id, db_dict)
199
200 except Exception as e:
201 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
202
    async def _on_update_n2vc_db(self, table, filter, path, updated_data):
        """
        Callback invoked by N2VC when juju reports VCA status changes. Stores the
        fresh vca status in the nsr, tries to update the configurationStatus of the
        affected VCA, and when the NS is READY/DEGRADED recomputes nsState from the
        juju machine/application statuses.

        :param table: database table of the changed record (an 'nsrs' record is read here)
        :param filter: database filter; its '_id' is used as the nsr id
        :param path: dotted path of the changed data; the last component is parsed as the VCA index
        :param updated_data: new data content (not used directly in this method)
        """

        # remove last dot from path (if exists)
        if path.endswith('.'):
            path = path[:-1]

        # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
        #                   .format(table, filter, path, updated_data))

        try:

            nsr_id = filter.get('_id')

            # read ns record from database
            nsr = self.db.get_one(table='nsrs', q_filter=filter)
            current_ns_status = nsr.get('nsState')

            # get vca status for NS
            status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)

            # vcaStatus
            db_dict = dict()
            db_dict['vcaStatus'] = status_dict

            # update configurationStatus for this VCA
            try:
                # the VCA index is the last dotted component of the path
                vca_index = int(path[path.rfind(".")+1:])

                vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
                vca_status = vca_list[vca_index].get('status')

                configuration_status_list = nsr.get('configurationStatus')
                config_status = configuration_status_list[vca_index].get('status')

                # NOTE(review): db_dict has no 'configurationStatus' entry at this point,
                # so these subscript writes raise KeyError and are swallowed by the
                # except below — confirm whether the status transition ever persists
                if config_status == 'BROKEN' and vca_status != 'failed':
                    db_dict['configurationStatus'][vca_index] = 'READY'
                elif config_status != 'BROKEN' and vca_status == 'failed':
                    db_dict['configurationStatus'][vca_index] = 'BROKEN'
            except Exception as e:
                # not update configurationStatus
                self.logger.debug('Error updating vca_index (ignore): {}'.format(e))

            # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
            # if nsState = 'DEGRADED' check if all is OK
            is_degraded = False
            if current_ns_status in ('READY', 'DEGRADED'):
                error_description = ''
                # check machines
                if status_dict.get('machines'):
                    for machine_id in status_dict.get('machines'):
                        machine = status_dict.get('machines').get(machine_id)
                        # check machine agent-status
                        if machine.get('agent-status'):
                            s = machine.get('agent-status').get('status')
                            if s != 'started':
                                is_degraded = True
                                error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
                        # check machine instance status
                        if machine.get('instance-status'):
                            s = machine.get('instance-status').get('status')
                            if s != 'running':
                                is_degraded = True
                                error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
                # check applications
                if status_dict.get('applications'):
                    for app_id in status_dict.get('applications'):
                        app = status_dict.get('applications').get(app_id)
                        # check application status
                        if app.get('status'):
                            s = app.get('status').get('status')
                            if s != 'active':
                                is_degraded = True
                                error_description += 'application {} status={} ; '.format(app_id, s)

                if error_description:
                    db_dict['errorDescription'] = error_description
                if current_ns_status == 'READY' and is_degraded:
                    db_dict['nsState'] = 'DEGRADED'
                if current_ns_status == 'DEGRADED' and not is_degraded:
                    db_dict['nsState'] = 'READY'

            # write to database
            self.update_db_2("nsrs", nsr_id, db_dict)

        except (asyncio.CancelledError, asyncio.TimeoutError):
            raise
        except Exception as e:
            self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
291
292 @staticmethod
293 def _parse_cloud_init(cloud_init_text, additional_params, vnfd_id, vdu_id):
294 try:
295 env = Environment(undefined=StrictUndefined)
296 template = env.from_string(cloud_init_text)
297 return template.render(additional_params or {})
298 except UndefinedError as e:
299 raise LcmException("Variable {} at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
300 "file, must be provided in the instantiation parameters inside the "
301 "'additionalParamsForVnf/Vdu' block".format(e, vnfd_id, vdu_id))
302 except (TemplateError, TemplateNotFound) as e:
303 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
304 format(vnfd_id, vdu_id, e))
305
306 def _get_vdu_cloud_init_content(self, vdu, vnfd):
307 cloud_init_content = cloud_init_file = None
308 try:
309 if vdu.get("cloud-init-file"):
310 base_folder = vnfd["_admin"]["storage"]
311 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
312 vdu["cloud-init-file"])
313 with self.fs.file_open(cloud_init_file, "r") as ci_file:
314 cloud_init_content = ci_file.read()
315 elif vdu.get("cloud-init"):
316 cloud_init_content = vdu["cloud-init"]
317
318 return cloud_init_content
319 except FsException as e:
320 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
321 format(vnfd["id"], vdu["id"], cloud_init_file, e))
322
323 def _get_vdu_additional_params(self, db_vnfr, vdu_id):
324 vdur = next(vdur for vdur in db_vnfr.get("vdur") if vdu_id == vdur["vdu-id-ref"])
325 additional_params = vdur.get("additionalParams")
326 return parse_yaml_strings(additional_params)
327
328 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
329 """
330 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
331 :param vnfd: input vnfd
332 :param new_id: overrides vnf id if provided
333 :param additionalParams: Instantiation params for VNFs provided
334 :param nsrId: Id of the NSR
335 :return: copy of vnfd
336 """
337 vnfd_RO = deepcopy(vnfd)
338 # remove unused by RO configuration, monitoring, scaling and internal keys
339 vnfd_RO.pop("_id", None)
340 vnfd_RO.pop("_admin", None)
341 vnfd_RO.pop("vnf-configuration", None)
342 vnfd_RO.pop("monitoring-param", None)
343 vnfd_RO.pop("scaling-group-descriptor", None)
344 vnfd_RO.pop("kdu", None)
345 vnfd_RO.pop("k8s-cluster", None)
346 if new_id:
347 vnfd_RO["id"] = new_id
348
349 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
350 for vdu in get_iterable(vnfd_RO, "vdu"):
351 vdu.pop("cloud-init-file", None)
352 vdu.pop("cloud-init", None)
353 return vnfd_RO
354
355 @staticmethod
356 def ip_profile_2_RO(ip_profile):
357 RO_ip_profile = deepcopy(ip_profile)
358 if "dns-server" in RO_ip_profile:
359 if isinstance(RO_ip_profile["dns-server"], list):
360 RO_ip_profile["dns-address"] = []
361 for ds in RO_ip_profile.pop("dns-server"):
362 RO_ip_profile["dns-address"].append(ds['address'])
363 else:
364 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
365 if RO_ip_profile.get("ip-version") == "ipv4":
366 RO_ip_profile["ip-version"] = "IPv4"
367 if RO_ip_profile.get("ip-version") == "ipv6":
368 RO_ip_profile["ip-version"] = "IPv6"
369 if "dhcp-params" in RO_ip_profile:
370 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
371 return RO_ip_profile
372
373 def _get_ro_vim_id_for_vim_account(self, vim_account):
374 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
375 if db_vim["_admin"]["operationalState"] != "ENABLED":
376 raise LcmException("VIM={} is not available. operationalState={}".format(
377 vim_account, db_vim["_admin"]["operationalState"]))
378 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
379 return RO_vim_id
380
381 def get_ro_wim_id_for_wim_account(self, wim_account):
382 if isinstance(wim_account, str):
383 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
384 if db_wim["_admin"]["operationalState"] != "ENABLED":
385 raise LcmException("WIM={} is not available. operationalState={}".format(
386 wim_account, db_wim["_admin"]["operationalState"]))
387 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
388 return RO_wim_id
389 else:
390 return wim_account
391
392 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None, mark_delete=False):
393
394 db_vdu_push_list = []
395 db_update = {"_admin.modified": time()}
396 if vdu_create:
397 for vdu_id, vdu_count in vdu_create.items():
398 vdur = next((vdur for vdur in reversed(db_vnfr["vdur"]) if vdur["vdu-id-ref"] == vdu_id), None)
399 if not vdur:
400 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".
401 format(vdu_id))
402
403 for count in range(vdu_count):
404 vdur_copy = deepcopy(vdur)
405 vdur_copy["status"] = "BUILD"
406 vdur_copy["status-detailed"] = None
407 vdur_copy["ip-address"]: None
408 vdur_copy["_id"] = str(uuid4())
409 vdur_copy["count-index"] += count + 1
410 vdur_copy["id"] = "{}-{}".format(vdur_copy["vdu-id-ref"], vdur_copy["count-index"])
411 vdur_copy.pop("vim_info", None)
412 for iface in vdur_copy["interfaces"]:
413 if iface.get("fixed-ip"):
414 iface["ip-address"] = self.increment_ip_mac(iface["ip-address"], count+1)
415 else:
416 iface.pop("ip-address", None)
417 if iface.get("fixed-mac"):
418 iface["mac-address"] = self.increment_ip_mac(iface["mac-address"], count+1)
419 else:
420 iface.pop("mac-address", None)
421 iface.pop("mgmt_vnf", None) # only first vdu can be managment of vnf
422 db_vdu_push_list.append(vdur_copy)
423 # self.logger.debug("scale out, adding vdu={}".format(vdur_copy))
424 if vdu_delete:
425 for vdu_id, vdu_count in vdu_delete.items():
426 if mark_delete:
427 indexes_to_delete = [iv[0] for iv in enumerate(db_vnfr["vdur"]) if iv[1]["vdu-id-ref"] == vdu_id]
428 db_update.update({"vdur.{}.status".format(i): "DELETING" for i in indexes_to_delete[-vdu_count:]})
429 else:
430 # it must be deleted one by one because common.db does not allow otherwise
431 vdus_to_delete = [v for v in reversed(db_vnfr["vdur"]) if v["vdu-id-ref"] == vdu_id]
432 for vdu in vdus_to_delete[:vdu_count]:
433 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, None, pull={"vdur": {"_id": vdu["_id"]}})
434 db_push = {"vdur": db_vdu_push_list} if db_vdu_push_list else None
435 self.db.set_one("vnfrs", {"_id": db_vnfr["_id"]}, db_update, push_list=db_push)
436 # modify passed dictionary db_vnfr
437 db_vnfr_ = self.db.get_one("vnfrs", {"_id": db_vnfr["_id"]})
438 db_vnfr["vdur"] = db_vnfr_["vdur"]
439
440 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
441 """
442 Updates database nsr with the RO info for the created vld
443 :param ns_update_nsr: dictionary to be filled with the updated info
444 :param db_nsr: content of db_nsr. This is also modified
445 :param nsr_desc_RO: nsr descriptor from RO
446 :return: Nothing, LcmException is raised on errors
447 """
448
449 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
450 for net_RO in get_iterable(nsr_desc_RO, "nets"):
451 if vld["id"] != net_RO.get("ns_net_osm_id"):
452 continue
453 vld["vim-id"] = net_RO.get("vim_net_id")
454 vld["name"] = net_RO.get("vim_name")
455 vld["status"] = net_RO.get("status")
456 vld["status-detailed"] = net_RO.get("error_msg")
457 ns_update_nsr["vld.{}".format(vld_index)] = vld
458 break
459 else:
460 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
461
462 def set_vnfr_at_error(self, db_vnfrs, error_text):
463 try:
464 for db_vnfr in db_vnfrs.values():
465 vnfr_update = {"status": "ERROR"}
466 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
467 if "status" not in vdur:
468 vdur["status"] = "ERROR"
469 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
470 if error_text:
471 vdur["status-detailed"] = str(error_text)
472 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
473 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
474 except DbException as e:
475 self.logger.error("Cannot update vnf. {}".format(e))
476
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may return several addresses separated by ';': keep the first one
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    # match each vdur against the RO vm with same osm id and replica index
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # PDUs are not deployed by RO, nothing to update
                        continue
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                # for/else: no RO interface matched this vnfr interface
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    # update internal vlds with the RO net info
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
549
550 def _get_ns_config_info(self, nsr_id):
551 """
552 Generates a mapping between vnf,vdu elements and the N2VC id
553 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
554 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
555 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
556 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
557 """
558 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
559 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
560 mapping = {}
561 ns_config_info = {"osm-config-mapping": mapping}
562 for vca in vca_deployed_list:
563 if not vca["member-vnf-index"]:
564 continue
565 if not vca["vdu_id"]:
566 mapping[vca["member-vnf-index"]] = vca["application"]
567 else:
568 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
569 vca["application"]
570 return ns_config_info
571
    async def _instantiate_ng_ro(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds,
                                 n2vc_key_list, stage, start_deploy, timeout_ns_deploy):
        """
        Build the deployment 'target' dictionary from the NS/VNF descriptors and the
        instantiation parameters, send it to next-generation RO and wait until RO
        reports the deployment finished; finally update the nsr status in database.

        :param logging_text: prefix for log messages
        :param nsr_id: nsr _id
        :param nsd: NS descriptor content
        :param db_nsr: nsr database content
        :param db_nslcmop: current nslcmop database content
        :param db_vnfrs: dictionary member-vnf-index -> vnfr content
        :param db_vnfds: list of vnfd database contents
        :param n2vc_key_list: VCA ssh public keys to be injected in the vdus
        :param stage: 3-element progress list; stage[2] is updated while waiting for RO
        :param start_deploy: deployment start timestamp, passed to _wait_ng_ro
        :param timeout_ns_deploy: maximum seconds to wait for RO
        :return: None; exceptions are raised on errors
        """

        db_vims = {}

        def get_vim_account(vim_account_id):
            # cache vim_accounts records to avoid repeated database reads
            nonlocal db_vims
            if vim_account_id in db_vims:
                return db_vims[vim_account_id]
            db_vim = self.db.get_one("vim_accounts", {"_id": vim_account_id})
            db_vims[vim_account_id] = db_vim
            return db_vim

        # modify target_vld info with instantiation parameters
        def parse_vld_instantiation_params(target_vim, target_vld, vld_params, target_sdn):
            if vld_params.get("ip-profile"):
                target_vld["vim_info"][target_vim]["ip_profile"] = vld_params["ip-profile"]
            if vld_params.get("provider-network"):
                target_vld["vim_info"][target_vim]["provider_network"] = vld_params["provider-network"]
                if "sdn-ports" in vld_params["provider-network"] and target_sdn:
                    target_vld["vim_info"][target_sdn]["sdn-ports"] = vld_params["provider-network"]["sdn-ports"]
            if vld_params.get("wimAccountId"):
                target_wim = "wim:{}".format(vld_params["wimAccountId"])
                target_vld["vim_info"][target_wim] = {}
            for param in ("vim-network-name", "vim-network-id"):
                if vld_params.get(param):
                    if isinstance(vld_params[param], dict):
                        # NOTE(review): iterating a dict yields only its keys, so this
                        # two-name unpack looks like it needs .items() — confirm with callers
                        for vim, vim_net in vld_params[param]:
                            other_target_vim = "vim:" + vim
                            populate_dict(target_vld["vim_info"], (other_target_vim, param.replace("-", "_")), vim_net)
                    else:  # isinstance str
                        target_vld["vim_info"][target_vim][param.replace("-", "_")] = vld_params[param]
            if vld_params.get("common_id"):
                target_vld["common_id"] = vld_params.get("common_id")

        nslcmop_id = db_nslcmop["_id"]
        # skeleton of the deployment request sent to RO
        target = {
            "name": db_nsr["name"],
            "ns": {"vld": []},
            "vnf": [],
            "image": deepcopy(db_nsr["image"]),
            "flavor": deepcopy(db_nsr["flavor"]),
            "action_id": nslcmop_id,
            "cloud_init_content": {},
        }
        for image in target["image"]:
            image["vim_info"] = {}
        for flavor in target["flavor"]:
            flavor["vim_info"] = {}

        if db_nslcmop.get("lcmOperationType") != "instantiate":
            # get parameters of instantiation (the last 'instantiate' operation of this NS):
            db_nslcmop_instantiate = self.db.get_list("nslcmops", {"nsInstanceId": db_nslcmop["nsInstanceId"],
                                                                   "lcmOperationType": "instantiate"})[-1]
            ns_params = db_nslcmop_instantiate.get("operationParams")
        else:
            ns_params = db_nslcmop.get("operationParams")
        ssh_keys_instantiation = ns_params.get("ssh_keys") or []
        ssh_keys_all = ssh_keys_instantiation + (n2vc_key_list or [])

        # cp2target maps ns connection points to "nsrs:<id>:vld.<index>" targets
        cp2target = {}
        for vld_index, vld in enumerate(db_nsr.get("vld")):
            target_vim = "vim:{}".format(ns_params["vimAccountId"])
            target_vld = {
                "id": vld["id"],
                "name": vld["name"],
                "mgmt-network": vld.get("mgmt-network", False),
                "type": vld.get("type"),
                "vim_info": {
                    target_vim: {
                        "vim_network_name": vld.get("vim-network-name"),
                        "vim_account_id": ns_params["vimAccountId"]
                    }
                }
            }
            # check if this network needs SDN assist
            if vld.get("pci-interfaces"):
                # NOTE(review): 'vim_info' is a dict here, so the [0] indexing and the
                # .append below look broken for this SDN-assist path — confirm before relying on it
                db_vim = VimAccountDB.get_vim_account_with_id(target_vld["vim_info"][0]["vim_account_id"])
                sdnc_id = db_vim["config"].get("sdn-controller")
                if sdnc_id:
                    target_vld["vim_info"].append({"sdnc_id": sdnc_id})

            nsd_vnf_profiles = get_vnf_profiles(nsd)
            for nsd_vnf_profile in nsd_vnf_profiles:
                for cp in nsd_vnf_profile["virtual-link-connectivity"]:
                    if cp["virtual-link-profile-id"] == vld["id"]:
                        cp2target["member_vnf:{}.{}".format(
                            cp["constituent-cpd-id"][0]["constituent-base-element-id"],
                            cp["constituent-cpd-id"][0]["constituent-cpd-id"]
                        )] = "nsrs:{}:vld.{}".format(nsr_id, vld_index)

            # check at nsd descriptor, if there is an ip-profile
            vld_params = {}
            virtual_link_profiles = get_virtual_link_profiles(nsd)

            for vlp in virtual_link_profiles:
                ip_profile = find_in_list(nsd["ip-profiles"],
                                          lambda profile: profile["name"] == vlp["ip-profile-ref"])
                vld_params["ip-profile"] = ip_profile["ip-profile-params"]
            # update vld_params with instantiation params
            vld_instantiation_params = find_in_list(get_iterable(ns_params, "vld"),
                                                    lambda a_vld: a_vld["name"] in (vld["name"], vld["id"]))
            if vld_instantiation_params:
                vld_params.update(vld_instantiation_params)
            parse_vld_instantiation_params(target_vim, target_vld, vld_params, None)
            target["ns"]["vld"].append(target_vld)

        for vnfr in db_vnfrs.values():
            vnfd = find_in_list(db_vnfds, lambda db_vnf: db_vnf["id"] == vnfr["vnfd-ref"])
            vnf_params = find_in_list(get_iterable(ns_params, "vnf"),
                                      lambda a_vnf: a_vnf["member-vnf-index"] == vnfr["member-vnf-index-ref"])
            target_vnf = deepcopy(vnfr)
            target_vim = "vim:{}".format(vnfr["vim-account-id"])
            for vld in target_vnf.get("vld", ()):
                # check if connected to a ns.vld, to fill target'
                vnf_cp = find_in_list(vnfd.get("int-virtual-link-desc", ()),
                                      lambda cpd: cpd.get("id") == vld["id"])
                if vnf_cp:
                    ns_cp = "member_vnf:{}.{}".format(vnfr["member-vnf-index-ref"], vnf_cp["id"])
                    if cp2target.get(ns_cp):
                        vld["target"] = cp2target[ns_cp]

                vld["vim_info"] = {target_vim: {"vim_network_name": vld.get("vim-network-name")}}
                # check if this network needs SDN assist
                target_sdn = None
                if vld.get("pci-interfaces"):
                    db_vim = get_vim_account(vnfr["vim-account-id"])
                    sdnc_id = db_vim["config"].get("sdn-controller")
                    if sdnc_id:
                        sdn_vld = "vnfrs:{}:vld.{}".format(target_vnf["_id"], vld["id"])
                        target_sdn = "sdn:{}".format(sdnc_id)
                        vld["vim_info"][target_sdn] = {
                            "sdn": True, "target_vim": target_vim, "vlds": [sdn_vld], "type": vld.get("type")}

                # check at vnfd descriptor, if there is an ip-profile
                vld_params = {}
                vnfd_vlp = find_in_list(
                    get_virtual_link_profiles(vnfd),
                    lambda a_link_profile: a_link_profile["id"] == vld["id"]
                )
                if vnfd_vlp and vnfd_vlp.get("virtual-link-protocol-data") and \
                        vnfd_vlp["virtual-link-protocol-data"].get("l3-protocol-data"):
                    # translate IM l3-protocol-data into RO ip-profile field names
                    ip_profile_source_data = vnfd_vlp["virtual-link-protocol-data"]["l3-protocol-data"]
                    ip_profile_dest_data = {}
                    if "ip-version" in ip_profile_source_data:
                        ip_profile_dest_data["ip-version"] = ip_profile_source_data["ip-version"]
                    if "cidr" in ip_profile_source_data:
                        ip_profile_dest_data["subnet-address"] = ip_profile_source_data["cidr"]
                    if "gateway-ip" in ip_profile_source_data:
                        ip_profile_dest_data["gateway-address"] = ip_profile_source_data["gateway-ip"]
                    if "dhcp-enabled" in ip_profile_source_data:
                        ip_profile_dest_data["dhcp-params"] = {
                            "enabled": ip_profile_source_data["dhcp-enabled"]
                        }

                    vld_params["ip-profile"] = ip_profile_dest_data
                # update vld_params with instantiation params
                if vnf_params:
                    vld_instantiation_params = find_in_list(get_iterable(vnf_params, "internal-vld"),
                                                            lambda i_vld: i_vld["name"] == vld["id"])
                    if vld_instantiation_params:
                        vld_params.update(vld_instantiation_params)
                parse_vld_instantiation_params(target_vim, vld, vld_params, target_sdn)

            vdur_list = []
            for vdur in target_vnf.get("vdur", ()):
                if vdur.get("status") == "DELETING" or vdur.get("pdu-type"):
                    continue  # This vdu must not be created
                vdur["vim_info"] = {"vim_account_id": vnfr["vim-account-id"]}

                self.logger.debug("NS > ssh_keys > {}".format(ssh_keys_all))

                if ssh_keys_all:
                    # inject ssh keys in the vdu when its (vdu- or vnf-level) configuration
                    # declares ssh-access; otherwise only instantiation keys on mgmt interfaces
                    vdu_configuration = get_vdu_configuration(vnfd, vdur["vdu-id-ref"])
                    vnf_configuration = get_vnf_configuration(vnfd)
                    if vdu_configuration and vdu_configuration.get("config-access") and \
                            vdu_configuration.get("config-access").get("ssh-access"):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vdu_configuration["config-access"]["ssh-access"]["required"]
                    elif vnf_configuration and vnf_configuration.get("config-access") and \
                            vnf_configuration.get("config-access").get("ssh-access") and \
                            any(iface.get("mgmt-vnf") for iface in vdur["interfaces"]):
                        vdur["ssh-keys"] = ssh_keys_all
                        vdur["ssh-access-required"] = vnf_configuration["config-access"]["ssh-access"]["required"]
                    elif ssh_keys_instantiation and \
                            find_in_list(vdur["interfaces"], lambda iface: iface.get("mgmt-vnf")):
                        vdur["ssh-keys"] = ssh_keys_instantiation

                self.logger.debug("NS > vdur > {}".format(vdur))

                vdud = get_vdu(vnfd, vdur["vdu-id-ref"])
                # cloud-init
                if vdud.get("cloud-init-file"):
                    vdur["cloud-init"] = "{}:file:{}".format(vnfd["_id"], vdud.get("cloud-init-file"))
                    # read file and put content at target.cloud_init_content. Avoid ng_ro to use shared package system
                    if vdur["cloud-init"] not in target["cloud_init_content"]:
                        base_folder = vnfd["_admin"]["storage"]
                        cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
                                                                       vdud.get("cloud-init-file"))
                        with self.fs.file_open(cloud_init_file, "r") as ci_file:
                            target["cloud_init_content"][vdur["cloud-init"]] = ci_file.read()
                elif vdud.get("cloud-init"):
                    vdur["cloud-init"] = "{}:vdu:{}".format(vnfd["_id"], get_vdu_index(vnfd, vdur["vdu-id-ref"]))
                    # put content at target.cloud_init_content. Avoid ng_ro read vnfd descriptor
                    target["cloud_init_content"][vdur["cloud-init"]] = vdud["cloud-init"]
                vdur["additionalParams"] = vdur.get("additionalParams") or {}
                deploy_params_vdu = self._format_additional_params(vdur.get("additionalParams") or {})
                deploy_params_vdu["OSM"] = get_osm_params(vnfr, vdur["vdu-id-ref"], vdur["count-index"])
                vdur["additionalParams"] = deploy_params_vdu

                # flavor
                ns_flavor = target["flavor"][int(vdur["ns-flavor-id"])]
                if target_vim not in ns_flavor["vim_info"]:
                    ns_flavor["vim_info"][target_vim] = {}
                # image
                ns_image = target["image"][int(vdur["ns-image-id"])]
                if target_vim not in ns_image["vim_info"]:
                    ns_image["vim_info"][target_vim] = {}

                # NOTE(review): this replaces the vim_account_id entry assigned above — confirm intended
                vdur["vim_info"] = {target_vim: {}}
                # instantiation parameters
                # if vnf_params:
                #     vdu_instantiation_params = next((v for v in get_iterable(vnf_params, "vdu") if v["id"] ==
                #     vdud["id"]), None)
                vdur_list.append(vdur)
            target_vnf["vdur"] = vdur_list
            target["vnf"].append(target_vnf)

        desc = await self.RO.deploy(nsr_id, target)
        self.logger.debug("RO return > {}".format(desc))
        action_id = desc["action_id"]
        await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, timeout_ns_deploy, stage)

        # Updating NSR
        db_nsr_update = {
            "_admin.deployed.RO.operational-status": "running",
            "detailed-status": " ".join(stage)
        }
        # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)
        self.logger.debug(logging_text + "ns deployed at RO. RO_id={}".format(action_id))
        return
815
816 async def _wait_ng_ro(self, nsr_id, action_id, nslcmop_id=None, start_time=None, timeout=600, stage=None):
817 detailed_status_old = None
818 db_nsr_update = {}
819 start_time = start_time or time()
820 while time() <= start_time + timeout:
821 desc_status = await self.RO.status(nsr_id, action_id)
822 self.logger.debug("Wait NG RO > {}".format(desc_status))
823 if desc_status["status"] == "FAILED":
824 raise NgRoException(desc_status["details"])
825 elif desc_status["status"] == "BUILD":
826 if stage:
827 stage[2] = "VIM: ({})".format(desc_status["details"])
828 elif desc_status["status"] == "DONE":
829 if stage:
830 stage[2] = "Deployed at VIM"
831 break
832 else:
833 assert False, "ROclient.check_ns_status returns unknown {}".format(desc_status["status"])
834 if stage and nslcmop_id and stage[2] != detailed_status_old:
835 detailed_status_old = stage[2]
836 db_nsr_update["detailed-status"] = " ".join(stage)
837 self.update_db_2("nsrs", nsr_id, db_nsr_update)
838 self._write_op_status(nslcmop_id, stage)
839 await asyncio.sleep(15, loop=self.loop)
840 else: # timeout_ns_deploy
841 raise NgRoException("Timeout waiting ns to deploy")
842
843 async def _terminate_ng_ro(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
844 db_nsr_update = {}
845 failed_detail = []
846 action_id = None
847 start_deploy = time()
848 try:
849 target = {
850 "ns": {"vld": []},
851 "vnf": [],
852 "image": [],
853 "flavor": [],
854 "action_id": nslcmop_id
855 }
856 desc = await self.RO.deploy(nsr_id, target)
857 action_id = desc["action_id"]
858 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = action_id
859 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETING"
860 self.logger.debug(logging_text + "ns terminate action at RO. action_id={}".format(action_id))
861
862 # wait until done
863 delete_timeout = 20 * 60 # 20 minutes
864 await self._wait_ng_ro(nsr_id, action_id, nslcmop_id, start_deploy, delete_timeout, stage)
865
866 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
867 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
868 # delete all nsr
869 await self.RO.delete(nsr_id)
870 except Exception as e:
871 if isinstance(e, NgRoException) and e.http_code == 404: # not found
872 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
873 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
874 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
875 self.logger.debug(logging_text + "RO_action_id={} already deleted".format(action_id))
876 elif isinstance(e, NgRoException) and e.http_code == 409: # conflict
877 failed_detail.append("delete conflict: {}".format(e))
878 self.logger.debug(logging_text + "RO_action_id={} delete conflict: {}".format(action_id, e))
879 else:
880 failed_detail.append("delete error: {}".format(e))
881 self.logger.error(logging_text + "RO_action_id={} delete error: {}".format(action_id, e))
882
883 if failed_detail:
884 stage[2] = "Error deleting from VIM"
885 else:
886 stage[2] = "Deleted from VIM"
887 db_nsr_update["detailed-status"] = " ".join(stage)
888 self.update_db_2("nsrs", nsr_id, db_nsr_update)
889 self._write_op_status(nslcmop_id, stage)
890
891 if failed_detail:
892 raise LcmException("; ".join(failed_detail))
893 return
894
895 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds,
896 n2vc_key_list, stage):
897 """
898 Instantiate at RO
899 :param logging_text: preffix text to use at logging
900 :param nsr_id: nsr identity
901 :param nsd: database content of ns descriptor
902 :param db_nsr: database content of ns record
903 :param db_nslcmop: database content of ns operation, in this case, 'instantiate'
904 :param db_vnfrs:
905 :param db_vnfds: database content of vnfds, indexed by id (not _id). {id: {vnfd_object}, ...}
906 :param n2vc_key_list: ssh-public-key list to be inserted to management vdus via cloud-init
907 :param stage: list with 3 items: [general stage, tasks, vim_specific]. This task will write over vim_specific
908 :return: None or exception
909 """
910 try:
911 start_deploy = time()
912 ns_params = db_nslcmop.get("operationParams")
913 if ns_params and ns_params.get("timeout_ns_deploy"):
914 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
915 else:
916 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
917
918 # Check for and optionally request placement optimization. Database will be updated if placement activated
919 stage[2] = "Waiting for Placement."
920 if await self._do_placement(logging_text, db_nslcmop, db_vnfrs):
921 # in case of placement change ns_params[vimAcountId) if not present at any vnfrs
922 for vnfr in db_vnfrs.values():
923 if ns_params["vimAccountId"] == vnfr["vim-account-id"]:
924 break
925 else:
926 ns_params["vimAccountId"] == vnfr["vim-account-id"]
927
928 return await self._instantiate_ng_ro(logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs,
929 db_vnfds, n2vc_key_list, stage, start_deploy, timeout_ns_deploy)
930 except Exception as e:
931 stage[2] = "ERROR deploying at VIM"
932 self.set_vnfr_at_error(db_vnfrs, str(e))
933 self.logger.error("Error deploying at VIM {}".format(e),
934 exc_info=not isinstance(e, (ROclient.ROClientException, LcmException, DbException,
935 NgRoException)))
936 raise
937
938 async def wait_kdu_up(self, logging_text, nsr_id, vnfr_id, kdu_name):
939 """
940 Wait for kdu to be up, get ip address
941 :param logging_text: prefix use for logging
942 :param nsr_id:
943 :param vnfr_id:
944 :param kdu_name:
945 :return: IP address
946 """
947
948 # self.logger.debug(logging_text + "Starting wait_kdu_up")
949 nb_tries = 0
950
951 while nb_tries < 360:
952 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
953 kdur = next((x for x in get_iterable(db_vnfr, "kdur") if x.get("kdu-name") == kdu_name), None)
954 if not kdur:
955 raise LcmException("Not found vnfr_id={}, kdu_name={}".format(vnfr_id, kdu_name))
956 if kdur.get("status"):
957 if kdur["status"] in ("READY", "ENABLED"):
958 return kdur.get("ip-address")
959 else:
960 raise LcmException("target KDU={} is in error state".format(kdu_name))
961
962 await asyncio.sleep(10, loop=self.loop)
963 nb_tries += 1
964 raise LcmException("Timeout waiting KDU={} instantiated".format(kdu_name))
965
    async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
        """
        Wait for ip addres at RO, and optionally, insert public key in virtual machine
        :param logging_text: prefix use for logging
        :param nsr_id: ns record id
        :param vnfr_id: vnfr database _id
        :param vdu_id: target vdu-id-ref; None means the VNF management VM
        :param vdu_index: count-index of the vdu, used together with vdu_id
        :param pub_key: public ssh key to inject, None to skip
        :param user: user to apply the public ssh key
        :return: IP address
        """

        self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
        ro_nsr_id = None
        ip_address = None
        nb_tries = 0
        target_vdu_id = None
        ro_retries = 0

        # Outer retry loop: every iteration sleeps 10s; bounded by ro_retries below (360 -> ~1 hour).
        while True:

            ro_retries += 1
            if ro_retries >= 360:  # 1 hour
                # NOTE(review): this message also fires when simply timing out waiting for an
                # IP address or key injection, not only when the RO nsr_id is missing
                raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))

            await asyncio.sleep(10, loop=self.loop)

            # get ip address
            if not target_vdu_id:
                db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})

                if not vdu_id:  # for the VNF case
                    if db_vnfr.get("status") == "ERROR":
                        raise LcmException("Cannot inject ssh-key because target VNF is in error state")
                    ip_address = db_vnfr.get("ip-address")
                    if not ip_address:
                        continue
                    # pick the vdur that owns the VNF management IP
                    vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
                else:  # VDU case
                    vdur = next((x for x in get_iterable(db_vnfr, "vdur")
                                 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)

                if not vdur and len(db_vnfr.get("vdur", ())) == 1:  # If only one, this should be the target vdu
                    vdur = db_vnfr["vdur"][0]
                if not vdur:
                    raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
                                                                                              vdu_index))
                # New generation RO stores information at "vim_info"
                ng_ro_status = None
                target_vim = None
                if vdur.get("vim_info"):
                    target_vim = next(t for t in vdur["vim_info"])  # there should be only one key
                    ng_ro_status = vdur["vim_info"][target_vim].get("vim_status")
                # PDUs are considered up by definition; otherwise require ACTIVE from old or new RO status
                if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE" or ng_ro_status == "ACTIVE":
                    ip_address = vdur.get("ip-address")
                    if not ip_address:
                        continue
                    target_vdu_id = vdur["vdu-id-ref"]
                elif vdur.get("status") == "ERROR" or ng_ro_status == "ERROR":
                    raise LcmException("Cannot inject ssh-key because target VM is in error state")

            if not target_vdu_id:
                continue

            # inject public key into machine
            if pub_key and user:
                self.logger.debug(logging_text + "Inserting RO key")
                self.logger.debug("SSH > PubKey > {}".format(pub_key))
                if vdur.get("pdu-type"):
                    # key injection is not supported for physical deployment units
                    self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
                    return ip_address
                try:
                    ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id)  # TODO add vdu_index
                    if self.ng_ro:
                        # new-generation RO: key injection is a regular deploy action
                        target = {"action": {"action": "inject_ssh_key", "key": pub_key, "user": user},
                                  "vnf": [{"_id": vnfr_id, "vdur": [{"id": vdur["id"]}]}],
                                  }
                        desc = await self.RO.deploy(nsr_id, target)
                        action_id = desc["action_id"]
                        await self._wait_ng_ro(nsr_id, action_id, timeout=600)
                        break
                    else:
                        # wait until NS is deployed at RO
                        if not ro_nsr_id:
                            db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
                            ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
                        if not ro_nsr_id:
                            continue
                        result_dict = await self.RO.create_action(
                            item="ns",
                            item_id_name=ro_nsr_id,
                            descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
                        )
                        # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
                        if not result_dict or not isinstance(result_dict, dict):
                            raise LcmException("Unknown response from RO when injecting key")
                        for result in result_dict.values():
                            if result.get("vim_result") == 200:
                                break
                        else:
                            raise ROclient.ROClientException("error injecting key: {}".format(
                                result.get("description")))
                        break
                except NgRoException as e:
                    raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
                except ROclient.ROClientException as e:
                    # old RO may transiently fail; retry up to 20 times (outer loop sleeps 10s each)
                    if not nb_tries:
                        self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
                                          format(e, 20*10))
                    nb_tries += 1
                    if nb_tries >= 20:
                        raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
            else:
                # no key injection requested: the IP address is enough
                break

        return ip_address
1083
    async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
        """
        Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs

        :param nsr_id: ns record id to poll configurationStatus from
        :param vca_deployed_list: _admin.deployed.VCA list; only entry vca_index is used
        :param vca_index: index of the VCA whose dependencies must be READY
        :raises LcmException: when a dependent charm is BROKEN or the wait times out
        """
        my_vca = vca_deployed_list[vca_index]
        if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
            # vdu or kdu: no dependencies
            return
        # NOTE(review): decremented by 1 per 10-second sleep, so the effective wait is
        # ~50 minutes rather than 300 seconds — confirm this is the intended timeout
        timeout = 300
        while timeout >= 0:
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
            vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
            configuration_status_list = db_nsr["configurationStatus"]
            for index, vca_deployed in enumerate(configuration_status_list):
                if index == vca_index:
                    # myself
                    continue
                # NS-level VCA (no member-vnf-index) depends on everything; a VNF-level VCA
                # depends only on entries of the same member-vnf-index
                if not my_vca.get("member-vnf-index") or \
                        (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
                    internal_status = configuration_status_list[index].get("status")
                    if internal_status == 'READY':
                        continue
                    elif internal_status == 'BROKEN':
                        raise LcmException("Configuration aborted because dependent charm/s has failed")
                    else:
                        # some dependency not ready yet: abort the scan and sleep
                        break
            else:
                # for-else: scan completed with no break, i.e. all dependencies READY
                # no dependencies, return
                return
            await asyncio.sleep(10)
            timeout -= 1

        raise LcmException("Configuration aborted because dependent charm/s timeout")
1117
    async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
                               config_descriptor, deploy_params, base_folder, nslcmop_id, stage, vca_type, vca_name,
                               ee_config_descriptor):
        """
        Create or register an execution environment in VCA for one element (NS, VNF, VDU or KDU),
        install its configuration software and run its Day-1 (initial-config) primitives.

        :param logging_text: prefix for logging
        :param vca_index: index of this VCA in db_nsr._admin.deployed.VCA
        :param nsi_id: network slice instance id, or None/empty
        :param db_nsr: database content of the ns record
        :param db_vnfr: database content of the vnfr, or None for an NS-level charm
        :param vdu_id: vdu-id-ref when configuring a VDU, else None
        :param kdu_name: kdu name when configuring a KDU, else None
        :param vdu_index: count-index of the vdu (used with vdu_id)
        :param config_descriptor: descriptor section holding config-access/initial-config-primitive
        :param deploy_params: additional params for primitives; mutated here (rw_mgmt_ip, ns_config_info)
        :param base_folder: _admin.storage of the package (folder/pkg-dir)
        :param nslcmop_id: operation id, for progress reporting
        :param stage: 3-item progress list; item 0 is overwritten with the Day-1 stage text
        :param vca_type: one of native_charm, lxc_proxy_charm, k8s_proxy_charm, helm, helm-v3
        :param vca_name: charm/chart name used to build the artifact path
        :param ee_config_descriptor: execution-environment descriptor (id used for primitive sorting)
        :raises LcmException: wrapping any failure; the failing step is recorded in 'step'
        """
        nsr_id = db_nsr["_id"]
        db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
        vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
        vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
        osm_config = {"osm": {"ns_id": db_nsr["_id"]}}
        db_dict = {
            'collection': 'nsrs',
            'filter': {'_id': nsr_id},
            'path': db_update_entry
        }
        # 'step' tracks the current phase so the except clause can report where it failed
        step = ""
        try:

            element_type = 'NS'
            element_under_configuration = nsr_id

            vnfr_id = None
            if db_vnfr:
                vnfr_id = db_vnfr["_id"]
                osm_config["osm"]["vnf_id"] = vnfr_id

            # juju model namespace: <nsi>.<ns>[.<vnf>[.<vdu>-<index> | .<kdu>]]
            namespace = "{nsi}.{ns}".format(
                nsi=nsi_id if nsi_id else "",
                ns=nsr_id)

            if vnfr_id:
                element_type = 'VNF'
                element_under_configuration = vnfr_id
                namespace += ".{}".format(vnfr_id)
                if vdu_id:
                    namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
                    element_type = 'VDU'
                    element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
                    osm_config["osm"]["vdu_id"] = vdu_id
                elif kdu_name:
                    namespace += ".{}".format(kdu_name)
                    element_type = 'KDU'
                    element_under_configuration = kdu_name
                    osm_config["osm"]["kdu_name"] = kdu_name

            # Get artifact path
            artifact_path = "{}/{}/{}/{}".format(
                base_folder["folder"],
                base_folder["pkg-dir"],
                "charms" if vca_type in ("native_charm", "lxc_proxy_charm", "k8s_proxy_charm") else "helm-charts",
                vca_name
            )

            self.logger.debug("Artifact path > {}".format(artifact_path))

            # get initial_config_primitive_list that applies to this element
            initial_config_primitive_list = config_descriptor.get('initial-config-primitive')

            self.logger.debug("Initial config primitive list > {}".format(initial_config_primitive_list))

            # add config if not present for NS charm
            ee_descriptor_id = ee_config_descriptor.get("id")
            self.logger.debug("EE Descriptor > {}".format(ee_descriptor_id))
            initial_config_primitive_list = get_ee_sorted_initial_config_primitive_list(initial_config_primitive_list,
                                                                                        vca_deployed, ee_descriptor_id)

            self.logger.debug("Initial config primitive list #2 > {}".format(initial_config_primitive_list))
            # n2vc_redesign STEP 3.1
            # find old ee_id if exists
            ee_id = vca_deployed.get("ee_id")

            vim_account_id = (
                deep_get(db_vnfr, ("vim-account-id",)) or
                deep_get(deploy_params, ("OSM", "vim_account_id"))
            )
            vca_cloud, vca_cloud_credential = self.get_vca_cloud_and_credentials(vim_account_id)
            vca_k8s_cloud, vca_k8s_cloud_credential = self.get_vca_k8s_cloud_and_credentials(vim_account_id)
            # create or register execution environment in VCA
            if vca_type in ("lxc_proxy_charm", "k8s_proxy_charm", "helm", "helm-v3"):

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status='CREATING',
                    element_under_configuration=element_under_configuration,
                    element_type=element_type
                )

                step = "create execution environment"
                self.logger.debug(logging_text + step)

                ee_id = None
                credentials = None
                if vca_type == "k8s_proxy_charm":
                    ee_id = await self.vca_map[vca_type].install_k8s_proxy_charm(
                        charm_name=artifact_path[artifact_path.rfind("/") + 1:],
                        namespace=namespace,
                        artifact_path=artifact_path,
                        db_dict=db_dict,
                        cloud_name=vca_k8s_cloud,
                        credential_name=vca_k8s_cloud_credential,
                    )
                elif vca_type == "helm" or vca_type == "helm-v3":
                    ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        config=osm_config,
                        artifact_path=artifact_path,
                        vca_type=vca_type
                    )
                else:
                    # lxc_proxy_charm
                    ee_id, credentials = await self.vca_map[vca_type].create_execution_environment(
                        namespace=namespace,
                        reuse_ee_id=ee_id,
                        db_dict=db_dict,
                        cloud_name=vca_cloud,
                        credential_name=vca_cloud_credential,
                    )

            elif vca_type == "native_charm":
                # native charm runs inside the workload VM: the VM must be up before registering
                step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)
                rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
                                                                 user=None, pub_key=None)
                credentials = {"hostname": rw_mgmt_ip}
                # get username
                username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
                # merged. Meanwhile let's get username from initial-config-primitive
                if not username and initial_config_primitive_list:
                    for config_primitive in initial_config_primitive_list:
                        for param in config_primitive.get("parameter", ()):
                            if param["name"] == "ssh-username":
                                username = param["value"]
                                break
                if not username:
                    raise LcmException("Cannot determine the username neither with 'initial-config-primitive' nor with "
                                       "'config-access.ssh-access.default-user'")
                credentials["username"] = username
                # n2vc_redesign STEP 3.2

                self._write_configuration_status(
                    nsr_id=nsr_id,
                    vca_index=vca_index,
                    status='REGISTERING',
                    element_under_configuration=element_under_configuration,
                    element_type=element_type
                )

                step = "register execution environment {}".format(credentials)
                self.logger.debug(logging_text + step)
                ee_id = await self.vca_map[vca_type].register_execution_environment(
                    credentials=credentials,
                    namespace=namespace,
                    db_dict=db_dict,
                    cloud_name=vca_cloud,
                    credential_name=vca_cloud_credential,
                )

            # for compatibility with MON/POL modules, the need model and application name at database
            # TODO ask MON/POL if needed to not assuming anymore the format "model_name.application_name"
            ee_id_parts = ee_id.split('.')
            db_nsr_update = {db_update_entry + "ee_id": ee_id}
            if len(ee_id_parts) >= 2:
                model_name = ee_id_parts[0]
                application_name = ee_id_parts[1]
                db_nsr_update[db_update_entry + "model"] = model_name
                db_nsr_update[db_update_entry + "application"] = application_name

            # n2vc_redesign STEP 3.3
            step = "Install configuration Software"

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='INSTALLING SW',
                element_under_configuration=element_under_configuration,
                element_type=element_type,
                other_update=db_nsr_update
            )

            # TODO check if already done
            self.logger.debug(logging_text + step)
            config = None
            if vca_type == "native_charm":
                # pass the 'config' primitive parameters as charm config at install time
                config_primitive = next((p for p in initial_config_primitive_list if p["name"] == "config"), None)
                if config_primitive:
                    config = self._map_primitive_params(
                        config_primitive,
                        {},
                        deploy_params
                    )
            num_units = 1
            if vca_type == "lxc_proxy_charm":
                if element_type == "NS":
                    num_units = db_nsr.get("config-units") or 1
                elif element_type == "VNF":
                    num_units = db_vnfr.get("config-units") or 1
                elif element_type == "VDU":
                    for v in db_vnfr["vdur"]:
                        if vdu_id == v["vdu-id-ref"]:
                            num_units = v.get("config-units") or 1
                            break
            if vca_type != "k8s_proxy_charm":
                await self.vca_map[vca_type].install_configuration_sw(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    db_dict=db_dict,
                    config=config,
                    num_units=num_units,
                )

            # write in db flag of configuration_sw already installed
            self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})

            # add relations for this VCA (wait for other peers related with this VCA)
            await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id,
                                          vca_index=vca_index, vca_type=vca_type)

            # if SSH access is required, then get execution environment SSH public
            # if native charm we have waited already to VM be UP
            if vca_type in ("k8s_proxy_charm", "lxc_proxy_charm", "helm", "helm-v3"):
                pub_key = None
                user = None
                # self.logger.debug("get ssh key block")
                if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
                    # self.logger.debug("ssh key needed")
                    # Needed to inject a ssh key
                    user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
                    step = "Install configuration Software, getting public ssh key"
                    pub_key = await self.vca_map[vca_type].get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)

                    step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
                else:
                    # self.logger.debug("no need to get ssh key")
                    step = "Waiting to VM being up and getting IP address"
                self.logger.debug(logging_text + step)

                # n2vc_redesign STEP 5.1
                # wait for RO (ip-address) Insert pub_key into VM
                if vnfr_id:
                    if kdu_name:
                        rw_mgmt_ip = await self.wait_kdu_up(logging_text, nsr_id, vnfr_id, kdu_name)
                    else:
                        rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id,
                                                                         vdu_index, user=user, pub_key=pub_key)
                else:
                    rw_mgmt_ip = None  # This is for a NS configuration

                self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))

            # store rw_mgmt_ip in deploy params for later replacement
            deploy_params["rw_mgmt_ip"] = rw_mgmt_ip

            # n2vc_redesign STEP 6  Execute initial config primitive
            step = 'execute initial config primitive'

            # wait for dependent primitives execution (NS -> VNF -> VDU)
            if initial_config_primitive_list:
                await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)

            # stage, in function of element type: vdu, kdu, vnf or ns
            my_vca = vca_deployed_list[vca_index]
            if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
                # VDU or KDU
                stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
            elif my_vca.get("member-vnf-index"):
                # VNF
                stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
            else:
                # NS
                stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='EXECUTING PRIMITIVE'
            )

            self._write_op_status(
                op_id=nslcmop_id,
                stage=stage
            )

            check_if_terminated_needed = True
            for initial_config_primitive in initial_config_primitive_list:
                # adding information on the vca_deployed if it is a NS execution environment
                if not vca_deployed["member-vnf-index"]:
                    deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
                # TODO check if already done
                primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)

                step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
                self.logger.debug(logging_text + step)
                await self.vca_map[vca_type].exec_primitive(
                    ee_id=ee_id,
                    primitive_name=initial_config_primitive["name"],
                    params_dict=primitive_params_,
                    db_dict=db_dict
                )
                # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
                if check_if_terminated_needed:
                    if config_descriptor.get('terminate-config-primitive'):
                        self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
                    check_if_terminated_needed = False

                # TODO register in database that primitive is done

            # STEP 7 Configure metrics
            if vca_type == "helm" or vca_type == "helm-v3":
                prometheus_jobs = await self.add_prometheus_metrics(
                    ee_id=ee_id,
                    artifact_path=artifact_path,
                    ee_config_descriptor=ee_config_descriptor,
                    vnfr_id=vnfr_id,
                    nsr_id=nsr_id,
                    target_ip=rw_mgmt_ip,
                )
                if prometheus_jobs:
                    self.update_db_2("nsrs", nsr_id, {db_update_entry + "prometheus_jobs": prometheus_jobs})

            step = "instantiated at VCA"
            self.logger.debug(logging_text + step)

            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='READY'
            )

        except Exception as e:  # TODO not use Exception but N2VC exception
            # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
            if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
                self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
            self._write_configuration_status(
                nsr_id=nsr_id,
                vca_index=vca_index,
                status='BROKEN'
            )
            raise LcmException("{} {}".format(step, e)) from e
1457
1458 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
1459 error_description: str = None, error_detail: str = None, other_update: dict = None):
1460 """
1461 Update db_nsr fields.
1462 :param nsr_id:
1463 :param ns_state:
1464 :param current_operation:
1465 :param current_operation_id:
1466 :param error_description:
1467 :param error_detail:
1468 :param other_update: Other required changes at database if provided, will be cleared
1469 :return:
1470 """
1471 try:
1472 db_dict = other_update or {}
1473 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1474 db_dict["_admin.current-operation"] = current_operation_id
1475 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
1476 db_dict["currentOperation"] = current_operation
1477 db_dict["currentOperationID"] = current_operation_id
1478 db_dict["errorDescription"] = error_description
1479 db_dict["errorDetail"] = error_detail
1480
1481 if ns_state:
1482 db_dict["nsState"] = ns_state
1483 self.update_db_2("nsrs", nsr_id, db_dict)
1484 except DbException as e:
1485 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1486
1487 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1488 operation_state: str = None, other_update: dict = None):
1489 try:
1490 db_dict = other_update or {}
1491 db_dict['queuePosition'] = queuePosition
1492 if isinstance(stage, list):
1493 db_dict['stage'] = stage[0]
1494 db_dict['detailed-status'] = " ".join(stage)
1495 elif stage is not None:
1496 db_dict['stage'] = str(stage)
1497
1498 if error_message is not None:
1499 db_dict['errorMessage'] = error_message
1500 if operation_state is not None:
1501 db_dict['operationState'] = operation_state
1502 db_dict["statusEnteredTime"] = time()
1503 self.update_db_2("nslcmops", op_id, db_dict)
1504 except DbException as e:
1505 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1506
1507 def _write_all_config_status(self, db_nsr: dict, status: str):
1508 try:
1509 nsr_id = db_nsr["_id"]
1510 # configurationStatus
1511 config_status = db_nsr.get('configurationStatus')
1512 if config_status:
1513 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1514 enumerate(config_status) if v}
1515 # update status
1516 self.update_db_2("nsrs", nsr_id, db_nsr_update)
1517
1518 except DbException as e:
1519 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1520
1521 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
1522 element_under_configuration: str = None, element_type: str = None,
1523 other_update: dict = None):
1524
1525 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1526 # .format(vca_index, status))
1527
1528 try:
1529 db_path = 'configurationStatus.{}.'.format(vca_index)
1530 db_dict = other_update or {}
1531 if status:
1532 db_dict[db_path + 'status'] = status
1533 if element_under_configuration:
1534 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1535 if element_type:
1536 db_dict[db_path + 'elementType'] = element_type
1537 self.update_db_2("nsrs", nsr_id, db_dict)
1538 except DbException as e:
1539 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1540 .format(status, nsr_id, vca_index, e))
1541
1542 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1543 """
1544 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1545 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1546 Database is used because the result can be obtained from a different LCM worker in case of HA.
1547 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1548 :param db_nslcmop: database content of nslcmop
1549 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
1550 :return: True if some modification is done. Modifies database vnfrs and parameter db_vnfr with the
1551 computed 'vim-account-id'
1552 """
1553 modified = False
1554 nslcmop_id = db_nslcmop['_id']
1555 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1556 if placement_engine == "PLA":
1557 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1558 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
1559 db_poll_interval = 5
1560 wait = db_poll_interval * 10
1561 pla_result = None
1562 while not pla_result and wait >= 0:
1563 await asyncio.sleep(db_poll_interval)
1564 wait -= db_poll_interval
1565 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
1566 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1567
1568 if not pla_result:
1569 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
1570
1571 for pla_vnf in pla_result['vnf']:
1572 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1573 if not pla_vnf.get('vimAccountId') or not vnfr:
1574 continue
1575 modified = True
1576 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
1577 # Modifies db_vnfrs
1578 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
1579 return modified
1580
1581 def update_nsrs_with_pla_result(self, params):
1582 try:
1583 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1584 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1585 except Exception as e:
1586 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1587
async def instantiate(self, nsr_id, nslcmop_id):
    """
    Instantiate a NS: read all needed records from database, deploy KDUs,
    deploy the NS at VIM (through RO) and deploy execution environments (N2VC),
    then reconcile the final operation status in the database.

    Long-running sub-deployments are launched as asyncio tasks registered in
    self.lcm_tasks and awaited in the finally block.

    :param nsr_id: ns instance to deploy
    :param nslcmop_id: operation to run
    :return: None. Outcome is written to nsrs/nslcmops records and notified
             over kafka topic "ns", message "instantiated".
    """

    # Try to lock HA task here; if another LCM worker owns it, do nothing.
    task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
    if not task_is_locked_by_me:
        self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
        return

    logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
    self.logger.debug(logging_text + "Enter")

    # get all needed from database

    # database nsrs record
    db_nsr = None

    # database nslcmops record
    db_nslcmop = None

    # update operation on nsrs
    db_nsr_update = {}
    # update operation on nslcmops
    db_nslcmop_update = {}

    nslcmop_operation_state = None
    db_vnfrs = {}  # vnf's info indexed by member-index
    # n2vc_info = {}
    tasks_dict_info = {}  # from task to info text
    exc = None
    error_list = []
    stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
    # ^ stage, step, VIM progress
    try:
        # wait for any previous tasks in process
        await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)

        stage[1] = "Sync filesystem from database."
        self.fs.sync()  # TODO, make use of partial sync, only for the needed packages

        # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
        stage[1] = "Reading from database."
        # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
        db_nsr_update["detailed-status"] = "creating"
        db_nsr_update["operational-status"] = "init"
        self._write_ns_status(
            nsr_id=nsr_id,
            ns_state="BUILDING",
            current_operation="INSTANTIATING",
            current_operation_id=nslcmop_id,
            other_update=db_nsr_update
        )
        self._write_op_status(
            op_id=nslcmop_id,
            stage=stage,
            queuePosition=0
        )

        # read from db: operation
        stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
        db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
        ns_params = db_nslcmop.get("operationParams")
        # per-operation timeout overrides the configured/default one.
        # NOTE: timeout_ns_deploy is only bound from here on; the finally block
        # only reads it when tasks_dict_info is non-empty, which also requires
        # execution to have reached past this point.
        if ns_params and ns_params.get("timeout_ns_deploy"):
            timeout_ns_deploy = ns_params["timeout_ns_deploy"]
        else:
            timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)

        # read from db: ns
        stage[1] = "Getting nsr={} from db.".format(nsr_id)
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        stage[1] = "Getting nsd={} from db.".format(db_nsr["nsd-id"])
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
        db_nsr["nsd"] = nsd
        # nsr_name = db_nsr["name"]   # TODO short-name??

        # read from db: vnf's of this ns
        stage[1] = "Getting vnfrs from db."
        self.logger.debug(logging_text + stage[1])
        db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})

        # read from db: vnfd's for every vnf
        db_vnfds = []  # every vnfd data

        # for each vnf in ns, read vnfd
        for vnfr in db_vnfrs_list:
            db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
            vnfd_id = vnfr["vnfd-id"]
            vnfd_ref = vnfr["vnfd-ref"]

            # if we haven't this vnfd, read it from db
            # NOTE(review): db_vnfds holds full vnfd dicts, so this membership
            # test compares the id against dicts and is always True; each vnfd
            # appears to be fetched once per referencing vnfr — confirm intent.
            if vnfd_id not in db_vnfds:
                # read from db
                stage[1] = "Getting vnfd={} id='{}' from db.".format(vnfd_id, vnfd_ref)
                self.logger.debug(logging_text + stage[1])
                vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})

                # store vnfd
                db_vnfds.append(vnfd)

        # Get or generates the _admin.deployed.VCA list
        vca_deployed_list = None
        if db_nsr["_admin"].get("deployed"):
            vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
        if vca_deployed_list is None:
            vca_deployed_list = []
            configuration_status_list = []
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            db_nsr_update["configurationStatus"] = configuration_status_list
            # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
        elif isinstance(vca_deployed_list, dict):
            # maintain backward compatibility. Change a dict to list at database
            vca_deployed_list = list(vca_deployed_list.values())
            db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
            populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)

        if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
            populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
            db_nsr_update["_admin.deployed.RO.vnfd"] = []

        # set state to INSTANTIATED. When instantiated NBI will not delete directly
        db_nsr_update["_admin.nsState"] = "INSTANTIATED"
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "INSTANTIATED"})

        # n2vc_redesign STEP 2 Deploy Network Scenario
        stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
        self._write_op_status(
            op_id=nslcmop_id,
            stage=stage
        )

        stage[1] = "Deploying KDUs."
        # self.logger.debug(logging_text + "Before deploy_kdus")
        # Call to deploy_kdus in case exists the "vdu:kdu" param
        # (deploy_kdus registers its own asyncio tasks into tasks_dict_info)
        await self.deploy_kdus(
            logging_text=logging_text,
            nsr_id=nsr_id,
            nslcmop_id=nslcmop_id,
            db_vnfrs=db_vnfrs,
            db_vnfds=db_vnfds,
            task_instantiation_info=tasks_dict_info,
        )

        stage[1] = "Getting VCA public key."
        # n2vc_redesign STEP 1 Get VCA public ssh-key
        # feature 1429. Add n2vc public key to needed VMs
        n2vc_key = self.n2vc.get_public_key()
        n2vc_key_list = [n2vc_key]
        if self.vca_config.get("public_key"):
            n2vc_key_list.append(self.vca_config["public_key"])

        stage[1] = "Deploying NS at VIM."
        # VIM deployment runs concurrently with EE deployment below; it is
        # awaited in the finally block via tasks_dict_info.
        task_ro = asyncio.ensure_future(
            self.instantiate_RO(
                logging_text=logging_text,
                nsr_id=nsr_id,
                nsd=nsd,
                db_nsr=db_nsr,
                db_nslcmop=db_nslcmop,
                db_vnfrs=db_vnfrs,
                db_vnfds=db_vnfds,
                n2vc_key_list=n2vc_key_list,
                stage=stage
            )
        )
        self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
        tasks_dict_info[task_ro] = "Deploying at VIM"

        # n2vc_redesign STEP 3 to 6 Deploy N2VC
        stage[1] = "Deploying Execution Environments."
        self.logger.debug(logging_text + stage[1])

        nsi_id = None  # TODO put nsi_id when this nsr belongs to a NSI
        for vnf_profile in get_vnf_profiles(nsd):
            vnfd_id = vnf_profile["vnfd-id"]
            vnfd = find_in_list(db_vnfds, lambda a_vnf: a_vnf["id"] == vnfd_id)
            member_vnf_index = str(vnf_profile["id"])
            db_vnfr = db_vnfrs[member_vnf_index]
            base_folder = vnfd["_admin"]["storage"]
            vdu_id = None
            vdu_index = 0
            vdu_name = None
            kdu_name = None

            # Get additional parameters
            deploy_params = {"OSM": get_osm_params(db_vnfr)}
            if db_vnfr.get("additionalParamsForVnf"):
                deploy_params.update(parse_yaml_strings(db_vnfr["additionalParamsForVnf"].copy()))

            # VNF-level charm configuration
            descriptor_config = get_vnf_configuration(vnfd)
            if descriptor_config:
                self._deploy_n2vc(
                    logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    nslcmop_id=nslcmop_id,
                    nsr_id=nsr_id,
                    nsi_id=nsi_id,
                    vnfd_id=vnfd_id,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    member_vnf_index=member_vnf_index,
                    vdu_index=vdu_index,
                    vdu_name=vdu_name,
                    deploy_params=deploy_params,
                    descriptor_config=descriptor_config,
                    base_folder=base_folder,
                    task_instantiation_info=tasks_dict_info,
                    stage=stage
                )

            # Deploy charms for each VDU that supports one.
            for vdud in get_vdu_list(vnfd):
                vdu_id = vdud["id"]
                descriptor_config = get_vdu_configuration(vnfd, vdu_id)
                vdur = find_in_list(db_vnfr["vdur"], lambda vdu: vdu["vdu-id-ref"] == vdu_id)

                if vdur.get("additionalParams"):
                    deploy_params_vdu = parse_yaml_strings(vdur["additionalParams"])
                else:
                    # NOTE(review): aliases deploy_params, so the "OSM" key set
                    # below also mutates the vnf-level dict — confirm intended.
                    deploy_params_vdu = deploy_params
                deploy_params_vdu["OSM"] = get_osm_params(db_vnfr, vdu_id, vdu_count_index=0)
                vdud_count = get_vdu_profile(vnfd, vdu_id).get("max-number-of-instances", 1)

                self.logger.debug("VDUD > {}".format(vdud))
                self.logger.debug("Descriptor config > {}".format(descriptor_config))
                if descriptor_config:
                    vdu_name = None
                    kdu_name = None
                    # one execution environment per VDU instance
                    for vdu_index in range(vdud_count):
                        # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
                        self._deploy_n2vc(
                            logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
                                member_vnf_index, vdu_id, vdu_index),
                            db_nsr=db_nsr,
                            db_vnfr=db_vnfr,
                            nslcmop_id=nslcmop_id,
                            nsr_id=nsr_id,
                            nsi_id=nsi_id,
                            vnfd_id=vnfd_id,
                            vdu_id=vdu_id,
                            kdu_name=kdu_name,
                            member_vnf_index=member_vnf_index,
                            vdu_index=vdu_index,
                            vdu_name=vdu_name,
                            deploy_params=deploy_params_vdu,
                            descriptor_config=descriptor_config,
                            base_folder=base_folder,
                            task_instantiation_info=tasks_dict_info,
                            stage=stage
                        )
            # KDU-level charm configuration
            for kdud in get_kdu_list(vnfd):
                kdu_name = kdud["name"]
                descriptor_config = kdud.get('kdu-configuration')
                if descriptor_config:
                    vdu_id = None
                    vdu_index = 0
                    vdu_name = None
                    kdur = next(x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name)
                    deploy_params_kdu = {"OSM": get_osm_params(db_vnfr)}
                    if kdur.get("additionalParams"):
                        # NOTE(review): this replaces (not updates) the dict,
                        # discarding the "OSM" key set above — confirm intended.
                        deploy_params_kdu = parse_yaml_strings(kdur["additionalParams"])

                    self._deploy_n2vc(
                        logging_text=logging_text,
                        db_nsr=db_nsr,
                        db_vnfr=db_vnfr,
                        nslcmop_id=nslcmop_id,
                        nsr_id=nsr_id,
                        nsi_id=nsi_id,
                        vnfd_id=vnfd_id,
                        vdu_id=vdu_id,
                        kdu_name=kdu_name,
                        member_vnf_index=member_vnf_index,
                        vdu_index=vdu_index,
                        vdu_name=vdu_name,
                        deploy_params=deploy_params_kdu,
                        descriptor_config=descriptor_config,
                        base_folder=base_folder,
                        task_instantiation_info=tasks_dict_info,
                        stage=stage
                    )

        # Check if this NS has a charm configuration
        descriptor_config = nsd.get("ns-configuration")
        if descriptor_config and descriptor_config.get("juju"):
            vnfd_id = None
            db_vnfr = None
            member_vnf_index = None
            vdu_id = None
            kdu_name = None
            vdu_index = 0
            vdu_name = None

            # Get additional parameters
            deploy_params = {"OSM": {"vim_account_id": ns_params["vimAccountId"]}}
            if db_nsr.get("additionalParamsForNs"):
                deploy_params.update(parse_yaml_strings(db_nsr["additionalParamsForNs"].copy()))
            base_folder = nsd["_admin"]["storage"]
            self._deploy_n2vc(
                logging_text=logging_text,
                db_nsr=db_nsr,
                db_vnfr=db_vnfr,
                nslcmop_id=nslcmop_id,
                nsr_id=nsr_id,
                nsi_id=nsi_id,
                vnfd_id=vnfd_id,
                vdu_id=vdu_id,
                kdu_name=kdu_name,
                member_vnf_index=member_vnf_index,
                vdu_index=vdu_index,
                vdu_name=vdu_name,
                deploy_params=deploy_params,
                descriptor_config=descriptor_config,
                base_folder=base_folder,
                task_instantiation_info=tasks_dict_info,
                stage=stage
            )

        # rest of staff will be done at finally

    except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
        self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
        exc = e
    except asyncio.CancelledError:
        self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
        exc = "Operation was cancelled"
    except Exception as e:
        exc = traceback.format_exc()
        self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
    finally:
        if exc:
            error_list.append(str(exc))
        try:
            # wait for pending tasks
            if tasks_dict_info:
                stage[1] = "Waiting for instantiate pending tasks."
                self.logger.debug(logging_text + stage[1])
                error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
                                                         stage, nslcmop_id, nsr_id=nsr_id)
            stage[1] = stage[2] = ""
        except asyncio.CancelledError:
            error_list.append("Cancelled")
            # TODO cancel all tasks
        except Exception as exc:
            # 'exc' here shadows the outer variable; it is not read again afterwards
            error_list.append(str(exc))

        # update operation-status
        db_nsr_update["operational-status"] = "running"
        # let's begin with VCA 'configured' status (later we can change it)
        db_nsr_update["config-status"] = "configured"
        for task, task_name in tasks_dict_info.items():
            if not task.done() or task.cancelled() or task.exception():
                if task_name.startswith(self.task_name_deploy_vca):
                    # A N2VC task is pending
                    db_nsr_update["config-status"] = "failed"
                else:
                    # RO or KDU task is pending
                    db_nsr_update["operational-status"] = "failed"

        # update status at database
        if error_list:
            error_detail = ". ".join(error_list)
            self.logger.error(logging_text + error_detail)
            error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
            error_description_nsr = 'Operation: INSTANTIATING.{}, {}'.format(nslcmop_id, stage[0])

            db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
            db_nslcmop_update["detailed-status"] = error_detail
            nslcmop_operation_state = "FAILED"
            ns_state = "BROKEN"
        else:
            error_detail = None
            error_description_nsr = error_description_nslcmop = None
            ns_state = "READY"
            db_nsr_update["detailed-status"] = "Done"
            db_nslcmop_update["detailed-status"] = "Done"
            nslcmop_operation_state = "COMPLETED"

        if db_nsr:
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state=ns_state,
                current_operation="IDLE",
                current_operation_id=None,
                error_description=error_description_nsr,
                error_detail=error_detail,
                other_update=db_nsr_update
            )
        self._write_op_status(
            op_id=nslcmop_id,
            stage="",
            error_message=error_description_nslcmop,
            operation_state=nslcmop_operation_state,
            other_update=db_nslcmop_update,
        )

        # notify result over kafka (best-effort; failure is only logged)
        if nslcmop_operation_state:
            try:
                await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                               "operationState": nslcmop_operation_state},
                                        loop=self.loop)
            except Exception as e:
                self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

        self.logger.debug(logging_text + "Exit")
        self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
2001
async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int,
                             timeout: int = 3600, vca_type: str = None) -> bool:
    """
    Add the juju relations that involve the VCA at nsrs._admin.deployed.VCA[vca_index].

    Collects relations declared at nsd ns-configuration and at each vnfd
    vnf-configuration, then polls the nsr record every 5 seconds until both
    ends of every relation have their execution environment ready
    (config_sw_installed), calling add_relation on the proper connector.
    Relations whose peer is reported BROKEN in configurationStatus are dropped.

    :param logging_text: prefix for log messages
    :param nsr_id: nsr _id to read from database
    :param vca_index: index of this VCA inside nsrs._admin.deployed.VCA
    :param timeout: max seconds to wait for peers before giving up
    :param vca_type: key into self.vca_map; defaults to "lxc_proxy_charm"
    :return: True when all relations are added (or none are defined),
             False on timeout or on any error (never raises)
    """

    # steps:
    # 1. find all relations for this VCA
    # 2. wait for other peers related
    # 3. add relations

    try:
        vca_type = vca_type or "lxc_proxy_charm"

        # STEP 1: find all relations for this VCA

        # read nsr record
        db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
        nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})

        # this VCA data
        my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]

        # read all ns-configuration relations
        # NOTE(review): relations are assumed to have exactly two entries in
        # 'entities' — entities[0] and entities[1] are accessed directly.
        ns_relations = list()
        db_ns_relations = deep_get(nsd, ('ns-configuration', 'relation'))
        if db_ns_relations:
            for r in db_ns_relations:
                # check if this VCA is in the relation
                if my_vca.get('member-vnf-index') in\
                        (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
                    ns_relations.append(r)

        # read all vnf-configuration relations
        vnf_relations = list()
        db_vnfd_list = db_nsr.get('vnfd-id')
        if db_vnfd_list:
            for vnfd in db_vnfd_list:
                db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
                db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
                if db_vnf_relations:
                    for r in db_vnf_relations:
                        # check if this VCA is in the relation
                        if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
                            vnf_relations.append(r)

        # if no relations, terminate
        if not ns_relations and not vnf_relations:
            self.logger.debug(logging_text + ' No relations')
            return True

        self.logger.debug(logging_text + ' adding relations\n    {}\n    {}'.format(ns_relations, vnf_relations))

        # add all relations
        start = time()
        while True:
            # check timeout
            now = time()
            if now - start >= timeout:
                self.logger.error(logging_text + ' : timeout adding relations')
                return False

            # reload nsr from database (we need to update record: _admin.deloyed.VCA)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            # for each defined NS relation, find the VCA's related
            # (iterate over a copy because entries are removed while iterating)
            for r in ns_relations.copy():
                from_vca_ee_id = None
                to_vca_ee_id = None
                from_vca_endpoint = None
                to_vca_endpoint = None
                vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
                for vca in vca_list:
                    if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
                            and vca.get('config_sw_installed'):
                        from_vca_ee_id = vca.get('ee_id')
                        from_vca_endpoint = r.get('entities')[0].get('endpoint')
                    if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
                            and vca.get('config_sw_installed'):
                        to_vca_ee_id = vca.get('ee_id')
                        to_vca_endpoint = r.get('entities')[1].get('endpoint')
                if from_vca_ee_id and to_vca_ee_id:
                    # add relation
                    await self.vca_map[vca_type].add_relation(
                        ee_id_1=from_vca_ee_id,
                        ee_id_2=to_vca_ee_id,
                        endpoint_1=from_vca_endpoint,
                        endpoint_2=to_vca_endpoint)
                    # remove entry from relations list
                    ns_relations.remove(r)
                else:
                    # check failed peers
                    try:
                        # configurationStatus is assumed index-aligned with VCA list
                        vca_status_list = db_nsr.get('configurationStatus')
                        if vca_status_list:
                            for i in range(len(vca_list)):
                                vca = vca_list[i]
                                vca_status = vca_status_list[i]
                                if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
                                    if vca_status.get('status') == 'BROKEN':
                                        # peer broken: remove relation from list
                                        ns_relations.remove(r)
                                if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
                                    if vca_status.get('status') == 'BROKEN':
                                        # peer broken: remove relation from list
                                        ns_relations.remove(r)
                    except Exception:
                        # ignore
                        pass

            # for each defined VNF relation, find the VCA's related
            for r in vnf_relations.copy():
                from_vca_ee_id = None
                to_vca_ee_id = None
                from_vca_endpoint = None
                to_vca_endpoint = None
                vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
                for vca in vca_list:
                    # VNF-level charms have no vdu_id; match on vnfd_id instead
                    key_to_check = "vdu_id"
                    if vca.get("vdu_id") is None:
                        key_to_check = "vnfd_id"
                    if vca.get(key_to_check) == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
                        from_vca_ee_id = vca.get('ee_id')
                        from_vca_endpoint = r.get('entities')[0].get('endpoint')
                    if vca.get(key_to_check) == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
                        to_vca_ee_id = vca.get('ee_id')
                        to_vca_endpoint = r.get('entities')[1].get('endpoint')
                if from_vca_ee_id and to_vca_ee_id:
                    # add relation
                    await self.vca_map[vca_type].add_relation(
                        ee_id_1=from_vca_ee_id,
                        ee_id_2=to_vca_ee_id,
                        endpoint_1=from_vca_endpoint,
                        endpoint_2=to_vca_endpoint)
                    # remove entry from relations list
                    vnf_relations.remove(r)
                else:
                    # check failed peers
                    try:
                        vca_status_list = db_nsr.get('configurationStatus')
                        if vca_status_list:
                            for i in range(len(vca_list)):
                                vca = vca_list[i]
                                vca_status = vca_status_list[i]
                                if vca.get('vdu_id') == r.get('entities')[0].get('id'):
                                    if vca_status.get('status') == 'BROKEN':
                                        # peer broken: remove relation from list
                                        vnf_relations.remove(r)
                                if vca.get('vdu_id') == r.get('entities')[1].get('id'):
                                    if vca_status.get('status') == 'BROKEN':
                                        # peer broken: remove relation from list
                                        vnf_relations.remove(r)
                    except Exception:
                        # ignore
                        pass

            # wait for next try
            # (the sleep runs even when both lists are already empty; the
            #  completion check is only performed after it)
            await asyncio.sleep(5.0)

            if not ns_relations and not vnf_relations:
                self.logger.debug('Relations added')
                break

        return True

    except Exception as e:
        self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
        return False
2167
async def _install_kdu(self, nsr_id: str, nsr_db_path: str, vnfr_data: dict, kdu_index: int, kdud: dict,
                       vnfd: dict, k8s_instance_info: dict, k8params: dict = None, timeout: int = 600):
    """
    Install one KDU on its k8s cluster and record the result in the database.

    Flow:
    1. install() via the connector selected by k8s_instance_info["k8scluster-type"]
       and store the returned kdu-instance id at nsrs <nsr_db_path>.kdu-instance.
    2. Read the deployed k8s services; if the kdud declares mgmt-services, store
       the matching service ip at vnfr kdur.<kdu_index>.ip-address (and at the
       vnfr ip-address when the mgmt cp of the vnfd maps to that service).
    3. Mark kdur status READY.
    4. Run non-juju initial-config-primitives ordered by "seq".

    On any failure the error text is written to nsrs detailed-status, the kdur
    status is set to ERROR, and the original exception is re-raised.

    :param nsr_id: nsr _id
    :param nsr_db_path: dot-path inside the nsr record for this K8s deployment entry
    :param vnfr_data: vnfr record of the owning vnf
    :param kdu_index: position of this kdu inside vnfr "kdur"
    :param kdud: kdu descriptor from the vnfd
    :param vnfd: vnfd record (used to check mgmt-interface cp)
    :param k8s_instance_info: cluster uuid/type, kdu name/model, namespace
    :param k8params: parameters passed to the connector install()
    :param timeout: seconds for install and for each primitive execution
    :return: kdu_instance identifier returned by the connector
    """

    try:
        k8sclustertype = k8s_instance_info["k8scluster-type"]
        # Instantiate kdu
        db_dict_install = {"collection": "nsrs",
                           "filter": {"_id": nsr_id},
                           "path": nsr_db_path}

        kdu_instance = await self.k8scluster_map[k8sclustertype].install(
            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
            kdu_model=k8s_instance_info["kdu-model"],
            atomic=True,
            params=k8params,
            db_dict=db_dict_install,
            timeout=timeout,
            kdu_name=k8s_instance_info["kdu-name"],
            namespace=k8s_instance_info["namespace"])
        self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".kdu-instance": kdu_instance})

        # Obtain services to obtain management service ip
        services = await self.k8scluster_map[k8sclustertype].get_services(
            cluster_uuid=k8s_instance_info["k8scluster-uuid"],
            kdu_instance=kdu_instance,
            namespace=k8s_instance_info["namespace"])

        # Obtain management service info (if exists)
        vnfr_update_dict = {}
        if services:
            vnfr_update_dict["kdur.{}.services".format(kdu_index)] = services
            mgmt_services = [service for service in kdud.get("service", []) if service.get("mgmt-service")]
            for mgmt_service in mgmt_services:
                for service in services:
                    # match by name prefix (k8s may suffix the service name)
                    if service["name"].startswith(mgmt_service["name"]):
                        # Mgmt service found, Obtain service ip
                        ip = service.get("external_ip", service.get("cluster_ip"))
                        if isinstance(ip, list) and len(ip) == 1:
                            ip = ip[0]

                        vnfr_update_dict["kdur.{}.ip-address".format(kdu_index)] = ip

                        # Check if must update also mgmt ip at the vnf
                        service_external_cp = mgmt_service.get("external-connection-point-ref")
                        if service_external_cp:
                            if deep_get(vnfd, ("mgmt-interface", "cp")) == service_external_cp:
                                vnfr_update_dict["ip-address"] = ip

                        break
                else:
                    self.logger.warn("Mgmt service name: {} not found".format(mgmt_service["name"]))

        vnfr_update_dict["kdur.{}.status".format(kdu_index)] = "READY"
        self.update_db_2("vnfrs", vnfr_data.get("_id"), vnfr_update_dict)

        kdu_config = kdud.get("kdu-configuration")
        # juju-based kdu configuration is handled elsewhere; only run
        # primitives here when "juju" is absent
        if kdu_config and kdu_config.get("initial-config-primitive") and kdu_config.get("juju") is None:
            initial_config_primitive_list = kdu_config.get("initial-config-primitive")
            initial_config_primitive_list.sort(key=lambda val: int(val["seq"]))

            for initial_config_primitive in initial_config_primitive_list:
                primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, {})

                await asyncio.wait_for(
                    self.k8scluster_map[k8sclustertype].exec_primitive(
                        cluster_uuid=k8s_instance_info["k8scluster-uuid"],
                        kdu_instance=kdu_instance,
                        primitive_name=initial_config_primitive["name"],
                        params=primitive_params_, db_dict={}),
                    timeout=timeout)

    except Exception as e:
        # Prepare update db with error and raise exception
        try:
            self.update_db_2("nsrs", nsr_id, {nsr_db_path + ".detailed-status": str(e)})
            self.update_db_2("vnfrs", vnfr_data.get("_id"), {"kdur.{}.status".format(kdu_index): "ERROR"})
        except Exception:
            # ignore to keep original exception
            pass
        # reraise original error
        raise

    return kdu_instance
2251
async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
    """
    Launch the installation of every kdu present in the vnfrs of this ns.

    For each kdur: resolve the k8s cluster internal id (initializing helm-v3
    on old clusters if needed), synchronize helm repos once per cluster, write
    the K8s deployment entry at nsrs._admin.deployed.K8s.<index>, and launch
    _install_kdu as an asyncio task registered in self.lcm_tasks and in
    task_instantiation_info (awaited later by the caller).

    :param logging_text: prefix for log messages
    :param nsr_id: nsr _id
    :param nslcmop_id: current operation id (for task registration)
    :param db_vnfrs: vnfr records indexed by member-vnf-index
    :param db_vnfds: list of vnfd records of this ns
    :param task_instantiation_info: dict task -> descriptive text, filled here
    :raises LcmException: on any deployment preparation error
    """
    # Launch kdus if present in the descriptor

    # cache: cluster_type -> {cluster_id: internal k8s id}
    k8scluster_id_2_uuic = {"helm-chart-v3": {}, "helm-chart": {}, "juju-bundle": {}}

    async def _get_cluster_id(cluster_id, cluster_type):
        """Resolve (and cache) the connector-internal id of a k8s cluster,
        initializing helm-chart-v3 on clusters created before v3 support."""
        nonlocal k8scluster_id_2_uuic
        if cluster_id in k8scluster_id_2_uuic[cluster_type]:
            return k8scluster_id_2_uuic[cluster_type][cluster_id]

        # check if K8scluster is creating and wait look if previous tasks in process
        task_name, task_dependency = self.lcm_tasks.lookfor_related("k8scluster", cluster_id)
        if task_dependency:
            text = "Waiting for related tasks '{}' on k8scluster {} to be completed".format(task_name, cluster_id)
            self.logger.debug(logging_text + text)
            await asyncio.wait(task_dependency, timeout=3600)

        db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
        if not db_k8scluster:
            raise LcmException("K8s cluster {} cannot be found".format(cluster_id))

        k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
        if not k8s_id:
            if cluster_type == "helm-chart-v3":
                try:
                    # backward compatibility for existing clusters that have not been initialized for helm v3
                    k8s_credentials = yaml.safe_dump(db_k8scluster.get("credentials"))
                    k8s_id, uninstall_sw = await self.k8sclusterhelm3.init_env(k8s_credentials,
                                                                               reuse_cluster_uuid=cluster_id)
                    db_k8scluster_update = {}
                    db_k8scluster_update["_admin.helm-chart-v3.error_msg"] = None
                    db_k8scluster_update["_admin.helm-chart-v3.id"] = k8s_id
                    db_k8scluster_update["_admin.helm-chart-v3.created"] = uninstall_sw
                    db_k8scluster_update["_admin.helm-chart-v3.operationalState"] = "ENABLED"
                    self.update_db_2("k8sclusters", cluster_id, db_k8scluster_update)
                except Exception as e:
                    self.logger.error(logging_text + "error initializing helm-v3 cluster: {}".format(str(e)))
                    raise LcmException("K8s cluster '{}' has not been initialized for '{}'".format(cluster_id,
                                                                                                  cluster_type))
            else:
                raise LcmException("K8s cluster '{}' has not been initialized for '{}'".
                                   format(cluster_id, cluster_type))
        k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
        return k8s_id

    logging_text += "Deploy kdus: "
    step = ""
    try:
        db_nsr_update = {"_admin.deployed.K8s": []}
        self.update_db_2("nsrs", nsr_id, db_nsr_update)

        index = 0
        updated_cluster_list = []
        updated_v3_cluster_list = []

        for vnfr_data in db_vnfrs.values():
            for kdu_index, kdur in enumerate(get_iterable(vnfr_data, "kdur")):
                # Step 0: Prepare and set parameters
                desc_params = parse_yaml_strings(kdur.get("additionalParams"))
                vnfd_id = vnfr_data.get('vnfd-id')
                vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
                kdud = next(kdud for kdud in vnfd_with_id["kdu"] if kdud["name"] == kdur["kdu-name"])
                namespace = kdur.get("k8s-namespace")
                if kdur.get("helm-chart"):
                    kdumodel = kdur["helm-chart"]
                    # Default version: helm3, if helm-version is v2 assign v2
                    k8sclustertype = "helm-chart-v3"
                    self.logger.debug("kdur: {}".format(kdur))
                    if kdur.get("helm-version") and kdur.get("helm-version") == "v2":
                        k8sclustertype = "helm-chart"
                elif kdur.get("juju-bundle"):
                    kdumodel = kdur["juju-bundle"]
                    k8sclustertype = "juju-bundle"
                else:
                    raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
                                       "juju-bundle. Maybe an old NBI version is running".
                                       format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
                # check if kdumodel is a file and exists
                try:
                    vnfd_with_id = find_in_list(db_vnfds, lambda vnfd: vnfd["_id"] == vnfd_id)
                    storage = deep_get(vnfd_with_id, ('_admin', 'storage'))
                    if storage and storage.get('pkg-dir'):  # may be not present if vnfd has not artifacts
                        # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
                        filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["pkg-dir"], k8sclustertype,
                                                         kdumodel)
                        if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
                            kdumodel = self.fs.path + filename
                except (asyncio.TimeoutError, asyncio.CancelledError):
                    raise
                except Exception:  # it is not a file
                    pass

                k8s_cluster_id = kdur["k8s-cluster"]["id"]
                step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
                cluster_uuid = await _get_cluster_id(k8s_cluster_id, k8sclustertype)

                # Synchronize repos (only once per helm cluster and version)
                if (k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list)\
                        or (k8sclustertype == "helm-chart-v3" and cluster_uuid not in updated_v3_cluster_list):
                    del_repo_list, added_repo_dict = await asyncio.ensure_future(
                        self.k8scluster_map[k8sclustertype].synchronize_repos(cluster_uuid=cluster_uuid))
                    if del_repo_list or added_repo_dict:
                        if k8sclustertype == "helm-chart":
                            unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
                            updated = {'_admin.helm_charts_added.' +
                                       item: name for item, name in added_repo_dict.items()}
                            updated_cluster_list.append(cluster_uuid)
                        elif k8sclustertype == "helm-chart-v3":
                            unset = {'_admin.helm_charts_v3_added.' + item: None for item in del_repo_list}
                            updated = {'_admin.helm_charts_v3_added.' +
                                       item: name for item, name in added_repo_dict.items()}
                            updated_v3_cluster_list.append(cluster_uuid)
                        self.logger.debug(logging_text + "repos synchronized on k8s cluster "
                                                         "'{}' to_delete: {}, to_add: {}".
                                          format(k8s_cluster_id, del_repo_list, added_repo_dict))
                        self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)

                # Instantiate kdu
                step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
                                                                          kdur["kdu-name"], k8s_cluster_id)
                k8s_instance_info = {"kdu-instance": None,
                                     "k8scluster-uuid": cluster_uuid,
                                     "k8scluster-type": k8sclustertype,
                                     "member-vnf-index": vnfr_data["member-vnf-index-ref"],
                                     "kdu-name": kdur["kdu-name"],
                                     "kdu-model": kdumodel,
                                     "namespace": namespace}
                db_path = "_admin.deployed.K8s.{}".format(index)
                db_nsr_update[db_path] = k8s_instance_info
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                vnfd_with_id = find_in_list(db_vnfds, lambda vnf: vnf["_id"] == vnfd_id)
                # launch the install as a background task; awaited by the caller
                task = asyncio.ensure_future(
                    self._install_kdu(nsr_id, db_path, vnfr_data, kdu_index, kdud, vnfd_with_id,
                                      k8s_instance_info, k8params=desc_params, timeout=600))
                self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
                task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])

                index += 1

    except (LcmException, asyncio.CancelledError):
        raise
    except Exception as e:
        msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
        if isinstance(e, (N2VCException, DbException)):
            self.logger.error(logging_text + msg)
        else:
            self.logger.critical(logging_text + msg, exc_info=True)
        raise LcmException(msg)
    finally:
        if db_nsr_update:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
2403
    def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
                     kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
                     base_folder, task_instantiation_info, stage):
        """
        Launch one instantiate_N2VC asyncio task per execution environment found in
        descriptor_config, registering each task at lcm_tasks and task_instantiation_info.

        For each execution environment it looks up an existing entry at database
        <nsrs>._admin.deployed.VCA; if none matches, a new entry is created and persisted
        at db_nsr._admin.deployed.VCA.<index> before the task is launched.
        :param descriptor_config: configuration descriptor (NSD, VNFD, VNFD.vdu or VNFD.kdu) that
            may contain either a "juju" section or an "execution-environment-list"
        :param task_instantiation_info: dict task -> human-readable description, filled here
        """

        self.logger.debug(logging_text + "_deploy_n2vc vnfd_id={}, vdu_id={}".format(vnfd_id, vdu_id))
        if descriptor_config.get("juju"):  # there is a single execution environment of type juju
            ee_list = [descriptor_config]
        elif descriptor_config.get("execution-environment-list"):
            ee_list = descriptor_config.get("execution-environment-list")
        else:  # other types as script are not supported
            ee_list = []

        for ee_item in ee_list:
            self.logger.debug(logging_text + "_deploy_n2vc ee_item juju={}, helm={}".format(ee_item.get('juju'),
                                                                                            ee_item.get("helm-chart")))
            ee_descriptor_id = ee_item.get("id")
            # derive vca_type/vca_name from the kind of execution environment declared
            if ee_item.get("juju"):
                vca_name = ee_item['juju'].get('charm')
                vca_type = "lxc_proxy_charm" if ee_item['juju'].get('charm') is not None else "native_charm"
                if ee_item['juju'].get('cloud') == "k8s":
                    vca_type = "k8s_proxy_charm"
                elif ee_item['juju'].get('proxy') is False:
                    vca_type = "native_charm"
            elif ee_item.get("helm-chart"):
                vca_name = ee_item['helm-chart']
                if ee_item.get("helm-version") and ee_item.get("helm-version") == "v2":
                    vca_type = "helm"
                else:
                    vca_type = "helm-v3"
            else:
                self.logger.debug(logging_text + "skipping non juju neither charm configuration")
                continue

            # look for an existing VCA deployment entry matching this execution environment.
            # vca_index starts at -1 so that the first created entry gets index 0 when the list is empty
            vca_index = -1
            for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
                if not vca_deployed:
                    continue
                if vca_deployed.get("member-vnf-index") == member_vnf_index and \
                        vca_deployed.get("vdu_id") == vdu_id and \
                        vca_deployed.get("kdu_name") == kdu_name and \
                        vca_deployed.get("vdu_count_index", 0) == vdu_index and \
                        vca_deployed.get("ee_descriptor_id") == ee_descriptor_id:
                    break
            else:
                # not found, create one.
                target = "ns" if not member_vnf_index else "vnf/{}".format(member_vnf_index)
                if vdu_id:
                    target += "/vdu/{}/{}".format(vdu_id, vdu_index or 0)
                elif kdu_name:
                    target += "/kdu/{}".format(kdu_name)
                vca_deployed = {
                    "target_element": target,
                    # ^ target_element will replace member-vnf-index, kdu_name, vdu_id ... in a single string
                    "member-vnf-index": member_vnf_index,
                    "vdu_id": vdu_id,
                    "kdu_name": kdu_name,
                    "vdu_count_index": vdu_index,
                    "operational-status": "init",  # TODO revise
                    "detailed-status": "",  # TODO revise
                    "step": "initial-deploy",  # TODO revise
                    "vnfd_id": vnfd_id,
                    "vdu_name": vdu_name,
                    "type": vca_type,
                    "ee_descriptor_id": ee_descriptor_id
                }
                vca_index += 1

                # create VCA and configurationStatus in db
                db_dict = {
                    "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
                    "configurationStatus.{}".format(vca_index): dict()
                }
                self.update_db_2("nsrs", nsr_id, db_dict)

                # keep the in-memory copy consistent with what was just persisted
                db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)

            self.logger.debug("N2VC > NSR_ID > {}".format(nsr_id))
            self.logger.debug("N2VC > DB_NSR > {}".format(db_nsr))
            self.logger.debug("N2VC > VCA_DEPLOYED > {}".format(vca_deployed))

            # Launch task
            task_n2vc = asyncio.ensure_future(
                self.instantiate_N2VC(
                    logging_text=logging_text,
                    vca_index=vca_index,
                    nsi_id=nsi_id,
                    db_nsr=db_nsr,
                    db_vnfr=db_vnfr,
                    vdu_id=vdu_id,
                    kdu_name=kdu_name,
                    vdu_index=vdu_index,
                    deploy_params=deploy_params,
                    config_descriptor=descriptor_config,
                    base_folder=base_folder,
                    nslcmop_id=nslcmop_id,
                    stage=stage,
                    vca_type=vca_type,
                    vca_name=vca_name,
                    ee_config_descriptor=ee_item
                )
            )
            self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
            task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
                member_vnf_index or "", vdu_id or "")
2512
2513 @staticmethod
2514 def _create_nslcmop(nsr_id, operation, params):
2515 """
2516 Creates a ns-lcm-opp content to be stored at database.
2517 :param nsr_id: internal id of the instance
2518 :param operation: instantiate, terminate, scale, action, ...
2519 :param params: user parameters for the operation
2520 :return: dictionary following SOL005 format
2521 """
2522 # Raise exception if invalid arguments
2523 if not (nsr_id and operation and params):
2524 raise LcmException(
2525 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2526 now = time()
2527 _id = str(uuid4())
2528 nslcmop = {
2529 "id": _id,
2530 "_id": _id,
2531 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2532 "operationState": "PROCESSING",
2533 "statusEnteredTime": now,
2534 "nsInstanceId": nsr_id,
2535 "lcmOperationType": operation,
2536 "startTime": now,
2537 "isAutomaticInvocation": False,
2538 "operationParams": params,
2539 "isCancelPending": False,
2540 "links": {
2541 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2542 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2543 }
2544 }
2545 return nslcmop
2546
2547 def _format_additional_params(self, params):
2548 params = params or {}
2549 for key, value in params.items():
2550 if str(value).startswith("!!yaml "):
2551 params[key] = yaml.safe_load(value[7:])
2552 return params
2553
2554 def _get_terminate_primitive_params(self, seq, vnf_index):
2555 primitive = seq.get('name')
2556 primitive_params = {}
2557 params = {
2558 "member_vnf_index": vnf_index,
2559 "primitive": primitive,
2560 "primitive_params": primitive_params,
2561 }
2562 desc_params = {}
2563 return self._map_primitive_params(seq, params, desc_params)
2564
2565 # sub-operations
2566
2567 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2568 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2569 if op.get('operationState') == 'COMPLETED':
2570 # b. Skip sub-operation
2571 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2572 return self.SUBOPERATION_STATUS_SKIP
2573 else:
2574 # c. retry executing sub-operation
2575 # The sub-operation exists, and operationState != 'COMPLETED'
2576 # Update operationState = 'PROCESSING' to indicate a retry.
2577 operationState = 'PROCESSING'
2578 detailed_status = 'In progress'
2579 self._update_suboperation_status(
2580 db_nslcmop, op_index, operationState, detailed_status)
2581 # Return the sub-operation index
2582 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2583 # with arguments extracted from the sub-operation
2584 return op_index
2585
2586 # Find a sub-operation where all keys in a matching dictionary must match
2587 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2588 def _find_suboperation(self, db_nslcmop, match):
2589 if db_nslcmop and match:
2590 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2591 for i, op in enumerate(op_list):
2592 if all(op.get(k) == match[k] for k in match):
2593 return i
2594 return self.SUBOPERATION_STATUS_NOT_FOUND
2595
2596 # Update status for a sub-operation given its index
2597 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2598 # Update DB for HA tasks
2599 q_filter = {'_id': db_nslcmop['_id']}
2600 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2601 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2602 self.db.set_one("nslcmops",
2603 q_filter=q_filter,
2604 update_dict=update_dict,
2605 fail_on_empty=False)
2606
2607 # Add sub-operation, return the index of the added sub-operation
2608 # Optionally, set operationState, detailed-status, and operationType
2609 # Status and type are currently set for 'scale' sub-operations:
2610 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2611 # 'detailed-status' : status message
2612 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2613 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
2614 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2615 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
2616 RO_nsr_id=None, RO_scaling_info=None):
2617 if not db_nslcmop:
2618 return self.SUBOPERATION_STATUS_NOT_FOUND
2619 # Get the "_admin.operations" list, if it exists
2620 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2621 op_list = db_nslcmop_admin.get('operations')
2622 # Create or append to the "_admin.operations" list
2623 new_op = {'member_vnf_index': vnf_index,
2624 'vdu_id': vdu_id,
2625 'vdu_count_index': vdu_count_index,
2626 'primitive': primitive,
2627 'primitive_params': mapped_primitive_params}
2628 if operationState:
2629 new_op['operationState'] = operationState
2630 if detailed_status:
2631 new_op['detailed-status'] = detailed_status
2632 if operationType:
2633 new_op['lcmOperationType'] = operationType
2634 if RO_nsr_id:
2635 new_op['RO_nsr_id'] = RO_nsr_id
2636 if RO_scaling_info:
2637 new_op['RO_scaling_info'] = RO_scaling_info
2638 if not op_list:
2639 # No existing operations, create key 'operations' with current operation as first list element
2640 db_nslcmop_admin.update({'operations': [new_op]})
2641 op_list = db_nslcmop_admin.get('operations')
2642 else:
2643 # Existing operations, append operation to list
2644 op_list.append(new_op)
2645
2646 db_nslcmop_update = {'_admin.operations': op_list}
2647 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2648 op_index = len(op_list) - 1
2649 return op_index
2650
2651 # Helper methods for scale() sub-operations
2652
2653 # pre-scale/post-scale:
2654 # Check for 3 different cases:
2655 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2656 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
2657 # c. retry: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
2658 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2659 operationType, RO_nsr_id=None, RO_scaling_info=None):
2660 # Find this sub-operation
2661 if RO_nsr_id and RO_scaling_info:
2662 operationType = 'SCALE-RO'
2663 match = {
2664 'member_vnf_index': vnf_index,
2665 'RO_nsr_id': RO_nsr_id,
2666 'RO_scaling_info': RO_scaling_info,
2667 }
2668 else:
2669 match = {
2670 'member_vnf_index': vnf_index,
2671 'primitive': vnf_config_primitive,
2672 'primitive_params': primitive_params,
2673 'lcmOperationType': operationType
2674 }
2675 op_index = self._find_suboperation(db_nslcmop, match)
2676 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
2677 # a. New sub-operation
2678 # The sub-operation does not exist, add it.
2679 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2680 # The following parameters are set to None for all kind of scaling:
2681 vdu_id = None
2682 vdu_count_index = None
2683 vdu_name = None
2684 if RO_nsr_id and RO_scaling_info:
2685 vnf_config_primitive = None
2686 primitive_params = None
2687 else:
2688 RO_nsr_id = None
2689 RO_scaling_info = None
2690 # Initial status for sub-operation
2691 operationState = 'PROCESSING'
2692 detailed_status = 'In progress'
2693 # Add sub-operation for pre/post-scaling (zero or more operations)
2694 self._add_suboperation(db_nslcmop,
2695 vnf_index,
2696 vdu_id,
2697 vdu_count_index,
2698 vdu_name,
2699 vnf_config_primitive,
2700 primitive_params,
2701 operationState,
2702 detailed_status,
2703 operationType,
2704 RO_nsr_id,
2705 RO_scaling_info)
2706 return self.SUBOPERATION_STATUS_NEW
2707 else:
2708 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2709 # or op_index (operationState != 'COMPLETED')
2710 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
2711
2712 # Function to return execution_environment id
2713
2714 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
2715 # TODO vdu_index_count
2716 for vca in vca_deployed_list:
2717 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2718 return vca["ee_id"]
2719
    async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor,
                           vca_index, destroy_ee=True, exec_primitives=True):
        """
        Execute the terminate primitives of one VCA and optionally destroy its execution environment.
        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record of the operation in progress
        :param vca_deployed: dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because all of
            them will be destroyed at once later on
        :param exec_primitives: False to not execute the terminate primitives, because the config
            was not completed or did not execute properly
        :return: None or exception
        """

        self.logger.debug(
            logging_text + " vca_index: {}, vca_deployed: {}, config_descriptor: {}, destroy_ee: {}".format(
                vca_index, vca_deployed, config_descriptor, destroy_ee
            )
        )

        vca_type = vca_deployed.get("type", "lxc_proxy_charm")

        # execute terminate_primitives
        if exec_primitives:
            terminate_primitives = get_ee_sorted_terminate_config_primitive_list(
                config_descriptor.get("terminate-config-primitive"), vca_deployed.get("ee_descriptor_id"))
            vdu_id = vca_deployed.get("vdu_id")
            vdu_count_index = vca_deployed.get("vdu_count_index")
            vdu_name = vca_deployed.get("vdu_name")
            vnf_index = vca_deployed.get("member-vnf-index")
            if terminate_primitives and vca_deployed.get("needed_terminate"):
                for seq in terminate_primitives:
                    # For each sequence in list, get primitive and call _ns_execute_primitive()
                    step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                        vnf_index, seq.get("name"))
                    self.logger.debug(logging_text + step)
                    # Create the primitive for each sequence, i.e. "primitive": "touch"
                    primitive = seq.get('name')
                    mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)

                    # Add sub-operation so a retried terminate can resume/skip it (HA support)
                    self._add_suboperation(db_nslcmop,
                                           vnf_index,
                                           vdu_id,
                                           vdu_count_index,
                                           vdu_name,
                                           primitive,
                                           mapped_primitive_params)
                    # Sub-operations: Call _ns_execute_primitive() instead of action()
                    try:
                        result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
                                                                                 mapped_primitive_params,
                                                                                 vca_type=vca_type)
                    except LcmException:
                        # this happens when VCA is not deployed. In this case it is not needed to terminate
                        continue
                    result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
                    if result not in result_ok:
                        raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
                                           "error {}".format(seq.get("name"), vnf_index, result_detail))
                # mark at database that this VCA does not need terminate primitives anymore
                db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
                self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})

        # remove the prometheus jobs associated to this VCA, if any
        if vca_deployed.get("prometheus_jobs") and self.prometheus:
            await self.prometheus.update(remove_jobs=vca_deployed["prometheus_jobs"])

        if destroy_ee:
            await self.vca_map[vca_type].delete_execution_environment(vca_deployed["ee_id"])
2790
2791 async def _delete_all_N2VC(self, db_nsr: dict):
2792 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2793 namespace = "." + db_nsr["_id"]
2794 try:
2795 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2796 except N2VCNotFound: # already deleted. Skip
2797 pass
2798 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
2799
    async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
        """
        Terminates a deployment from RO: deletes the ns from VIM (polling until done), then the
        nsd and the vnfds previously onboarded at RO.
        :param logging_text: prefix for log messages
        :param nsr_deployed: db_nsr._admin.deployed
        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id
        :param stage: list of string with the content to write on db_nslcmop.detailed-status.
            this method will update only the index 2, but it will write on database the
            concatenated content of the list
        :return: None
        :raises LcmException: when any deletion failed (other than not-found)
        """
        db_nsr_update = {}
        failed_detail = []
        ro_nsr_id = ro_delete_action = None
        if nsr_deployed and nsr_deployed.get("RO"):
            ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
            # a stored delete_action id means a previous terminate already requested the deletion
            ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
        try:
            if ro_nsr_id:
                stage[2] = "Deleting ns from VIM."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self._write_op_status(nslcmop_id, stage)
                self.logger.debug(logging_text + stage[2])
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                desc = await self.RO.delete("ns", ro_nsr_id)
                ro_delete_action = desc["action_id"]
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
            if ro_delete_action:
                # wait until NS is deleted from VIM
                stage[2] = "Waiting ns deleted from VIM."
                detailed_status_old = None
                self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
                                                                                                   ro_delete_action))
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)

                delete_timeout = 20 * 60  # 20 minutes
                while delete_timeout > 0:
                    desc = await self.RO.show(
                        "ns",
                        item_id_name=ro_nsr_id,
                        extra_item="action",
                        extra_item_id=ro_delete_action)

                    # deploymentStatus
                    self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)

                    ns_status, ns_status_info = self.RO.check_action_status(desc)
                    if ns_status == "ERROR":
                        raise ROclient.ROClientException(ns_status_info)
                    elif ns_status == "BUILD":
                        stage[2] = "Deleting from VIM {}".format(ns_status_info)
                    elif ns_status == "ACTIVE":
                        # deletion finished at VIM: clear the pending delete_action
                        db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                        db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                        break
                    else:
                        assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
                    # persist the detailed-status only when it changed, to avoid useless writes
                    if stage[2] != detailed_status_old:
                        detailed_status_old = stage[2]
                        db_nsr_update["detailed-status"] = " ".join(stage)
                        self._write_op_status(nslcmop_id, stage)
                        self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    await asyncio.sleep(5, loop=self.loop)
                    delete_timeout -= 5
                else:  # delete_timeout <= 0:
                    raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")

        except Exception as e:
            self.update_db_2("nsrs", nsr_id, db_nsr_update)
            if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                db_nsr_update["_admin.deployed.RO.nsr_id"] = None
                db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
                db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
                self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
            elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                failed_detail.append("delete conflict: {}".format(e))
                self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
            else:
                failed_detail.append("delete error: {}".format(e))
                self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))

        # Delete nsd (only attempted when the ns deletion above succeeded)
        if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
            ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
            try:
                stage[2] = "Deleting nsd from RO."
                db_nsr_update["detailed-status"] = " ".join(stage)
                self.update_db_2("nsrs", nsr_id, db_nsr_update)
                self._write_op_status(nslcmop_id, stage)
                await self.RO.delete("nsd", ro_nsd_id)
                self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
                db_nsr_update["_admin.deployed.RO.nsd_id"] = None
            except Exception as e:
                if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                    db_nsr_update["_admin.deployed.RO.nsd_id"] = None
                    self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
                elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                    failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
                    self.logger.debug(logging_text + failed_detail[-1])
                else:
                    failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
                    self.logger.error(logging_text + failed_detail[-1])

        # Delete the vnfds onboarded at RO, one by one
        if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
            for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
                if not vnf_deployed or not vnf_deployed["id"]:
                    continue
                try:
                    ro_vnfd_id = vnf_deployed["id"]
                    stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
                        vnf_deployed["member-vnf-index"], ro_vnfd_id)
                    db_nsr_update["detailed-status"] = " ".join(stage)
                    self.update_db_2("nsrs", nsr_id, db_nsr_update)
                    self._write_op_status(nslcmop_id, stage)
                    await self.RO.delete("vnfd", ro_vnfd_id)
                    self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
                    db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                except Exception as e:
                    if isinstance(e, ROclient.ROClientException) and e.http_code == 404:  # not found
                        db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
                        self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
                    elif isinstance(e, ROclient.ROClientException) and e.http_code == 409:  # conflict
                        failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
                        self.logger.debug(logging_text + failed_detail[-1])
                    else:
                        failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
                        self.logger.error(logging_text + failed_detail[-1])

        if failed_detail:
            stage[2] = "Error deleting from VIM"
        else:
            stage[2] = "Deleted from VIM"
        db_nsr_update["detailed-status"] = " ".join(stage)
        self.update_db_2("nsrs", nsr_id, db_nsr_update)
        self._write_op_status(nslcmop_id, stage)

        if failed_detail:
            raise LcmException("; ".join(failed_detail))
2942
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a NS instance. Runs in three stages (reported in db_nslcmop.detailed-status):
        1/3 preparation, 2/3 execution of terminate primitives, 3/3 deletion of execution
        environments, KDUs and the RO/VIM deployment. The result is persisted at database and
        notified on the kafka "ns" topic.
        :param nsr_id: nsrs database record _id
        :param nslcmop_id: nslcmops database record _id of this terminate operation
        :return: None
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            # lock not acquired; this operation is not processed by this instance
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        operation_params = None
        exc = None
        error_list = []   # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human-readable description, used by _wait_for_tasks
        db_nsr_update = {}
        stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update
            )
            self._write_op_status(
                op_id=nslcmop_id,
                queuePosition=0,
                stage=stage
            )
            # work on a copy so that partial failures do not corrupt the db record in memory
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was deployed; the finally clause will mark the operation as done
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfds_from_id = {}
            db_vnfds_from_member_index = {}
            # Loop over VNFRs, caching each vnfd by id and indexing it by member-vnf-index
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            # TODO - check before calling _destroy_N2VC
            # if not operation_params.get("skip_terminate_primitives"):#
            # or not vca.get("needed_terminate"):
            stage[0] = "Stage 2/3 execute terminating primitives."
            self.logger.debug(logging_text + stage[0])
            stage[1] = "Looking execution environment that needs terminate."
            self.logger.debug(logging_text + stage[1])

            for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                config_descriptor = None
                if not vca or not vca.get("ee_id"):
                    continue
                # locate the configuration descriptor this VCA belongs to (ns, vdu, kdu or vnf level)
                if not vca.get("member-vnf-index"):
                    # ns
                    config_descriptor = db_nsr.get("ns-configuration")
                elif vca.get("vdu_id"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
                    if vdud:
                        config_descriptor = vdud.get("vdu-configuration")
                elif vca.get("kdu_name"):
                    db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                    kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
                    if kdud:
                        config_descriptor = kdud.get("kdu-configuration")
                else:
                    config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
                vca_type = vca.get("type")
                exec_terminate_primitives = (not operation_params.get("skip_terminate_primitives") and
                                             vca.get("needed_terminate"))
                # For helm we must destroy_ee. Also for native_charm, as juju_model cannot be deleted if there are
                # pending native charms
                destroy_ee = True if vca_type in ("helm", "helm-v3", "native_charm") else False
                # self.logger.debug(logging_text + "vca_index: {}, ee_id: {}, vca_type: {} destroy_ee: {}".format(
                #     vca_index, vca.get("ee_id"), vca_type, destroy_ee))
                task = asyncio.ensure_future(
                    self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor, vca_index,
                                      destroy_ee, exec_terminate_primitives))
                tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(logging_text + 'Waiting for tasks {}'.format(list(tasks_dict_info.keys())))
                error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
                                                        min(self.timeout_charm_delete, timeout_ns_terminate),
                                                        stage, nslcmop_id)
                tasks_dict_info.clear()
                if error_list:
                    # abort: the finally clause will report the errors collected in error_list
                    return  # raise LcmException("; ".join(error_list))

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
                                                                        timeout=self.timeout_charm_delete))
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance))
                else:
                    self.logger.error(logging_text + "Unknown k8s deployment type {}".
                                      format(kdu.get("k8scluster-type")))
                    continue
                tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO (new generation RO when enabled, classic RO otherwise)
            stage[1] = "Deleting ns from VIM."
            if self.ng_ro:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_ng_ro(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            else:
                task_delete_ro = asyncio.ensure_future(
                    self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of staff will be done at finally

        except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
                                                             stage, nslcmop_id)
                    stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancell all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = '{} Detail: {}'.format(stage[0], error_detail)
                error_description_nsr = 'Operation: TERMINATING.{}, {}.'.format(nslcmop_id, stage[0])

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update
                )
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                if ns_state == "NOT_INSTANTIATED":
                    try:
                        self.db.set_list("vnfrs", {"nsr-id-ref": nsr_id}, {"_admin.nsState": "NOT_INSTANTIATED"})
                    except DbException as e:
                        self.logger.warn(logging_text + 'Error writing VNFR status for nsr-id-ref: {} -> {}'.
                                         format(nsr_id, e))
            if operation_params:
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                                 "operationState": nslcmop_operation_state,
                                                                 "autoremove": autoremove},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
3182
3183 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
3184 time_start = time()
3185 error_detail_list = []
3186 error_list = []
3187 pending_tasks = list(created_tasks_info.keys())
3188 num_tasks = len(pending_tasks)
3189 num_done = 0
3190 stage[1] = "{}/{}.".format(num_done, num_tasks)
3191 self._write_op_status(nslcmop_id, stage)
3192 while pending_tasks:
3193 new_error = None
3194 _timeout = timeout + time_start - time()
3195 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
3196 return_when=asyncio.FIRST_COMPLETED)
3197 num_done += len(done)
3198 if not done: # Timeout
3199 for task in pending_tasks:
3200 new_error = created_tasks_info[task] + ": Timeout"
3201 error_detail_list.append(new_error)
3202 error_list.append(new_error)
3203 break
3204 for task in done:
3205 if task.cancelled():
3206 exc = "Cancelled"
3207 else:
3208 exc = task.exception()
3209 if exc:
3210 if isinstance(exc, asyncio.TimeoutError):
3211 exc = "Timeout"
3212 new_error = created_tasks_info[task] + ": {}".format(exc)
3213 error_list.append(created_tasks_info[task])
3214 error_detail_list.append(new_error)
3215 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException,
3216 K8sException, NgRoException)):
3217 self.logger.error(logging_text + new_error)
3218 else:
3219 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
3220 self.logger.error(logging_text + created_tasks_info[task] + " " + exc_traceback)
3221 else:
3222 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
3223 stage[1] = "{}/{}.".format(num_done, num_tasks)
3224 if new_error:
3225 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
3226 if nsr_id: # update also nsr
3227 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
3228 "errorDetail": ". ".join(error_detail_list)})
3229 self._write_op_status(nslcmop_id, stage)
3230 return error_detail_list
3231
3232 @staticmethod
3233 def _map_primitive_params(primitive_desc, params, instantiation_params):
3234 """
3235 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
3236 The default-value is used. If it is between < > it look for a value at instantiation_params
3237 :param primitive_desc: portion of VNFD/NSD that describes primitive
3238 :param params: Params provided by user
3239 :param instantiation_params: Instantiation params provided by user
3240 :return: a dictionary with the calculated params
3241 """
3242 calculated_params = {}
3243 for parameter in primitive_desc.get("parameter", ()):
3244 param_name = parameter["name"]
3245 if param_name in params:
3246 calculated_params[param_name] = params[param_name]
3247 elif "default-value" in parameter or "value" in parameter:
3248 if "value" in parameter:
3249 calculated_params[param_name] = parameter["value"]
3250 else:
3251 calculated_params[param_name] = parameter["default-value"]
3252 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
3253 and calculated_params[param_name].endswith(">"):
3254 if calculated_params[param_name][1:-1] in instantiation_params:
3255 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
3256 else:
3257 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3258 format(calculated_params[param_name], primitive_desc["name"]))
3259 else:
3260 raise LcmException("Parameter {} needed to execute primitive {} not provided".
3261 format(param_name, primitive_desc["name"]))
3262
3263 if isinstance(calculated_params[param_name], (dict, list, tuple)):
3264 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name],
3265 default_flow_style=True, width=256)
3266 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
3267 calculated_params[param_name] = calculated_params[param_name][7:]
3268 if parameter.get("data-type") == "INTEGER":
3269 try:
3270 calculated_params[param_name] = int(calculated_params[param_name])
3271 except ValueError: # error converting string to int
3272 raise LcmException(
3273 "Parameter {} of primitive {} must be integer".format(param_name, primitive_desc["name"]))
3274 elif parameter.get("data-type") == "BOOLEAN":
3275 calculated_params[param_name] = not ((str(calculated_params[param_name])).lower() == 'false')
3276
3277 # add always ns_config_info if primitive name is config
3278 if primitive_desc["name"] == "config":
3279 if "ns_config_info" in instantiation_params:
3280 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
3281 return calculated_params
3282
3283 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None,
3284 ee_descriptor_id=None):
3285 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3286 for vca in deployed_vca:
3287 if not vca:
3288 continue
3289 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3290 continue
3291 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3292 continue
3293 if kdu_name and kdu_name != vca["kdu_name"]:
3294 continue
3295 if ee_descriptor_id and ee_descriptor_id != vca["ee_descriptor_id"]:
3296 continue
3297 break
3298 else:
3299 # vca_deployed not found
3300 raise LcmException("charm for member_vnf_index={} vdu_id={}.{} kdu_name={} execution-environment-list.id={}"
3301 " is not deployed".format(member_vnf_index, vdu_id, vdu_count_index, kdu_name,
3302 ee_descriptor_id))
3303 # get ee_id
3304 ee_id = vca.get("ee_id")
3305 vca_type = vca.get("type", "lxc_proxy_charm") # default value for backward compatibility - proxy charm
3306 if not ee_id:
3307 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
3308 "execution environment"
3309 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
3310 return ee_id, vca_type
3311
3312 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0, retries_interval=30,
3313 timeout=None, vca_type=None, db_dict=None) -> (str, str):
3314 try:
3315 if primitive == "config":
3316 primitive_params = {"params": primitive_params}
3317
3318 vca_type = vca_type or "lxc_proxy_charm"
3319
3320 while retries >= 0:
3321 try:
3322 output = await asyncio.wait_for(
3323 self.vca_map[vca_type].exec_primitive(
3324 ee_id=ee_id,
3325 primitive_name=primitive,
3326 params_dict=primitive_params,
3327 progress_timeout=self.timeout_progress_primitive,
3328 total_timeout=self.timeout_primitive,
3329 db_dict=db_dict),
3330 timeout=timeout or self.timeout_primitive)
3331 # execution was OK
3332 break
3333 except asyncio.CancelledError:
3334 raise
3335 except Exception as e: # asyncio.TimeoutError
3336 if isinstance(e, asyncio.TimeoutError):
3337 e = "Timeout"
3338 retries -= 1
3339 if retries >= 0:
3340 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
3341 # wait and retry
3342 await asyncio.sleep(retries_interval, loop=self.loop)
3343 else:
3344 return 'FAILED', str(e)
3345
3346 return 'COMPLETED', output
3347
3348 except (LcmException, asyncio.CancelledError):
3349 raise
3350 except Exception as e:
3351 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
3352
3353 async def action(self, nsr_id, nslcmop_id):
3354 # Try to lock HA task here
3355 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3356 if not task_is_locked_by_me:
3357 return
3358
3359 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3360 self.logger.debug(logging_text + "Enter")
3361 # get all needed from database
3362 db_nsr = None
3363 db_nslcmop = None
3364 db_nsr_update = {}
3365 db_nslcmop_update = {}
3366 nslcmop_operation_state = None
3367 error_description_nslcmop = None
3368 exc = None
3369 try:
3370 # wait for any previous tasks in process
3371 step = "Waiting for previous operations to terminate"
3372 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3373
3374 self._write_ns_status(
3375 nsr_id=nsr_id,
3376 ns_state=None,
3377 current_operation="RUNNING ACTION",
3378 current_operation_id=nslcmop_id
3379 )
3380
3381 step = "Getting information from database"
3382 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3383 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3384
3385 nsr_deployed = db_nsr["_admin"].get("deployed")
3386 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
3387 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3388 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
3389 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3390 primitive = db_nslcmop["operationParams"]["primitive"]
3391 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3392 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
3393
3394 if vnf_index:
3395 step = "Getting vnfr from database"
3396 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3397 step = "Getting vnfd from database"
3398 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3399 else:
3400 step = "Getting nsd from database"
3401 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
3402
3403 # for backward compatibility
3404 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3405 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3406 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3407 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3408
3409 # look for primitive
3410 config_primitive_desc = descriptor_configuration = None
3411 if vdu_id:
3412 descriptor_configuration = get_vdu_configuration(db_vnfd, vdu_id)
3413 elif kdu_name:
3414 descriptor_configuration = get_kdu_configuration(db_vnfd, kdu_name)
3415 elif vnf_index:
3416 descriptor_configuration = get_vnf_configuration(db_vnfd)
3417 else:
3418 descriptor_configuration = db_nsd.get("ns-configuration")
3419
3420 if descriptor_configuration and descriptor_configuration.get("config-primitive"):
3421 for config_primitive in descriptor_configuration["config-primitive"]:
3422 if config_primitive["name"] == primitive:
3423 config_primitive_desc = config_primitive
3424 break
3425
3426 if not config_primitive_desc:
3427 if not (kdu_name and primitive in ("upgrade", "rollback", "status")):
3428 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3429 format(primitive))
3430 primitive_name = primitive
3431 ee_descriptor_id = None
3432 else:
3433 primitive_name = config_primitive_desc.get("execution-environment-primitive", primitive)
3434 ee_descriptor_id = config_primitive_desc.get("execution-environment-ref")
3435
3436 if vnf_index:
3437 if vdu_id:
3438 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
3439 desc_params = parse_yaml_strings(vdur.get("additionalParams"))
3440 elif kdu_name:
3441 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3442 desc_params = parse_yaml_strings(kdur.get("additionalParams"))
3443 else:
3444 desc_params = parse_yaml_strings(db_vnfr.get("additionalParamsForVnf"))
3445 else:
3446 desc_params = parse_yaml_strings(db_nsr.get("additionalParamsForNs"))
3447 if kdu_name and get_kdu_configuration(db_vnfd, kdu_name):
3448 kdu_configuration = get_kdu_configuration(db_vnfd, kdu_name)
3449 actions = set()
3450 for primitive in kdu_configuration["initial-config-primitive"]:
3451 actions.add(primitive["name"])
3452 for primitive in kdu_configuration["config-primitive"]:
3453 actions.add(primitive["name"])
3454 kdu_action = True if primitive_name in actions else False
3455
3456 # TODO check if ns is in a proper status
3457 if kdu_name and (primitive_name in ("upgrade", "rollback", "status") or kdu_action):
3458 # kdur and desc_params already set from before
3459 if primitive_params:
3460 desc_params.update(primitive_params)
3461 # TODO Check if we will need something at vnf level
3462 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3463 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3464 break
3465 else:
3466 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
3467
3468 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3469 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3470 raise LcmException(msg)
3471
3472 db_dict = {"collection": "nsrs",
3473 "filter": {"_id": nsr_id},
3474 "path": "_admin.deployed.K8s.{}".format(index)}
3475 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive_name, vnf_index, kdu_name))
3476 step = "Executing kdu {}".format(primitive_name)
3477 if primitive_name == "upgrade":
3478 if desc_params.get("kdu_model"):
3479 kdu_model = desc_params.get("kdu_model")
3480 del desc_params["kdu_model"]
3481 else:
3482 kdu_model = kdu.get("kdu-model")
3483 parts = kdu_model.split(sep=":")
3484 if len(parts) == 2:
3485 kdu_model = parts[0]
3486
3487 detailed_status = await asyncio.wait_for(
3488 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3489 cluster_uuid=kdu.get("k8scluster-uuid"),
3490 kdu_instance=kdu.get("kdu-instance"),
3491 atomic=True, kdu_model=kdu_model,
3492 params=desc_params, db_dict=db_dict,
3493 timeout=timeout_ns_action),
3494 timeout=timeout_ns_action + 10)
3495 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
3496 elif primitive_name == "rollback":
3497 detailed_status = await asyncio.wait_for(
3498 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3499 cluster_uuid=kdu.get("k8scluster-uuid"),
3500 kdu_instance=kdu.get("kdu-instance"),
3501 db_dict=db_dict),
3502 timeout=timeout_ns_action)
3503 elif primitive_name == "status":
3504 detailed_status = await asyncio.wait_for(
3505 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3506 cluster_uuid=kdu.get("k8scluster-uuid"),
3507 kdu_instance=kdu.get("kdu-instance")),
3508 timeout=timeout_ns_action)
3509 else:
3510 kdu_instance = kdu.get("kdu-instance") or "{}-{}".format(kdu["kdu-name"], nsr_id)
3511 params = self._map_primitive_params(config_primitive_desc, primitive_params, desc_params)
3512
3513 detailed_status = await asyncio.wait_for(
3514 self.k8scluster_map[kdu["k8scluster-type"]].exec_primitive(
3515 cluster_uuid=kdu.get("k8scluster-uuid"),
3516 kdu_instance=kdu_instance,
3517 primitive_name=primitive_name,
3518 params=params, db_dict=db_dict,
3519 timeout=timeout_ns_action),
3520 timeout=timeout_ns_action)
3521
3522 if detailed_status:
3523 nslcmop_operation_state = 'COMPLETED'
3524 else:
3525 detailed_status = ''
3526 nslcmop_operation_state = 'FAILED'
3527 else:
3528 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"], member_vnf_index=vnf_index,
3529 vdu_id=vdu_id, vdu_count_index=vdu_count_index,
3530 ee_descriptor_id=ee_descriptor_id)
3531 db_nslcmop_notif = {"collection": "nslcmops",
3532 "filter": {"_id": nslcmop_id},
3533 "path": "admin.VCA"}
3534 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
3535 ee_id,
3536 primitive=primitive_name,
3537 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
3538 timeout=timeout_ns_action,
3539 vca_type=vca_type,
3540 db_dict=db_nslcmop_notif)
3541
3542 db_nslcmop_update["detailed-status"] = detailed_status
3543 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3544 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3545 detailed_status))
3546 return # database update is called inside finally
3547
3548 except (DbException, LcmException, N2VCException, K8sException) as e:
3549 self.logger.error(logging_text + "Exit Exception {}".format(e))
3550 exc = e
3551 except asyncio.CancelledError:
3552 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3553 exc = "Operation was cancelled"
3554 except asyncio.TimeoutError:
3555 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3556 exc = "Timeout"
3557 except Exception as e:
3558 exc = traceback.format_exc()
3559 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3560 finally:
3561 if exc:
3562 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
3563 "FAILED {}: {}".format(step, exc)
3564 nslcmop_operation_state = "FAILED"
3565 if db_nsr:
3566 self._write_ns_status(
3567 nsr_id=nsr_id,
3568 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3569 current_operation="IDLE",
3570 current_operation_id=None,
3571 # error_description=error_description_nsr,
3572 # error_detail=error_detail,
3573 other_update=db_nsr_update
3574 )
3575
3576 self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop,
3577 operation_state=nslcmop_operation_state, other_update=db_nslcmop_update)
3578
3579 if nslcmop_operation_state:
3580 try:
3581 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
3582 "operationState": nslcmop_operation_state},
3583 loop=self.loop)
3584 except Exception as e:
3585 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3586 self.logger.debug(logging_text + "Exit")
3587 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
3588 return nslcmop_operation_state, detailed_status
3589
3590 async def scale(self, nsr_id, nslcmop_id):
3591 # Try to lock HA task here
3592 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3593 if not task_is_locked_by_me:
3594 return
3595
3596 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3597 stage = ['', '', '']
3598 # ^ stage, step, VIM progress
3599 self.logger.debug(logging_text + "Enter")
3600 # get all needed from database
3601 db_nsr = None
3602 db_nslcmop_update = {}
3603 db_nsr_update = {}
3604 exc = None
3605 # in case of error, indicates what part of scale was failed to put nsr at error status
3606 scale_process = None
3607 old_operational_status = ""
3608 old_config_status = ""
3609 try:
3610 # wait for any previous tasks in process
3611 step = "Waiting for previous operations to terminate"
3612 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3613 self._write_ns_status(nsr_id=nsr_id, ns_state=None,
3614 current_operation="SCALING", current_operation_id=nslcmop_id)
3615
3616 step = "Getting nslcmop from database"
3617 self.logger.debug(step + " after having waited for previous tasks to be completed")
3618 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3619
3620 step = "Getting nsr from database"
3621 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3622 old_operational_status = db_nsr["operational-status"]
3623 old_config_status = db_nsr["config-status"]
3624
3625 step = "Parsing scaling parameters"
3626 db_nsr_update["operational-status"] = "scaling"
3627 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3628 nsr_deployed = db_nsr["_admin"].get("deployed")
3629
3630 #######
3631 nsr_deployed = db_nsr["_admin"].get("deployed")
3632 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
3633 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3634 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3635 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
3636 #######
3637
3638 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3639 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3640 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3641 # for backward compatibility
3642 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3643 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3644 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3645 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3646
3647 step = "Getting vnfr from database"
3648 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3649
3650 step = "Getting vnfd from database"
3651 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3652
3653 step = "Getting scaling-group-descriptor"
3654 scaling_descriptor = find_in_list(
3655 get_scaling_aspect(
3656 db_vnfd
3657 ),
3658 lambda scale_desc: scale_desc["name"] == scaling_group
3659 )
3660 if not scaling_descriptor:
3661 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3662 "at vnfd:scaling-group-descriptor".format(scaling_group))
3663
3664 step = "Sending scale order to VIM"
3665 # TODO check if ns is in a proper status
3666 nb_scale_op = 0
3667 if not db_nsr["_admin"].get("scaling-group"):
3668 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3669 admin_scale_index = 0
3670 else:
3671 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3672 if admin_scale_info["name"] == scaling_group:
3673 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3674 break
3675 else: # not found, set index one plus last element and add new entry with the name
3676 admin_scale_index += 1
3677 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
3678 RO_scaling_info = []
3679 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3680 if scaling_type == "SCALE_OUT":
3681 if "aspect-delta-details" not in scaling_descriptor:
3682 raise LcmException(
3683 "Aspect delta details not fount in scaling descriptor {}".format(
3684 scaling_descriptor["name"]
3685 )
3686 )
3687 # count if max-instance-count is reached
3688 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
3689
3690 vdu_scaling_info["scaling_direction"] = "OUT"
3691 vdu_scaling_info["vdu-create"] = {}
3692 for delta in deltas:
3693 for vdu_delta in delta["vdu-delta"]:
3694 vdud = get_vdu(db_vnfd, vdu_delta["id"])
3695 vdu_index = get_vdu_index(db_vnfr, vdu_delta["id"])
3696 cloud_init_text = self._get_vdu_cloud_init_content(vdud, db_vnfd)
3697 if cloud_init_text:
3698 additional_params = self._get_vdu_additional_params(db_vnfr, vdud["id"]) or {}
3699 cloud_init_list = []
3700
3701 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
3702 max_instance_count = 10
3703 if vdu_profile and "max-number-of-instances" in vdu_profile:
3704 max_instance_count = vdu_profile.get("max-number-of-instances", 10)
3705
3706 deafult_instance_num = get_number_of_instances(db_vnfd, vdud["id"])
3707
3708 nb_scale_op += vdu_delta.get("number-of-instances", 1)
3709
3710 if nb_scale_op + deafult_instance_num > max_instance_count:
3711 raise LcmException(
3712 "reached the limit of {} (max-instance-count) "
3713 "scaling-out operations for the "
3714 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)
3715 )
3716 for x in range(vdu_delta.get("number-of-instances", 1)):
3717 if cloud_init_text:
3718 # TODO Information of its own ip is not available because db_vnfr is not updated.
3719 additional_params["OSM"] = get_osm_params(
3720 db_vnfr,
3721 vdu_delta["id"],
3722 vdu_index + x
3723 )
3724 cloud_init_list.append(
3725 self._parse_cloud_init(
3726 cloud_init_text,
3727 additional_params,
3728 db_vnfd["id"],
3729 vdud["id"]
3730 )
3731 )
3732 RO_scaling_info.append(
3733 {
3734 "osm_vdu_id": vdu_delta["id"],
3735 "member-vnf-index": vnf_index,
3736 "type": "create",
3737 "count": vdu_delta.get("number-of-instances", 1)
3738 }
3739 )
3740 if cloud_init_list:
3741 RO_scaling_info[-1]["cloud_init"] = cloud_init_list
3742 vdu_scaling_info["vdu-create"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1)
3743
3744 elif scaling_type == "SCALE_IN":
3745 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3746 min_instance_count = int(scaling_descriptor["min-instance-count"])
3747
3748 vdu_scaling_info["scaling_direction"] = "IN"
3749 vdu_scaling_info["vdu-delete"] = {}
3750 deltas = scaling_descriptor.get("aspect-delta-details")["deltas"]
3751 for delta in deltas:
3752 for vdu_delta in delta["vdu-delta"]:
3753 min_instance_count = 0
3754 vdu_profile = get_vdu_profile(db_vnfd, vdu_delta["id"])
3755 if vdu_profile and "min-number-of-instances" in vdu_profile:
3756 min_instance_count = vdu_profile["min-number-of-instances"]
3757
3758 deafult_instance_num = get_number_of_instances(db_vnfd, vdu_delta["id"])
3759
3760 nb_scale_op -= vdu_delta.get("number-of-instances", 1)
3761 if nb_scale_op + deafult_instance_num < min_instance_count:
3762 raise LcmException(
3763 "reached the limit of {} (min-instance-count) scaling-in operations for the "
3764 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group)
3765 )
3766 RO_scaling_info.append({"osm_vdu_id": vdu_delta["id"], "member-vnf-index": vnf_index,
3767 "type": "delete", "count": vdu_delta.get("number-of-instances", 1)})
3768 vdu_scaling_info["vdu-delete"][vdu_delta["id"]] = vdu_delta.get("number-of-instances", 1)
3769
3770 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
3771 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
3772 if vdu_scaling_info["scaling_direction"] == "IN":
3773 for vdur in reversed(db_vnfr["vdur"]):
3774 if vdu_delete.get(vdur["vdu-id-ref"]):
3775 vdu_delete[vdur["vdu-id-ref"]] -= 1
3776 vdu_scaling_info["vdu"].append({
3777 "name": vdur.get("name") or vdur.get("vdu-name"),
3778 "vdu_id": vdur["vdu-id-ref"],
3779 "interface": []
3780 })
3781 for interface in vdur["interfaces"]:
3782 vdu_scaling_info["vdu"][-1]["interface"].append({
3783 "name": interface["name"],
3784 "ip_address": interface["ip-address"],
3785 "mac_address": interface.get("mac-address"),
3786 })
3787 # vdu_delete = vdu_scaling_info.pop("vdu-delete")
3788
3789 # PRE-SCALE BEGIN
3790 step = "Executing pre-scale vnf-config-primitive"
3791 if scaling_descriptor.get("scaling-config-action"):
3792 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
3793 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3794 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
3795 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3796 step = db_nslcmop_update["detailed-status"] = \
3797 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
3798
3799 # look for primitive
3800 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3801 if config_primitive["name"] == vnf_config_primitive:
3802 break
3803 else:
3804 raise LcmException(
3805 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
3806 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
3807 "primitive".format(scaling_group, vnf_config_primitive))
3808
3809 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
3810 if db_vnfr.get("additionalParamsForVnf"):
3811 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
3812
3813 scale_process = "VCA"
3814 db_nsr_update["config-status"] = "configuring pre-scaling"
3815 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3816
3817 # Pre-scale retry check: Check if this sub-operation has been executed before
3818 op_index = self._check_or_add_scale_suboperation(
3819 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
3820 if op_index == self.SUBOPERATION_STATUS_SKIP:
3821 # Skip sub-operation
3822 result = 'COMPLETED'
3823 result_detail = 'Done'
3824 self.logger.debug(logging_text +
3825 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3826 vnf_config_primitive, result, result_detail))
3827 else:
3828 if op_index == self.SUBOPERATION_STATUS_NEW:
3829 # New sub-operation: Get index of this sub-operation
3830 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3831 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3832 format(vnf_config_primitive))
3833 else:
3834 # retry: Get registered params for this existing sub-operation
3835 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3836 vnf_index = op.get('member_vnf_index')
3837 vnf_config_primitive = op.get('primitive')
3838 primitive_params = op.get('primitive_params')
3839 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
3840 format(vnf_config_primitive))
3841 # Execute the primitive, either with new (first-time) or registered (reintent) args
3842 ee_descriptor_id = config_primitive.get("execution-environment-ref")
3843 primitive_name = config_primitive.get("execution-environment-primitive",
3844 vnf_config_primitive)
3845 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3846 member_vnf_index=vnf_index,
3847 vdu_id=None,
3848 vdu_count_index=None,
3849 ee_descriptor_id=ee_descriptor_id)
3850 result, result_detail = await self._ns_execute_primitive(
3851 ee_id, primitive_name, primitive_params, vca_type)
3852 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3853 vnf_config_primitive, result, result_detail))
3854 # Update operationState = COMPLETED | FAILED
3855 self._update_suboperation_status(
3856 db_nslcmop, op_index, result, result_detail)
3857
3858 if result == "FAILED":
3859 raise LcmException(result_detail)
3860 db_nsr_update["config-status"] = old_config_status
3861 scale_process = None
3862 # PRE-SCALE END
3863
3864 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
3865 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
3866
3867 # SCALE RO - BEGIN
3868 if RO_scaling_info:
3869 scale_process = "RO"
3870 if self.ro_config.get("ng"):
3871 await self._scale_ng_ro(logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage)
3872 vdu_scaling_info.pop("vdu-create", None)
3873 vdu_scaling_info.pop("vdu-delete", None)
3874
3875 scale_process = None
3876 if db_nsr_update:
3877 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3878
3879 # POST-SCALE BEGIN
3880 # execute primitive service POST-SCALING
3881 step = "Executing post-scale vnf-config-primitive"
3882 if scaling_descriptor.get("scaling-config-action"):
3883 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
3884 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
3885 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
3886 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3887 step = db_nslcmop_update["detailed-status"] = \
3888 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
3889
3890 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
3891 if db_vnfr.get("additionalParamsForVnf"):
3892 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
3893
3894 # look for primitive
3895 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3896 if config_primitive["name"] == vnf_config_primitive:
3897 break
3898 else:
3899 raise LcmException(
3900 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-"
3901 "action[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:"
3902 "config-primitive".format(scaling_group, vnf_config_primitive))
3903 scale_process = "VCA"
3904 db_nsr_update["config-status"] = "configuring post-scaling"
3905 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3906
3907 # Post-scale retry check: Check if this sub-operation has been executed before
3908 op_index = self._check_or_add_scale_suboperation(
3909 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
3910 if op_index == self.SUBOPERATION_STATUS_SKIP:
3911 # Skip sub-operation
3912 result = 'COMPLETED'
3913 result_detail = 'Done'
3914 self.logger.debug(logging_text +
3915 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
3916 format(vnf_config_primitive, result, result_detail))
3917 else:
3918 if op_index == self.SUBOPERATION_STATUS_NEW:
3919 # New sub-operation: Get index of this sub-operation
3920 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3921 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3922 format(vnf_config_primitive))
3923 else:
3924 # retry: Get registered params for this existing sub-operation
3925 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3926 vnf_index = op.get('member_vnf_index')
3927 vnf_config_primitive = op.get('primitive')
3928 primitive_params = op.get('primitive_params')
3929 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation retry".
3930 format(vnf_config_primitive))
3931 # Execute the primitive, either with new (first-time) or registered (reintent) args
3932 ee_descriptor_id = config_primitive.get("execution-environment-ref")
3933 primitive_name = config_primitive.get("execution-environment-primitive",
3934 vnf_config_primitive)
3935 ee_id, vca_type = self._look_for_deployed_vca(nsr_deployed["VCA"],
3936 member_vnf_index=vnf_index,
3937 vdu_id=None,
3938 vdu_count_index=None,
3939 ee_descriptor_id=ee_descriptor_id)
3940 result, result_detail = await self._ns_execute_primitive(
3941 ee_id, primitive_name, primitive_params, vca_type)
3942 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3943 vnf_config_primitive, result, result_detail))
3944 # Update operationState = COMPLETED | FAILED
3945 self._update_suboperation_status(
3946 db_nslcmop, op_index, result, result_detail)
3947
3948 if result == "FAILED":
3949 raise LcmException(result_detail)
3950 db_nsr_update["config-status"] = old_config_status
3951 scale_process = None
3952 # POST-SCALE END
3953
3954 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
3955 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
3956 else old_operational_status
3957 db_nsr_update["config-status"] = old_config_status
3958 return
3959 except (ROclient.ROClientException, DbException, LcmException, NgRoException) as e:
3960 self.logger.error(logging_text + "Exit Exception {}".format(e))
3961 exc = e
3962 except asyncio.CancelledError:
3963 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3964 exc = "Operation was cancelled"
3965 except Exception as e:
3966 exc = traceback.format_exc()
3967 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3968 finally:
3969 self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE", current_operation_id=None)
3970 if exc:
3971 db_nslcmop_update["detailed-status"] = error_description_nslcmop = "FAILED {}: {}".format(step, exc)
3972 nslcmop_operation_state = "FAILED"
3973 if db_nsr:
3974 db_nsr_update["operational-status"] = old_operational_status
3975 db_nsr_update["config-status"] = old_config_status
3976 db_nsr_update["detailed-status"] = ""
3977 if scale_process:
3978 if "VCA" in scale_process:
3979 db_nsr_update["config-status"] = "failed"
3980 if "RO" in scale_process:
3981 db_nsr_update["operational-status"] = "failed"
3982 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
3983 exc)
3984 else:
3985 error_description_nslcmop = None
3986 nslcmop_operation_state = "COMPLETED"
3987 db_nslcmop_update["detailed-status"] = "Done"
3988
3989 self._write_op_status(op_id=nslcmop_id, stage="", error_message=error_description_nslcmop,
3990 operation_state=nslcmop_operation_state, other_update=db_nslcmop_update)
3991 if db_nsr:
3992 self._write_ns_status(nsr_id=nsr_id, ns_state=None, current_operation="IDLE",
3993 current_operation_id=None, other_update=db_nsr_update)
3994
3995 if nslcmop_operation_state:
3996 try:
3997 msg = {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id, "operationState": nslcmop_operation_state}
3998 await self.msg.aiowrite("ns", "scaled", msg, loop=self.loop)
3999 except Exception as e:
4000 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
4001 self.logger.debug(logging_text + "Exit")
4002 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")
4003
4004 async def _scale_ng_ro(self, logging_text, db_nsr, db_nslcmop, db_vnfr, vdu_scaling_info, stage):
4005 nsr_id = db_nslcmop["nsInstanceId"]
4006 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
4007 db_vnfrs = {}
4008
4009 # read from db: vnfd's for every vnf
4010 db_vnfds = []
4011
4012 # for each vnf in ns, read vnfd
4013 for vnfr in self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id}):
4014 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr
4015 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
4016 # if we haven't this vnfd, read it from db
4017 if not find_in_list(db_vnfds, lambda a_vnfd: a_vnfd["id"] == vnfd_id):
4018 # read from db
4019 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
4020 db_vnfds.append(vnfd)
4021 n2vc_key = self.n2vc.get_public_key()
4022 n2vc_key_list = [n2vc_key]
4023 self.scale_vnfr(db_vnfr, vdu_scaling_info.get("vdu-create"), vdu_scaling_info.get("vdu-delete"),
4024 mark_delete=True)
4025 # db_vnfr has been updated, update db_vnfrs to use it
4026 db_vnfrs[db_vnfr["member-vnf-index-ref"]] = db_vnfr
4027 await self._instantiate_ng_ro(logging_text, nsr_id, db_nsd, db_nsr, db_nslcmop, db_vnfrs,
4028 db_vnfds, n2vc_key_list, stage=stage, start_deploy=time(),
4029 timeout_ns_deploy=self.timeout_ns_deploy)
4030 if vdu_scaling_info.get("vdu-delete"):
4031 self.scale_vnfr(db_vnfr, None, vdu_scaling_info["vdu-delete"], mark_delete=False)
4032
4033 async def add_prometheus_metrics(self, ee_id, artifact_path, ee_config_descriptor, vnfr_id, nsr_id, target_ip):
4034 if not self.prometheus:
4035 return
4036 # look if exist a file called 'prometheus*.j2' and
4037 artifact_content = self.fs.dir_ls(artifact_path)
4038 job_file = next((f for f in artifact_content if f.startswith("prometheus") and f.endswith(".j2")), None)
4039 if not job_file:
4040 return
4041 with self.fs.file_open((artifact_path, job_file), "r") as f:
4042 job_data = f.read()
4043
4044 # TODO get_service
4045 _, _, service = ee_id.partition(".") # remove prefix "namespace."
4046 host_name = "{}-{}".format(service, ee_config_descriptor["metric-service"])
4047 host_port = "80"
4048 vnfr_id = vnfr_id.replace("-", "")
4049 variables = {
4050 "JOB_NAME": vnfr_id,
4051 "TARGET_IP": target_ip,
4052 "EXPORTER_POD_IP": host_name,
4053 "EXPORTER_POD_PORT": host_port,
4054 }
4055 job_list = self.prometheus.parse_job(job_data, variables)
4056 # ensure job_name is using the vnfr_id. Adding the metadata nsr_id
4057 for job in job_list:
4058 if not isinstance(job.get("job_name"), str) or vnfr_id not in job["job_name"]:
4059 job["job_name"] = vnfr_id + "_" + str(randint(1, 10000))
4060 job["nsr_id"] = nsr_id
4061 job_dict = {jl["job_name"]: jl for jl in job_list}
4062 if await self.prometheus.update(job_dict):
4063 return list(job_dict.keys())
4064
4065 def get_vca_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4066 """
4067 Get VCA Cloud and VCA Cloud Credentials for the VIM account
4068
4069 :param: vim_account_id: VIM Account ID
4070
4071 :return: (cloud_name, cloud_credential)
4072 """
4073 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
4074 return config.get("vca_cloud"), config.get("vca_cloud_credential")
4075
4076 def get_vca_k8s_cloud_and_credentials(self, vim_account_id: str) -> (str, str):
4077 """
4078 Get VCA K8s Cloud and VCA K8s Cloud Credentials for the VIM account
4079
4080 :param: vim_account_id: VIM Account ID
4081
4082 :return: (cloud_name, cloud_credential)
4083 """
4084 config = VimAccountDB.get_vim_account_with_id(vim_account_id).get("config", {})
4085 return config.get("vca_k8s_cloud"), config.get("vca_k8s_cloud_credential")