blob: 4e4653956c62d786b6bbb4de998a2432f2603956 [file] [log] [blame]
tierno59d22d22018-09-25 18:10:19 +02001# -*- coding: utf-8 -*-
2
tierno2e215512018-11-28 09:37:52 +00003##
4# Copyright 2018 Telefonica S.A.
5#
6# Licensed under the Apache License, Version 2.0 (the "License"); you may
7# not use this file except in compliance with the License. You may obtain
8# a copy of the License at
9#
10# http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
14# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
15# License for the specific language governing permissions and limitations
16# under the License.
17##
18
tierno59d22d22018-09-25 18:10:19 +020019import asyncio
20import yaml
21import logging
22import logging.handlers
tierno59d22d22018-09-25 18:10:19 +020023import traceback
David Garciad4816682019-12-09 14:57:43 +010024import json
gcalvino35be9152018-12-20 09:33:12 +010025from jinja2 import Environment, Template, meta, TemplateError, TemplateNotFound, TemplateSyntaxError
tierno59d22d22018-09-25 18:10:19 +020026
tierno77677d92019-08-22 13:46:35 +000027from osm_lcm import ROclient
tierno744303e2020-01-13 16:46:31 +000028from osm_lcm.lcm_utils import LcmException, LcmExceptionNoMgmtIP, LcmBase, deep_get, get_iterable, populate_dict
calvinosanch9f9c6f22019-11-04 13:37:39 +010029from n2vc.k8s_helm_conn import K8sHelmConnector
Adam Israelbaacc302019-12-01 12:41:39 -050030from n2vc.k8s_juju_conn import K8sJujuConnector
tierno59d22d22018-09-25 18:10:19 +020031
tierno27246d82018-09-27 15:59:09 +020032from osm_common.dbbase import DbException
tierno59d22d22018-09-25 18:10:19 +020033from osm_common.fsbase import FsException
quilesj7e13aeb2019-10-08 13:34:55 +020034
35from n2vc.n2vc_juju_conn import N2VCJujuConnector
tiernof59ad6c2020-04-08 12:50:52 +000036from n2vc.exceptions import N2VCException, N2VCNotFound, K8sException
tierno59d22d22018-09-25 18:10:19 +020037
tierno27246d82018-09-27 15:59:09 +020038from copy import copy, deepcopy
tierno59d22d22018-09-25 18:10:19 +020039from http import HTTPStatus
40from time import time
tierno27246d82018-09-27 15:59:09 +020041from uuid import uuid4
tierno59d22d22018-09-25 18:10:19 +020042
43__author__ = "Alfonso Tierno"
44
45
class NsLcm(LcmBase):
    """
    NS (Network Service) lifecycle manager.

    Builds RO descriptors from OSM IM descriptors and instantiation params,
    keeps nsr/vnfr database records in sync with RO and N2VC (juju) status,
    and owns the N2VC, K8s (helm/juju) and RO client connectors.
    """
    timeout_vca_on_error = 5 * 60   # Time for charm from first time at blocked,error status to mark as failed
    timeout_ns_deploy = 2 * 3600   # default global timeout for deployment a ns
    timeout_ns_terminate = 1800   # default global timeout for un deployment a ns
    timeout_charm_delete = 10 * 60   # timeout for deleting a deployed charm
    timeout_primitive = 10 * 60   # timeout for primitive execution
    timeout_progress_primitive = 2 * 60   # timeout for some progress in a primitive execution

    # sentinel results for sub-operation lookups; negative so they can never
    # collide with a real list index
    SUBOPERATION_STATUS_NOT_FOUND = -1
    SUBOPERATION_STATUS_NEW = -2
    SUBOPERATION_STATUS_SKIP = -3
    task_name_deploy_vca = "Deploying VCA"
kuuseac3a8882019-10-03 10:48:06 +020058
tierno744303e2020-01-13 16:46:31 +000059 def __init__(self, db, msg, fs, lcm_tasks, config, loop):
tierno59d22d22018-09-25 18:10:19 +020060 """
61 Init, Connect to database, filesystem storage, and messaging
62 :param config: two level dictionary with configuration. Top level should contain 'database', 'storage',
63 :return: None
64 """
quilesj7e13aeb2019-10-08 13:34:55 +020065 super().__init__(
66 db=db,
67 msg=msg,
68 fs=fs,
69 logger=logging.getLogger('lcm.ns')
70 )
71
tierno59d22d22018-09-25 18:10:19 +020072 self.loop = loop
73 self.lcm_tasks = lcm_tasks
tierno744303e2020-01-13 16:46:31 +000074 self.timeout = config["timeout"]
75 self.ro_config = config["ro_config"]
76 self.vca_config = config["VCA"].copy()
tierno59d22d22018-09-25 18:10:19 +020077
quilesj7e13aeb2019-10-08 13:34:55 +020078 # create N2VC connector
79 self.n2vc = N2VCJujuConnector(
80 db=self.db,
81 fs=self.fs,
tierno59d22d22018-09-25 18:10:19 +020082 log=self.logger,
quilesj7e13aeb2019-10-08 13:34:55 +020083 loop=self.loop,
84 url='{}:{}'.format(self.vca_config['host'], self.vca_config['port']),
85 username=self.vca_config.get('user', None),
86 vca_config=self.vca_config,
quilesj3655ae02019-12-12 16:08:35 +000087 on_update_db=self._on_update_n2vc_db
tierno59d22d22018-09-25 18:10:19 +020088 )
quilesj7e13aeb2019-10-08 13:34:55 +020089
calvinosanch9f9c6f22019-11-04 13:37:39 +010090 self.k8sclusterhelm = K8sHelmConnector(
91 kubectl_command=self.vca_config.get("kubectlpath"),
92 helm_command=self.vca_config.get("helmpath"),
93 fs=self.fs,
94 log=self.logger,
95 db=self.db,
96 on_update_db=None,
97 )
98
Adam Israelbaacc302019-12-01 12:41:39 -050099 self.k8sclusterjuju = K8sJujuConnector(
100 kubectl_command=self.vca_config.get("kubectlpath"),
101 juju_command=self.vca_config.get("jujupath"),
102 fs=self.fs,
103 log=self.logger,
104 db=self.db,
105 on_update_db=None,
106 )
107
tiernoa2143262020-03-27 16:20:40 +0000108 self.k8scluster_map = {
109 "helm-chart": self.k8sclusterhelm,
110 "chart": self.k8sclusterhelm,
111 "juju-bundle": self.k8sclusterjuju,
112 "juju": self.k8sclusterjuju,
113 }
quilesj7e13aeb2019-10-08 13:34:55 +0200114 # create RO client
tierno77677d92019-08-22 13:46:35 +0000115 self.RO = ROclient.ROClient(self.loop, **self.ro_config)
tierno59d22d22018-09-25 18:10:19 +0200116
quilesj3655ae02019-12-12 16:08:35 +0000117 def _on_update_ro_db(self, nsrs_id, ro_descriptor):
quilesj7e13aeb2019-10-08 13:34:55 +0200118
quilesj3655ae02019-12-12 16:08:35 +0000119 # self.logger.debug('_on_update_ro_db(nsrs_id={}'.format(nsrs_id))
120
121 try:
122 # TODO filter RO descriptor fields...
123
124 # write to database
125 db_dict = dict()
126 # db_dict['deploymentStatus'] = yaml.dump(ro_descriptor, default_flow_style=False, indent=2)
127 db_dict['deploymentStatus'] = ro_descriptor
128 self.update_db_2("nsrs", nsrs_id, db_dict)
129
130 except Exception as e:
131 self.logger.warn('Cannot write database RO deployment for ns={} -> {}'.format(nsrs_id, e))
132
133 async def _on_update_n2vc_db(self, table, filter, path, updated_data):
134
quilesj69a722c2020-01-09 08:30:17 +0000135 # remove last dot from path (if exists)
136 if path.endswith('.'):
137 path = path[:-1]
138
quilesj3655ae02019-12-12 16:08:35 +0000139 # self.logger.debug('_on_update_n2vc_db(table={}, filter={}, path={}, updated_data={}'
140 # .format(table, filter, path, updated_data))
141
142 try:
143
144 nsr_id = filter.get('_id')
145
146 # read ns record from database
147 nsr = self.db.get_one(table='nsrs', q_filter=filter)
148 current_ns_status = nsr.get('nsState')
149
150 # get vca status for NS
quilesj69a722c2020-01-09 08:30:17 +0000151 status_dict = await self.n2vc.get_status(namespace='.' + nsr_id, yaml_format=False)
quilesj3655ae02019-12-12 16:08:35 +0000152
153 # vcaStatus
154 db_dict = dict()
155 db_dict['vcaStatus'] = status_dict
156
157 # update configurationStatus for this VCA
158 try:
159 vca_index = int(path[path.rfind(".")+1:])
160
161 vca_list = deep_get(target_dict=nsr, key_list=('_admin', 'deployed', 'VCA'))
162 vca_status = vca_list[vca_index].get('status')
163
164 configuration_status_list = nsr.get('configurationStatus')
165 config_status = configuration_status_list[vca_index].get('status')
166
167 if config_status == 'BROKEN' and vca_status != 'failed':
168 db_dict['configurationStatus'][vca_index] = 'READY'
169 elif config_status != 'BROKEN' and vca_status == 'failed':
170 db_dict['configurationStatus'][vca_index] = 'BROKEN'
171 except Exception as e:
172 # not update configurationStatus
173 self.logger.debug('Error updating vca_index (ignore): {}'.format(e))
174
175 # if nsState = 'READY' check if juju is reporting some error => nsState = 'DEGRADED'
176 # if nsState = 'DEGRADED' check if all is OK
177 is_degraded = False
178 if current_ns_status in ('READY', 'DEGRADED'):
179 error_description = ''
180 # check machines
181 if status_dict.get('machines'):
182 for machine_id in status_dict.get('machines'):
183 machine = status_dict.get('machines').get(machine_id)
184 # check machine agent-status
185 if machine.get('agent-status'):
186 s = machine.get('agent-status').get('status')
187 if s != 'started':
188 is_degraded = True
189 error_description += 'machine {} agent-status={} ; '.format(machine_id, s)
190 # check machine instance status
191 if machine.get('instance-status'):
192 s = machine.get('instance-status').get('status')
193 if s != 'running':
194 is_degraded = True
195 error_description += 'machine {} instance-status={} ; '.format(machine_id, s)
196 # check applications
197 if status_dict.get('applications'):
198 for app_id in status_dict.get('applications'):
199 app = status_dict.get('applications').get(app_id)
200 # check application status
201 if app.get('status'):
202 s = app.get('status').get('status')
203 if s != 'active':
204 is_degraded = True
205 error_description += 'application {} status={} ; '.format(app_id, s)
206
207 if error_description:
208 db_dict['errorDescription'] = error_description
209 if current_ns_status == 'READY' and is_degraded:
210 db_dict['nsState'] = 'DEGRADED'
211 if current_ns_status == 'DEGRADED' and not is_degraded:
212 db_dict['nsState'] = 'READY'
213
214 # write to database
215 self.update_db_2("nsrs", nsr_id, db_dict)
216
tierno51183952020-04-03 15:48:18 +0000217 except (asyncio.CancelledError, asyncio.TimeoutError):
218 raise
quilesj3655ae02019-12-12 16:08:35 +0000219 except Exception as e:
220 self.logger.warn('Error updating NS state for ns={}: {}'.format(nsr_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +0200221
gcalvino35be9152018-12-20 09:33:12 +0100222 def vnfd2RO(self, vnfd, new_id=None, additionalParams=None, nsrId=None):
tierno59d22d22018-09-25 18:10:19 +0200223 """
224 Converts creates a new vnfd descriptor for RO base on input OSM IM vnfd
225 :param vnfd: input vnfd
226 :param new_id: overrides vnf id if provided
tierno8a518872018-12-21 13:42:14 +0000227 :param additionalParams: Instantiation params for VNFs provided
gcalvino35be9152018-12-20 09:33:12 +0100228 :param nsrId: Id of the NSR
tierno59d22d22018-09-25 18:10:19 +0200229 :return: copy of vnfd
230 """
tierno59d22d22018-09-25 18:10:19 +0200231 try:
232 vnfd_RO = deepcopy(vnfd)
tierno8a518872018-12-21 13:42:14 +0000233 # remove unused by RO configuration, monitoring, scaling and internal keys
tierno59d22d22018-09-25 18:10:19 +0200234 vnfd_RO.pop("_id", None)
235 vnfd_RO.pop("_admin", None)
tierno8a518872018-12-21 13:42:14 +0000236 vnfd_RO.pop("vnf-configuration", None)
237 vnfd_RO.pop("monitoring-param", None)
238 vnfd_RO.pop("scaling-group-descriptor", None)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100239 vnfd_RO.pop("kdu", None)
240 vnfd_RO.pop("k8s-cluster", None)
tierno59d22d22018-09-25 18:10:19 +0200241 if new_id:
242 vnfd_RO["id"] = new_id
tierno8a518872018-12-21 13:42:14 +0000243
244 # parse cloud-init or cloud-init-file with the provided variables using Jinja2
245 for vdu in get_iterable(vnfd_RO, "vdu"):
246 cloud_init_file = None
247 if vdu.get("cloud-init-file"):
tierno59d22d22018-09-25 18:10:19 +0200248 base_folder = vnfd["_admin"]["storage"]
gcalvino35be9152018-12-20 09:33:12 +0100249 cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"],
250 vdu["cloud-init-file"])
251 with self.fs.file_open(cloud_init_file, "r") as ci_file:
252 cloud_init_content = ci_file.read()
tierno59d22d22018-09-25 18:10:19 +0200253 vdu.pop("cloud-init-file", None)
tierno8a518872018-12-21 13:42:14 +0000254 elif vdu.get("cloud-init"):
gcalvino35be9152018-12-20 09:33:12 +0100255 cloud_init_content = vdu["cloud-init"]
tierno8a518872018-12-21 13:42:14 +0000256 else:
257 continue
258
259 env = Environment()
260 ast = env.parse(cloud_init_content)
261 mandatory_vars = meta.find_undeclared_variables(ast)
262 if mandatory_vars:
263 for var in mandatory_vars:
264 if not additionalParams or var not in additionalParams.keys():
265 raise LcmException("Variable '{}' defined at vnfd[id={}]:vdu[id={}]:cloud-init/cloud-init-"
266 "file, must be provided in the instantiation parameters inside the "
267 "'additionalParamsForVnf' block".format(var, vnfd["id"], vdu["id"]))
268 template = Template(cloud_init_content)
tierno2b611dd2019-01-11 10:30:57 +0000269 cloud_init_content = template.render(additionalParams or {})
gcalvino35be9152018-12-20 09:33:12 +0100270 vdu["cloud-init"] = cloud_init_content
tierno8a518872018-12-21 13:42:14 +0000271
tierno59d22d22018-09-25 18:10:19 +0200272 return vnfd_RO
273 except FsException as e:
tierno8a518872018-12-21 13:42:14 +0000274 raise LcmException("Error reading vnfd[id={}]:vdu[id={}]:cloud-init-file={}: {}".
tiernoda964822019-01-14 15:53:47 +0000275 format(vnfd["id"], vdu["id"], cloud_init_file, e))
tierno8a518872018-12-21 13:42:14 +0000276 except (TemplateError, TemplateNotFound, TemplateSyntaxError) as e:
277 raise LcmException("Error parsing Jinja2 to cloud-init content at vnfd[id={}]:vdu[id={}]: {}".
278 format(vnfd["id"], vdu["id"], e))
tierno59d22d22018-09-25 18:10:19 +0200279
tierno27246d82018-09-27 15:59:09 +0200280 def ns_params_2_RO(self, ns_params, nsd, vnfd_dict, n2vc_key_list):
tierno59d22d22018-09-25 18:10:19 +0200281 """
tierno27246d82018-09-27 15:59:09 +0200282 Creates a RO ns descriptor from OSM ns_instantiate params
tierno59d22d22018-09-25 18:10:19 +0200283 :param ns_params: OSM instantiate params
284 :return: The RO ns descriptor
285 """
286 vim_2_RO = {}
tiernob7f3f0d2019-03-20 17:17:21 +0000287 wim_2_RO = {}
tierno27246d82018-09-27 15:59:09 +0200288 # TODO feature 1417: Check that no instantiation is set over PDU
289 # check if PDU forces a concrete vim-network-id and add it
290 # check if PDU contains a SDN-assist info (dpid, switch, port) and pass it to RO
tierno59d22d22018-09-25 18:10:19 +0200291
292 def vim_account_2_RO(vim_account):
293 if vim_account in vim_2_RO:
294 return vim_2_RO[vim_account]
295
296 db_vim = self.db.get_one("vim_accounts", {"_id": vim_account})
297 if db_vim["_admin"]["operationalState"] != "ENABLED":
298 raise LcmException("VIM={} is not available. operationalState={}".format(
299 vim_account, db_vim["_admin"]["operationalState"]))
300 RO_vim_id = db_vim["_admin"]["deployed"]["RO"]
301 vim_2_RO[vim_account] = RO_vim_id
302 return RO_vim_id
303
tiernob7f3f0d2019-03-20 17:17:21 +0000304 def wim_account_2_RO(wim_account):
305 if isinstance(wim_account, str):
306 if wim_account in wim_2_RO:
307 return wim_2_RO[wim_account]
308
309 db_wim = self.db.get_one("wim_accounts", {"_id": wim_account})
310 if db_wim["_admin"]["operationalState"] != "ENABLED":
311 raise LcmException("WIM={} is not available. operationalState={}".format(
312 wim_account, db_wim["_admin"]["operationalState"]))
313 RO_wim_id = db_wim["_admin"]["deployed"]["RO-account"]
314 wim_2_RO[wim_account] = RO_wim_id
315 return RO_wim_id
316 else:
317 return wim_account
318
tierno59d22d22018-09-25 18:10:19 +0200319 def ip_profile_2_RO(ip_profile):
320 RO_ip_profile = deepcopy((ip_profile))
321 if "dns-server" in RO_ip_profile:
322 if isinstance(RO_ip_profile["dns-server"], list):
323 RO_ip_profile["dns-address"] = []
324 for ds in RO_ip_profile.pop("dns-server"):
325 RO_ip_profile["dns-address"].append(ds['address'])
326 else:
327 RO_ip_profile["dns-address"] = RO_ip_profile.pop("dns-server")
328 if RO_ip_profile.get("ip-version") == "ipv4":
329 RO_ip_profile["ip-version"] = "IPv4"
330 if RO_ip_profile.get("ip-version") == "ipv6":
331 RO_ip_profile["ip-version"] = "IPv6"
332 if "dhcp-params" in RO_ip_profile:
333 RO_ip_profile["dhcp"] = RO_ip_profile.pop("dhcp-params")
334 return RO_ip_profile
335
336 if not ns_params:
337 return None
338 RO_ns_params = {
339 # "name": ns_params["nsName"],
340 # "description": ns_params.get("nsDescription"),
341 "datacenter": vim_account_2_RO(ns_params["vimAccountId"]),
tiernob7f3f0d2019-03-20 17:17:21 +0000342 "wim_account": wim_account_2_RO(ns_params.get("wimAccountId")),
tierno59d22d22018-09-25 18:10:19 +0200343 # "scenario": ns_params["nsdId"],
tierno59d22d22018-09-25 18:10:19 +0200344 }
quilesj7e13aeb2019-10-08 13:34:55 +0200345
tiernoe64f7fb2019-09-11 08:55:52 +0000346 n2vc_key_list = n2vc_key_list or []
347 for vnfd_ref, vnfd in vnfd_dict.items():
348 vdu_needed_access = []
349 mgmt_cp = None
350 if vnfd.get("vnf-configuration"):
tierno6cf25f52019-09-12 09:33:40 +0000351 ssh_required = deep_get(vnfd, ("vnf-configuration", "config-access", "ssh-access", "required"))
tiernoe64f7fb2019-09-11 08:55:52 +0000352 if ssh_required and vnfd.get("mgmt-interface"):
353 if vnfd["mgmt-interface"].get("vdu-id"):
354 vdu_needed_access.append(vnfd["mgmt-interface"]["vdu-id"])
355 elif vnfd["mgmt-interface"].get("cp"):
356 mgmt_cp = vnfd["mgmt-interface"]["cp"]
tierno27246d82018-09-27 15:59:09 +0200357
tiernoe64f7fb2019-09-11 08:55:52 +0000358 for vdu in vnfd.get("vdu", ()):
359 if vdu.get("vdu-configuration"):
tierno6cf25f52019-09-12 09:33:40 +0000360 ssh_required = deep_get(vdu, ("vdu-configuration", "config-access", "ssh-access", "required"))
tiernoe64f7fb2019-09-11 08:55:52 +0000361 if ssh_required:
tierno27246d82018-09-27 15:59:09 +0200362 vdu_needed_access.append(vdu["id"])
tiernoe64f7fb2019-09-11 08:55:52 +0000363 elif mgmt_cp:
364 for vdu_interface in vdu.get("interface"):
365 if vdu_interface.get("external-connection-point-ref") and \
366 vdu_interface["external-connection-point-ref"] == mgmt_cp:
367 vdu_needed_access.append(vdu["id"])
368 mgmt_cp = None
369 break
tierno27246d82018-09-27 15:59:09 +0200370
tiernoe64f7fb2019-09-11 08:55:52 +0000371 if vdu_needed_access:
372 for vnf_member in nsd.get("constituent-vnfd"):
373 if vnf_member["vnfd-id-ref"] != vnfd_ref:
374 continue
375 for vdu in vdu_needed_access:
376 populate_dict(RO_ns_params,
377 ("vnfs", vnf_member["member-vnf-index"], "vdus", vdu, "mgmt_keys"),
378 n2vc_key_list)
tierno27246d82018-09-27 15:59:09 +0200379
tierno25ec7732018-10-24 18:47:11 +0200380 if ns_params.get("vduImage"):
381 RO_ns_params["vduImage"] = ns_params["vduImage"]
382
tiernoc255a822018-10-31 09:41:53 +0100383 if ns_params.get("ssh_keys"):
384 RO_ns_params["cloud-config"] = {"key-pairs": ns_params["ssh_keys"]}
tierno27246d82018-09-27 15:59:09 +0200385 for vnf_params in get_iterable(ns_params, "vnf"):
386 for constituent_vnfd in nsd["constituent-vnfd"]:
387 if constituent_vnfd["member-vnf-index"] == vnf_params["member-vnf-index"]:
388 vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
389 break
390 else:
391 raise LcmException("Invalid instantiate parameter vnf:member-vnf-index={} is not present at nsd:"
392 "constituent-vnfd".format(vnf_params["member-vnf-index"]))
393 if vnf_params.get("vimAccountId"):
394 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "datacenter"),
395 vim_account_2_RO(vnf_params["vimAccountId"]))
tierno59d22d22018-09-25 18:10:19 +0200396
tierno27246d82018-09-27 15:59:09 +0200397 for vdu_params in get_iterable(vnf_params, "vdu"):
398 # TODO feature 1417: check that this VDU exist and it is not a PDU
399 if vdu_params.get("volume"):
400 for volume_params in vdu_params["volume"]:
401 if volume_params.get("vim-volume-id"):
402 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
403 vdu_params["id"], "devices", volume_params["name"], "vim_id"),
404 volume_params["vim-volume-id"])
405 if vdu_params.get("interface"):
406 for interface_params in vdu_params["interface"]:
407 if interface_params.get("ip-address"):
408 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
409 vdu_params["id"], "interfaces", interface_params["name"],
410 "ip_address"),
411 interface_params["ip-address"])
412 if interface_params.get("mac-address"):
413 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
414 vdu_params["id"], "interfaces", interface_params["name"],
415 "mac_address"),
416 interface_params["mac-address"])
417 if interface_params.get("floating-ip-required"):
418 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
419 vdu_params["id"], "interfaces", interface_params["name"],
420 "floating-ip"),
421 interface_params["floating-ip-required"])
422
423 for internal_vld_params in get_iterable(vnf_params, "internal-vld"):
424 if internal_vld_params.get("vim-network-name"):
425 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
426 internal_vld_params["name"], "vim-network-name"),
427 internal_vld_params["vim-network-name"])
gcalvino0d7ac8d2018-12-17 16:24:08 +0100428 if internal_vld_params.get("vim-network-id"):
429 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
430 internal_vld_params["name"], "vim-network-id"),
431 internal_vld_params["vim-network-id"])
tierno27246d82018-09-27 15:59:09 +0200432 if internal_vld_params.get("ip-profile"):
433 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
434 internal_vld_params["name"], "ip-profile"),
435 ip_profile_2_RO(internal_vld_params["ip-profile"]))
kbsub4d761eb2019-10-17 16:28:48 +0000436 if internal_vld_params.get("provider-network"):
437
438 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "networks",
439 internal_vld_params["name"], "provider-network"),
440 internal_vld_params["provider-network"].copy())
tierno27246d82018-09-27 15:59:09 +0200441
442 for icp_params in get_iterable(internal_vld_params, "internal-connection-point"):
443 # look for interface
444 iface_found = False
445 for vdu_descriptor in vnf_descriptor["vdu"]:
446 for vdu_interface in vdu_descriptor["interface"]:
447 if vdu_interface.get("internal-connection-point-ref") == icp_params["id-ref"]:
448 if icp_params.get("ip-address"):
449 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
450 vdu_descriptor["id"], "interfaces",
451 vdu_interface["name"], "ip_address"),
452 icp_params["ip-address"])
453
454 if icp_params.get("mac-address"):
455 populate_dict(RO_ns_params, ("vnfs", vnf_params["member-vnf-index"], "vdus",
456 vdu_descriptor["id"], "interfaces",
457 vdu_interface["name"], "mac_address"),
458 icp_params["mac-address"])
459 iface_found = True
tierno59d22d22018-09-25 18:10:19 +0200460 break
tierno27246d82018-09-27 15:59:09 +0200461 if iface_found:
462 break
463 else:
464 raise LcmException("Invalid instantiate parameter vnf:member-vnf-index[{}]:"
465 "internal-vld:id-ref={} is not present at vnfd:internal-"
466 "connection-point".format(vnf_params["member-vnf-index"],
467 icp_params["id-ref"]))
468
469 for vld_params in get_iterable(ns_params, "vld"):
470 if "ip-profile" in vld_params:
471 populate_dict(RO_ns_params, ("networks", vld_params["name"], "ip-profile"),
472 ip_profile_2_RO(vld_params["ip-profile"]))
tiernob7f3f0d2019-03-20 17:17:21 +0000473
kbsub4d761eb2019-10-17 16:28:48 +0000474 if vld_params.get("provider-network"):
475
476 populate_dict(RO_ns_params, ("networks", vld_params["name"], "provider-network"),
477 vld_params["provider-network"].copy())
478
tiernob7f3f0d2019-03-20 17:17:21 +0000479 if "wimAccountId" in vld_params and vld_params["wimAccountId"] is not None:
480 populate_dict(RO_ns_params, ("networks", vld_params["name"], "wim_account"),
481 wim_account_2_RO(vld_params["wimAccountId"])),
tierno27246d82018-09-27 15:59:09 +0200482 if vld_params.get("vim-network-name"):
483 RO_vld_sites = []
484 if isinstance(vld_params["vim-network-name"], dict):
485 for vim_account, vim_net in vld_params["vim-network-name"].items():
486 RO_vld_sites.append({
487 "netmap-use": vim_net,
488 "datacenter": vim_account_2_RO(vim_account)
489 })
490 else: # isinstance str
491 RO_vld_sites.append({"netmap-use": vld_params["vim-network-name"]})
492 if RO_vld_sites:
493 populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
kbsub4d761eb2019-10-17 16:28:48 +0000494
gcalvino0d7ac8d2018-12-17 16:24:08 +0100495 if vld_params.get("vim-network-id"):
496 RO_vld_sites = []
497 if isinstance(vld_params["vim-network-id"], dict):
498 for vim_account, vim_net in vld_params["vim-network-id"].items():
499 RO_vld_sites.append({
500 "netmap-use": vim_net,
501 "datacenter": vim_account_2_RO(vim_account)
502 })
503 else: # isinstance str
504 RO_vld_sites.append({"netmap-use": vld_params["vim-network-id"]})
505 if RO_vld_sites:
506 populate_dict(RO_ns_params, ("networks", vld_params["name"], "sites"), RO_vld_sites)
Felipe Vicens720b07a2019-01-31 02:32:09 +0100507 if vld_params.get("ns-net"):
508 if isinstance(vld_params["ns-net"], dict):
509 for vld_id, instance_scenario_id in vld_params["ns-net"].items():
510 RO_vld_ns_net = {"instance_scenario_id": instance_scenario_id, "osm_id": vld_id}
Felipe Vicensb0e5fe42019-12-05 10:30:38 +0100511 populate_dict(RO_ns_params, ("networks", vld_params["name"], "use-network"), RO_vld_ns_net)
tierno27246d82018-09-27 15:59:09 +0200512 if "vnfd-connection-point-ref" in vld_params:
513 for cp_params in vld_params["vnfd-connection-point-ref"]:
514 # look for interface
515 for constituent_vnfd in nsd["constituent-vnfd"]:
516 if constituent_vnfd["member-vnf-index"] == cp_params["member-vnf-index-ref"]:
517 vnf_descriptor = vnfd_dict[constituent_vnfd["vnfd-id-ref"]]
518 break
519 else:
520 raise LcmException(
521 "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={} "
522 "is not present at nsd:constituent-vnfd".format(cp_params["member-vnf-index-ref"]))
523 match_cp = False
524 for vdu_descriptor in vnf_descriptor["vdu"]:
525 for interface_descriptor in vdu_descriptor["interface"]:
526 if interface_descriptor.get("external-connection-point-ref") == \
527 cp_params["vnfd-connection-point-ref"]:
528 match_cp = True
tierno59d22d22018-09-25 18:10:19 +0200529 break
tierno27246d82018-09-27 15:59:09 +0200530 if match_cp:
531 break
532 else:
533 raise LcmException(
534 "Invalid instantiate parameter vld:vnfd-connection-point-ref:member-vnf-index-ref={}:"
535 "vnfd-connection-point-ref={} is not present at vnfd={}".format(
536 cp_params["member-vnf-index-ref"],
537 cp_params["vnfd-connection-point-ref"],
538 vnf_descriptor["id"]))
539 if cp_params.get("ip-address"):
540 populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
541 vdu_descriptor["id"], "interfaces",
542 interface_descriptor["name"], "ip_address"),
543 cp_params["ip-address"])
544 if cp_params.get("mac-address"):
545 populate_dict(RO_ns_params, ("vnfs", cp_params["member-vnf-index-ref"], "vdus",
546 vdu_descriptor["id"], "interfaces",
547 interface_descriptor["name"], "mac_address"),
548 cp_params["mac-address"])
tierno59d22d22018-09-25 18:10:19 +0200549 return RO_ns_params
550
tierno27246d82018-09-27 15:59:09 +0200551 def scale_vnfr(self, db_vnfr, vdu_create=None, vdu_delete=None):
552 # make a copy to do not change
553 vdu_create = copy(vdu_create)
554 vdu_delete = copy(vdu_delete)
555
556 vdurs = db_vnfr.get("vdur")
557 if vdurs is None:
558 vdurs = []
559 vdu_index = len(vdurs)
560 while vdu_index:
561 vdu_index -= 1
562 vdur = vdurs[vdu_index]
563 if vdur.get("pdu-type"):
564 continue
565 vdu_id_ref = vdur["vdu-id-ref"]
566 if vdu_create and vdu_create.get(vdu_id_ref):
567 for index in range(0, vdu_create[vdu_id_ref]):
568 vdur = deepcopy(vdur)
569 vdur["_id"] = str(uuid4())
570 vdur["count-index"] += 1
571 vdurs.insert(vdu_index+1+index, vdur)
572 del vdu_create[vdu_id_ref]
573 if vdu_delete and vdu_delete.get(vdu_id_ref):
574 del vdurs[vdu_index]
575 vdu_delete[vdu_id_ref] -= 1
576 if not vdu_delete[vdu_id_ref]:
577 del vdu_delete[vdu_id_ref]
578 # check all operations are done
579 if vdu_create or vdu_delete:
580 raise LcmException("Error scaling OUT VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
581 vdu_create))
582 if vdu_delete:
583 raise LcmException("Error scaling IN VNFR for {}. There is not any existing vnfr. Scaled to 0?".format(
584 vdu_delete))
585
586 vnfr_update = {"vdur": vdurs}
587 db_vnfr["vdur"] = vdurs
588 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
589
tiernof578e552018-11-08 19:07:20 +0100590 def ns_update_nsr(self, ns_update_nsr, db_nsr, nsr_desc_RO):
591 """
592 Updates database nsr with the RO info for the created vld
593 :param ns_update_nsr: dictionary to be filled with the updated info
594 :param db_nsr: content of db_nsr. This is also modified
595 :param nsr_desc_RO: nsr descriptor from RO
596 :return: Nothing, LcmException is raised on errors
597 """
598
599 for vld_index, vld in enumerate(get_iterable(db_nsr, "vld")):
600 for net_RO in get_iterable(nsr_desc_RO, "nets"):
601 if vld["id"] != net_RO.get("ns_net_osm_id"):
602 continue
603 vld["vim-id"] = net_RO.get("vim_net_id")
604 vld["name"] = net_RO.get("vim_name")
605 vld["status"] = net_RO.get("status")
606 vld["status-detailed"] = net_RO.get("error_msg")
607 ns_update_nsr["vld.{}".format(vld_index)] = vld
608 break
609 else:
610 raise LcmException("ns_update_nsr: Not found vld={} at RO info".format(vld["id"]))
611
tiernoe876f672020-02-13 14:34:48 +0000612 def set_vnfr_at_error(self, db_vnfrs, error_text):
613 try:
614 for db_vnfr in db_vnfrs.values():
615 vnfr_update = {"status": "ERROR"}
616 for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
617 if "status" not in vdur:
618 vdur["status"] = "ERROR"
619 vnfr_update["vdur.{}.status".format(vdu_index)] = "ERROR"
620 if error_text:
621 vdur["status-detailed"] = str(error_text)
622 vnfr_update["vdur.{}.status-detailed".format(vdu_index)] = "ERROR"
623 self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
624 except DbException as e:
625 self.logger.error("Cannot update vnf. {}".format(e))
626
    def ns_update_vnfr(self, db_vnfrs, nsr_desc_RO):
        """
        Updates database vnfr with the RO info, e.g. ip_address, vim_id... Descriptor db_vnfrs is also updated
        :param db_vnfrs: dictionary with member-vnf-index: vnfr-content
        :param nsr_desc_RO: nsr descriptor from RO
        :return: Nothing, LcmException is raised on errors
        """
        for vnf_index, db_vnfr in db_vnfrs.items():
            # find the RO vnf matching this member-vnf-index; the for/else at the
            # bottom raises if none matches
            for vnf_RO in nsr_desc_RO["vnfs"]:
                if vnf_RO["member_vnf_index"] != vnf_index:
                    continue
                vnfr_update = {}
                if vnf_RO.get("ip_address"):
                    # RO may report several addresses separated by ';'; keep the first
                    db_vnfr["ip-address"] = vnfr_update["ip-address"] = vnf_RO["ip_address"].split(";")[0]
                elif not db_vnfr.get("ip-address"):
                    if db_vnfr.get("vdur"):  # if not VDUs, there is not ip_address
                        raise LcmExceptionNoMgmtIP("ns member_vnf_index '{}' has no IP address".format(vnf_index))

                for vdu_index, vdur in enumerate(get_iterable(db_vnfr, "vdur")):
                    vdur_RO_count_index = 0
                    if vdur.get("pdu-type"):
                        # physical units are not reported by RO/VIM
                        continue
                    # match the RO vm by vdu id and by count-index (the n-th RO vm
                    # with the same vdu_osm_id corresponds to count-index n)
                    for vdur_RO in get_iterable(vnf_RO, "vms"):
                        if vdur["vdu-id-ref"] != vdur_RO["vdu_osm_id"]:
                            continue
                        if vdur["count-index"] != vdur_RO_count_index:
                            vdur_RO_count_index += 1
                            continue
                        # copy the VIM view of this vm into the vdur record
                        vdur["vim-id"] = vdur_RO.get("vim_vm_id")
                        if vdur_RO.get("ip_address"):
                            vdur["ip-address"] = vdur_RO["ip_address"].split(";")[0]
                        else:
                            vdur["ip-address"] = None
                        vdur["vdu-id-ref"] = vdur_RO.get("vdu_osm_id")
                        vdur["name"] = vdur_RO.get("vim_name")
                        vdur["status"] = vdur_RO.get("status")
                        vdur["status-detailed"] = vdur_RO.get("error_msg")
                        # fill interface addresses, matching by RO 'internal_name'
                        for ifacer in get_iterable(vdur, "interfaces"):
                            for interface_RO in get_iterable(vdur_RO, "interfaces"):
                                if ifacer["name"] == interface_RO.get("internal_name"):
                                    ifacer["ip-address"] = interface_RO.get("ip_address")
                                    ifacer["mac-address"] = interface_RO.get("mac_address")
                                    break
                            else:
                                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} interface={} "
                                                   "from VIM info"
                                                   .format(vnf_index, vdur["vdu-id-ref"], ifacer["name"]))
                        vnfr_update["vdur.{}".format(vdu_index)] = vdur
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vdur={} count_index={} from "
                                           "VIM info".format(vnf_index, vdur["vdu-id-ref"], vdur["count-index"]))

                # update internal vnf vlds with the RO net info
                for vld_index, vld in enumerate(get_iterable(db_vnfr, "vld")):
                    for net_RO in get_iterable(nsr_desc_RO, "nets"):
                        if vld["id"] != net_RO.get("vnf_net_osm_id"):
                            continue
                        vld["vim-id"] = net_RO.get("vim_net_id")
                        vld["name"] = net_RO.get("vim_name")
                        vld["status"] = net_RO.get("status")
                        vld["status-detailed"] = net_RO.get("error_msg")
                        vnfr_update["vld.{}".format(vld_index)] = vld
                        break
                    else:
                        raise LcmException("ns_update_vnfr: Not found member_vnf_index={} vld={} from VIM info".format(
                            vnf_index, vld["id"]))

                self.update_db_2("vnfrs", db_vnfr["_id"], vnfr_update)
                break

            else:
                raise LcmException("ns_update_vnfr: Not found member_vnf_index={} from VIM info".format(vnf_index))
tierno59d22d22018-09-25 18:10:19 +0200699
tierno5ee02052019-12-05 19:55:02 +0000700 def _get_ns_config_info(self, nsr_id):
tiernoc3f2a822019-11-05 13:45:04 +0000701 """
702 Generates a mapping between vnf,vdu elements and the N2VC id
tierno5ee02052019-12-05 19:55:02 +0000703 :param nsr_id: id of nsr to get last database _admin.deployed.VCA that contains this list
tiernoc3f2a822019-11-05 13:45:04 +0000704 :return: a dictionary with {osm-config-mapping: {}} where its element contains:
705 "<member-vnf-index>": <N2VC-id> for a vnf configuration, or
706 "<member-vnf-index>.<vdu.id>.<vdu replica(0, 1,..)>": <N2VC-id> for a vdu configuration
707 """
tierno5ee02052019-12-05 19:55:02 +0000708 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
709 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernoc3f2a822019-11-05 13:45:04 +0000710 mapping = {}
711 ns_config_info = {"osm-config-mapping": mapping}
712 for vca in vca_deployed_list:
713 if not vca["member-vnf-index"]:
714 continue
715 if not vca["vdu_id"]:
716 mapping[vca["member-vnf-index"]] = vca["application"]
717 else:
718 mapping["{}.{}.{}".format(vca["member-vnf-index"], vca["vdu_id"], vca["vdu_count_index"])] =\
719 vca["application"]
720 return ns_config_info
721
722 @staticmethod
723 def _get_initial_config_primitive_list(desc_primitive_list, vca_deployed):
724 """
725 Generates a list of initial-config-primitive based on the list provided by the descriptor. It includes internal
726 primitives as verify-ssh-credentials, or config when needed
727 :param desc_primitive_list: information of the descriptor
728 :param vca_deployed: information of the deployed, needed for known if it is related to an NS, VNF, VDU and if
729 this element contains a ssh public key
730 :return: The modified list. Can ba an empty list, but always a list
731 """
732 if desc_primitive_list:
733 primitive_list = desc_primitive_list.copy()
734 else:
735 primitive_list = []
736 # look for primitive config, and get the position. None if not present
737 config_position = None
738 for index, primitive in enumerate(primitive_list):
739 if primitive["name"] == "config":
740 config_position = index
741 break
742
743 # for NS, add always a config primitive if not present (bug 874)
744 if not vca_deployed["member-vnf-index"] and config_position is None:
745 primitive_list.insert(0, {"name": "config", "parameter": []})
746 config_position = 0
747 # for VNF/VDU add verify-ssh-credentials after config
748 if vca_deployed["member-vnf-index"] and config_position is not None and vca_deployed.get("ssh-public-key"):
749 primitive_list.insert(config_position + 1, {"name": "verify-ssh-credentials", "parameter": []})
750 return primitive_list
751
tiernoe876f672020-02-13 14:34:48 +0000752 async def instantiate_RO(self, logging_text, nsr_id, nsd, db_nsr, db_nslcmop, db_vnfrs, db_vnfds_ref,
753 n2vc_key_list, stage):
754 try:
755 db_nsr_update = {}
756 RO_descriptor_number = 0 # number of descriptors created at RO
757 vnf_index_2_RO_id = {} # map between vnfd/nsd id to the id used at RO
758 nslcmop_id = db_nslcmop["_id"]
759 start_deploy = time()
760 ns_params = db_nslcmop.get("operationParams")
761 if ns_params and ns_params.get("timeout_ns_deploy"):
762 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
763 else:
764 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +0200765
tiernoe876f672020-02-13 14:34:48 +0000766 # Check for and optionally request placement optimization. Database will be updated if placement activated
767 stage[2] = "Waiting for Placement."
tierno38089af2020-04-16 07:56:58 +0000768 await self._do_placement(logging_text, db_nslcmop, db_vnfrs)
quilesj7e13aeb2019-10-08 13:34:55 +0200769
tiernoe876f672020-02-13 14:34:48 +0000770 # deploy RO
magnussonle9198bb2020-01-21 13:00:51 +0100771
tiernoe876f672020-02-13 14:34:48 +0000772 # get vnfds, instantiate at RO
773 for c_vnf in nsd.get("constituent-vnfd", ()):
774 member_vnf_index = c_vnf["member-vnf-index"]
775 vnfd = db_vnfds_ref[c_vnf['vnfd-id-ref']]
776 vnfd_ref = vnfd["id"]
quilesj7e13aeb2019-10-08 13:34:55 +0200777
tiernoe876f672020-02-13 14:34:48 +0000778 stage[2] = "Creating vnfd='{}' member_vnf_index='{}' at RO".format(vnfd_ref, member_vnf_index)
779 db_nsr_update["detailed-status"] = " ".join(stage)
780 self.update_db_2("nsrs", nsr_id, db_nsr_update)
781 self._write_op_status(nslcmop_id, stage)
calvinosanch9f9c6f22019-11-04 13:37:39 +0100782
tiernoe876f672020-02-13 14:34:48 +0000783 # self.logger.debug(logging_text + stage[2])
784 vnfd_id_RO = "{}.{}.{}".format(nsr_id, RO_descriptor_number, member_vnf_index[:23])
785 vnf_index_2_RO_id[member_vnf_index] = vnfd_id_RO
786 RO_descriptor_number += 1
787
788 # look position at deployed.RO.vnfd if not present it will be appended at the end
789 for index, vnf_deployed in enumerate(db_nsr["_admin"]["deployed"]["RO"]["vnfd"]):
790 if vnf_deployed["member-vnf-index"] == member_vnf_index:
791 break
792 else:
793 index = len(db_nsr["_admin"]["deployed"]["RO"]["vnfd"])
794 db_nsr["_admin"]["deployed"]["RO"]["vnfd"].append(None)
795
796 # look if present
797 RO_update = {"member-vnf-index": member_vnf_index}
798 vnfd_list = await self.RO.get_list("vnfd", filter_by={"osm_id": vnfd_id_RO})
799 if vnfd_list:
800 RO_update["id"] = vnfd_list[0]["uuid"]
801 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' exists at RO. Using RO_id={}".
802 format(vnfd_ref, member_vnf_index, vnfd_list[0]["uuid"]))
803 else:
804 vnfd_RO = self.vnfd2RO(vnfd, vnfd_id_RO, db_vnfrs[c_vnf["member-vnf-index"]].
805 get("additionalParamsForVnf"), nsr_id)
806 desc = await self.RO.create("vnfd", descriptor=vnfd_RO)
807 RO_update["id"] = desc["uuid"]
808 self.logger.debug(logging_text + "vnfd='{}' member_vnf_index='{}' created at RO. RO_id={}".format(
809 vnfd_ref, member_vnf_index, desc["uuid"]))
810 db_nsr_update["_admin.deployed.RO.vnfd.{}".format(index)] = RO_update
811 db_nsr["_admin"]["deployed"]["RO"]["vnfd"][index] = RO_update
812
813 # create nsd at RO
814 nsd_ref = nsd["id"]
815
816 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
817 db_nsr_update["detailed-status"] = " ".join(stage)
818 self.update_db_2("nsrs", nsr_id, db_nsr_update)
819 self._write_op_status(nslcmop_id, stage)
820
821 # self.logger.debug(logging_text + stage[2])
822 RO_osm_nsd_id = "{}.{}.{}".format(nsr_id, RO_descriptor_number, nsd_ref[:23])
tiernod8323042019-08-09 11:32:23 +0000823 RO_descriptor_number += 1
tiernoe876f672020-02-13 14:34:48 +0000824 nsd_list = await self.RO.get_list("nsd", filter_by={"osm_id": RO_osm_nsd_id})
825 if nsd_list:
826 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = nsd_list[0]["uuid"]
827 self.logger.debug(logging_text + "nsd={} exists at RO. Using RO_id={}".format(
828 nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +0000829 else:
tiernoe876f672020-02-13 14:34:48 +0000830 nsd_RO = deepcopy(nsd)
831 nsd_RO["id"] = RO_osm_nsd_id
832 nsd_RO.pop("_id", None)
833 nsd_RO.pop("_admin", None)
834 for c_vnf in nsd_RO.get("constituent-vnfd", ()):
835 member_vnf_index = c_vnf["member-vnf-index"]
836 c_vnf["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
837 for c_vld in nsd_RO.get("vld", ()):
838 for cp in c_vld.get("vnfd-connection-point-ref", ()):
839 member_vnf_index = cp["member-vnf-index-ref"]
840 cp["vnfd-id-ref"] = vnf_index_2_RO_id[member_vnf_index]
tiernod8323042019-08-09 11:32:23 +0000841
tiernoe876f672020-02-13 14:34:48 +0000842 desc = await self.RO.create("nsd", descriptor=nsd_RO)
843 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
844 db_nsr_update["_admin.deployed.RO.nsd_id"] = RO_nsd_uuid = desc["uuid"]
845 self.logger.debug(logging_text + "nsd={} created at RO. RO_id={}".format(nsd_ref, RO_nsd_uuid))
tiernod8323042019-08-09 11:32:23 +0000846 self.update_db_2("nsrs", nsr_id, db_nsr_update)
847
tiernoe876f672020-02-13 14:34:48 +0000848 # Crate ns at RO
849 stage[2] = "Creating nsd={} at RO".format(nsd_ref)
850 db_nsr_update["detailed-status"] = " ".join(stage)
851 self.update_db_2("nsrs", nsr_id, db_nsr_update)
852 self._write_op_status(nslcmop_id, stage)
tiernod8323042019-08-09 11:32:23 +0000853
tiernoe876f672020-02-13 14:34:48 +0000854 # if present use it unless in error status
855 RO_nsr_id = deep_get(db_nsr, ("_admin", "deployed", "RO", "nsr_id"))
856 if RO_nsr_id:
857 try:
858 stage[2] = "Looking for existing ns at RO"
859 db_nsr_update["detailed-status"] = " ".join(stage)
860 self.update_db_2("nsrs", nsr_id, db_nsr_update)
861 self._write_op_status(nslcmop_id, stage)
862 # self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
863 desc = await self.RO.show("ns", RO_nsr_id)
tiernod8323042019-08-09 11:32:23 +0000864
tiernoe876f672020-02-13 14:34:48 +0000865 except ROclient.ROClientException as e:
866 if e.http_code != HTTPStatus.NOT_FOUND:
867 raise
868 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
869 if RO_nsr_id:
870 ns_status, ns_status_info = self.RO.check_ns_status(desc)
871 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
872 if ns_status == "ERROR":
873 stage[2] = "Deleting ns at RO. RO_ns_id={}".format(RO_nsr_id)
874 self.logger.debug(logging_text + stage[2])
875 await self.RO.delete("ns", RO_nsr_id)
876 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = None
877 if not RO_nsr_id:
878 stage[2] = "Checking dependencies"
879 db_nsr_update["detailed-status"] = " ".join(stage)
880 self.update_db_2("nsrs", nsr_id, db_nsr_update)
881 self._write_op_status(nslcmop_id, stage)
882 # self.logger.debug(logging_text + stage[2])
tiernod8323042019-08-09 11:32:23 +0000883
tiernoe876f672020-02-13 14:34:48 +0000884 # check if VIM is creating and wait look if previous tasks in process
885 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account", ns_params["vimAccountId"])
886 if task_dependency:
887 stage[2] = "Waiting for related tasks '{}' to be completed".format(task_name)
888 self.logger.debug(logging_text + stage[2])
889 await asyncio.wait(task_dependency, timeout=3600)
890 if ns_params.get("vnf"):
891 for vnf in ns_params["vnf"]:
892 if "vimAccountId" in vnf:
893 task_name, task_dependency = self.lcm_tasks.lookfor_related("vim_account",
894 vnf["vimAccountId"])
895 if task_dependency:
896 stage[2] = "Waiting for related tasks '{}' to be completed.".format(task_name)
897 self.logger.debug(logging_text + stage[2])
898 await asyncio.wait(task_dependency, timeout=3600)
899
900 stage[2] = "Checking instantiation parameters."
901 RO_ns_params = self.ns_params_2_RO(ns_params, nsd, db_vnfds_ref, n2vc_key_list)
902 stage[2] = "Deploying ns at VIM."
903 db_nsr_update["detailed-status"] = " ".join(stage)
904 self.update_db_2("nsrs", nsr_id, db_nsr_update)
905 self._write_op_status(nslcmop_id, stage)
906
907 desc = await self.RO.create("ns", descriptor=RO_ns_params, name=db_nsr["name"], scenario=RO_nsd_uuid)
908 RO_nsr_id = db_nsr_update["_admin.deployed.RO.nsr_id"] = desc["uuid"]
909 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
910 db_nsr_update["_admin.deployed.RO.nsr_status"] = "BUILD"
911 self.logger.debug(logging_text + "ns created at RO. RO_id={}".format(desc["uuid"]))
912
913 # wait until NS is ready
914 stage[2] = "Waiting VIM to deploy ns."
915 db_nsr_update["detailed-status"] = " ".join(stage)
916 self.update_db_2("nsrs", nsr_id, db_nsr_update)
917 self._write_op_status(nslcmop_id, stage)
918 detailed_status_old = None
919 self.logger.debug(logging_text + stage[2] + " RO_ns_id={}".format(RO_nsr_id))
920
921 old_desc = None
922 while time() <= start_deploy + timeout_ns_deploy:
tiernod8323042019-08-09 11:32:23 +0000923 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +0000924
tiernoe876f672020-02-13 14:34:48 +0000925 # deploymentStatus
926 if desc != old_desc:
927 # desc has changed => update db
928 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
929 old_desc = desc
tiernod8323042019-08-09 11:32:23 +0000930
tiernoe876f672020-02-13 14:34:48 +0000931 ns_status, ns_status_info = self.RO.check_ns_status(desc)
932 db_nsr_update["_admin.deployed.RO.nsr_status"] = ns_status
933 if ns_status == "ERROR":
934 raise ROclient.ROClientException(ns_status_info)
935 elif ns_status == "BUILD":
936 stage[2] = "VIM: ({})".format(ns_status_info)
937 elif ns_status == "ACTIVE":
938 stage[2] = "Waiting for management IP address reported by the VIM. Updating VNFRs."
939 try:
940 self.ns_update_vnfr(db_vnfrs, desc)
941 break
942 except LcmExceptionNoMgmtIP:
943 pass
944 else:
945 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
946 if stage[2] != detailed_status_old:
947 detailed_status_old = stage[2]
948 db_nsr_update["detailed-status"] = " ".join(stage)
949 self.update_db_2("nsrs", nsr_id, db_nsr_update)
950 self._write_op_status(nslcmop_id, stage)
951 await asyncio.sleep(5, loop=self.loop)
952 else: # timeout_ns_deploy
953 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tiernod8323042019-08-09 11:32:23 +0000954
tiernoe876f672020-02-13 14:34:48 +0000955 # Updating NSR
956 self.ns_update_nsr(db_nsr_update, db_nsr, desc)
tiernod8323042019-08-09 11:32:23 +0000957
tiernoe876f672020-02-13 14:34:48 +0000958 db_nsr_update["_admin.deployed.RO.operational-status"] = "running"
959 # db_nsr["_admin.deployed.RO.detailed-status"] = "Deployed at VIM"
960 stage[2] = "Deployed at VIM"
961 db_nsr_update["detailed-status"] = " ".join(stage)
962 self.update_db_2("nsrs", nsr_id, db_nsr_update)
963 self._write_op_status(nslcmop_id, stage)
964 # await self._on_update_n2vc_db("nsrs", {"_id": nsr_id}, "_admin.deployed", db_nsr_update)
965 # self.logger.debug(logging_text + "Deployed at VIM")
966 except (ROclient.ROClientException, LcmException, DbException) as e:
tierno067e04a2020-03-31 12:53:13 +0000967 stage[2] = "ERROR deploying at VIM"
tiernoe876f672020-02-13 14:34:48 +0000968 self.set_vnfr_at_error(db_vnfrs, str(e))
969 raise
quilesj7e13aeb2019-10-08 13:34:55 +0200970
tiernoa5088192019-11-26 16:12:53 +0000971 async def wait_vm_up_insert_key_ro(self, logging_text, nsr_id, vnfr_id, vdu_id, vdu_index, pub_key=None, user=None):
972 """
973 Wait for ip addres at RO, and optionally, insert public key in virtual machine
974 :param logging_text: prefix use for logging
975 :param nsr_id:
976 :param vnfr_id:
977 :param vdu_id:
978 :param vdu_index:
979 :param pub_key: public ssh key to inject, None to skip
980 :param user: user to apply the public ssh key
981 :return: IP address
982 """
quilesj7e13aeb2019-10-08 13:34:55 +0200983
tiernoa5088192019-11-26 16:12:53 +0000984 # self.logger.debug(logging_text + "Starting wait_vm_up_insert_key_ro")
tiernod8323042019-08-09 11:32:23 +0000985 ro_nsr_id = None
986 ip_address = None
987 nb_tries = 0
988 target_vdu_id = None
quilesj3149f262019-12-03 10:58:10 +0000989 ro_retries = 0
quilesj7e13aeb2019-10-08 13:34:55 +0200990
tiernod8323042019-08-09 11:32:23 +0000991 while True:
quilesj7e13aeb2019-10-08 13:34:55 +0200992
quilesj3149f262019-12-03 10:58:10 +0000993 ro_retries += 1
994 if ro_retries >= 360: # 1 hour
995 raise LcmException("Not found _admin.deployed.RO.nsr_id for nsr_id: {}".format(nsr_id))
996
tiernod8323042019-08-09 11:32:23 +0000997 await asyncio.sleep(10, loop=self.loop)
quilesj7e13aeb2019-10-08 13:34:55 +0200998
999 # get ip address
tiernod8323042019-08-09 11:32:23 +00001000 if not target_vdu_id:
1001 db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id})
quilesj3149f262019-12-03 10:58:10 +00001002
1003 if not vdu_id: # for the VNF case
tiernoe876f672020-02-13 14:34:48 +00001004 if db_vnfr.get("status") == "ERROR":
1005 raise LcmException("Cannot inject ssh-key because target VNF is in error state")
tiernod8323042019-08-09 11:32:23 +00001006 ip_address = db_vnfr.get("ip-address")
1007 if not ip_address:
1008 continue
quilesj3149f262019-12-03 10:58:10 +00001009 vdur = next((x for x in get_iterable(db_vnfr, "vdur") if x.get("ip-address") == ip_address), None)
1010 else: # VDU case
1011 vdur = next((x for x in get_iterable(db_vnfr, "vdur")
1012 if x.get("vdu-id-ref") == vdu_id and x.get("count-index") == vdu_index), None)
1013
tierno0e8c3f02020-03-12 17:18:21 +00001014 if not vdur and len(db_vnfr.get("vdur", ())) == 1: # If only one, this should be the target vdu
1015 vdur = db_vnfr["vdur"][0]
quilesj3149f262019-12-03 10:58:10 +00001016 if not vdur:
tierno0e8c3f02020-03-12 17:18:21 +00001017 raise LcmException("Not found vnfr_id={}, vdu_id={}, vdu_index={}".format(vnfr_id, vdu_id,
1018 vdu_index))
quilesj7e13aeb2019-10-08 13:34:55 +02001019
tierno0e8c3f02020-03-12 17:18:21 +00001020 if vdur.get("pdu-type") or vdur.get("status") == "ACTIVE":
quilesj3149f262019-12-03 10:58:10 +00001021 ip_address = vdur.get("ip-address")
1022 if not ip_address:
1023 continue
1024 target_vdu_id = vdur["vdu-id-ref"]
1025 elif vdur.get("status") == "ERROR":
1026 raise LcmException("Cannot inject ssh-key because target VM is in error state")
1027
tiernod8323042019-08-09 11:32:23 +00001028 if not target_vdu_id:
1029 continue
tiernod8323042019-08-09 11:32:23 +00001030
quilesj7e13aeb2019-10-08 13:34:55 +02001031 # inject public key into machine
1032 if pub_key and user:
tiernoe876f672020-02-13 14:34:48 +00001033 # wait until NS is deployed at RO
1034 if not ro_nsr_id:
1035 db_nsrs = self.db.get_one("nsrs", {"_id": nsr_id})
1036 ro_nsr_id = deep_get(db_nsrs, ("_admin", "deployed", "RO", "nsr_id"))
1037 if not ro_nsr_id:
1038 continue
1039
tiernoa5088192019-11-26 16:12:53 +00001040 # self.logger.debug(logging_text + "Inserting RO key")
tierno0e8c3f02020-03-12 17:18:21 +00001041 if vdur.get("pdu-type"):
1042 self.logger.error(logging_text + "Cannot inject ssh-ky to a PDU")
1043 return ip_address
quilesj7e13aeb2019-10-08 13:34:55 +02001044 try:
1045 ro_vm_id = "{}-{}".format(db_vnfr["member-vnf-index-ref"], target_vdu_id) # TODO add vdu_index
1046 result_dict = await self.RO.create_action(
1047 item="ns",
1048 item_id_name=ro_nsr_id,
1049 descriptor={"add_public_key": pub_key, "vms": [ro_vm_id], "user": user}
1050 )
1051 # result_dict contains the format {VM-id: {vim_result: 200, description: text}}
1052 if not result_dict or not isinstance(result_dict, dict):
1053 raise LcmException("Unknown response from RO when injecting key")
1054 for result in result_dict.values():
1055 if result.get("vim_result") == 200:
1056 break
1057 else:
1058 raise ROclient.ROClientException("error injecting key: {}".format(
1059 result.get("description")))
1060 break
1061 except ROclient.ROClientException as e:
tiernoa5088192019-11-26 16:12:53 +00001062 if not nb_tries:
1063 self.logger.debug(logging_text + "error injecting key: {}. Retrying until {} seconds".
1064 format(e, 20*10))
quilesj7e13aeb2019-10-08 13:34:55 +02001065 nb_tries += 1
tiernoa5088192019-11-26 16:12:53 +00001066 if nb_tries >= 20:
quilesj7e13aeb2019-10-08 13:34:55 +02001067 raise LcmException("Reaching max tries injecting key. Error: {}".format(e))
quilesj7e13aeb2019-10-08 13:34:55 +02001068 else:
quilesj7e13aeb2019-10-08 13:34:55 +02001069 break
1070
1071 return ip_address
1072
tierno5ee02052019-12-05 19:55:02 +00001073 async def _wait_dependent_n2vc(self, nsr_id, vca_deployed_list, vca_index):
1074 """
1075 Wait until dependent VCA deployments have been finished. NS wait for VNFs and VDUs. VNFs for VDUs
1076 """
1077 my_vca = vca_deployed_list[vca_index]
1078 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
quilesj3655ae02019-12-12 16:08:35 +00001079 # vdu or kdu: no dependencies
tierno5ee02052019-12-05 19:55:02 +00001080 return
1081 timeout = 300
1082 while timeout >= 0:
quilesj3655ae02019-12-12 16:08:35 +00001083 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1084 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
1085 configuration_status_list = db_nsr["configurationStatus"]
1086 for index, vca_deployed in enumerate(configuration_status_list):
tierno5ee02052019-12-05 19:55:02 +00001087 if index == vca_index:
quilesj3655ae02019-12-12 16:08:35 +00001088 # myself
tierno5ee02052019-12-05 19:55:02 +00001089 continue
1090 if not my_vca.get("member-vnf-index") or \
1091 (vca_deployed.get("member-vnf-index") == my_vca.get("member-vnf-index")):
quilesj3655ae02019-12-12 16:08:35 +00001092 internal_status = configuration_status_list[index].get("status")
1093 if internal_status == 'READY':
1094 continue
1095 elif internal_status == 'BROKEN':
tierno5ee02052019-12-05 19:55:02 +00001096 raise LcmException("Configuration aborted because dependent charm/s has failed")
quilesj3655ae02019-12-12 16:08:35 +00001097 else:
1098 break
tierno5ee02052019-12-05 19:55:02 +00001099 else:
quilesj3655ae02019-12-12 16:08:35 +00001100 # no dependencies, return
tierno5ee02052019-12-05 19:55:02 +00001101 return
1102 await asyncio.sleep(10)
1103 timeout -= 1
tierno5ee02052019-12-05 19:55:02 +00001104
1105 raise LcmException("Configuration aborted because dependent charm/s timeout")
1106
tiernoe876f672020-02-13 14:34:48 +00001107 async def instantiate_N2VC(self, logging_text, vca_index, nsi_id, db_nsr, db_vnfr, vdu_id, kdu_name, vdu_index,
1108 config_descriptor, deploy_params, base_folder, nslcmop_id, stage):
tiernod8323042019-08-09 11:32:23 +00001109 nsr_id = db_nsr["_id"]
1110 db_update_entry = "_admin.deployed.VCA.{}.".format(vca_index)
tiernoda6fb102019-11-23 00:36:52 +00001111 vca_deployed_list = db_nsr["_admin"]["deployed"]["VCA"]
tiernod8323042019-08-09 11:32:23 +00001112 vca_deployed = db_nsr["_admin"]["deployed"]["VCA"][vca_index]
quilesj7e13aeb2019-10-08 13:34:55 +02001113 db_dict = {
1114 'collection': 'nsrs',
1115 'filter': {'_id': nsr_id},
1116 'path': db_update_entry
1117 }
tiernod8323042019-08-09 11:32:23 +00001118 step = ""
1119 try:
quilesj3655ae02019-12-12 16:08:35 +00001120
1121 element_type = 'NS'
1122 element_under_configuration = nsr_id
1123
tiernod8323042019-08-09 11:32:23 +00001124 vnfr_id = None
1125 if db_vnfr:
1126 vnfr_id = db_vnfr["_id"]
1127
1128 namespace = "{nsi}.{ns}".format(
1129 nsi=nsi_id if nsi_id else "",
1130 ns=nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00001131
tiernod8323042019-08-09 11:32:23 +00001132 if vnfr_id:
quilesj3655ae02019-12-12 16:08:35 +00001133 element_type = 'VNF'
1134 element_under_configuration = vnfr_id
quilesjb8a35dd2020-01-09 15:10:14 +00001135 namespace += ".{}".format(vnfr_id)
tiernod8323042019-08-09 11:32:23 +00001136 if vdu_id:
1137 namespace += ".{}-{}".format(vdu_id, vdu_index or 0)
quilesj3655ae02019-12-12 16:08:35 +00001138 element_type = 'VDU'
quilesjb8a35dd2020-01-09 15:10:14 +00001139 element_under_configuration = "{}-{}".format(vdu_id, vdu_index or 0)
tierno51183952020-04-03 15:48:18 +00001140 elif kdu_name:
1141 namespace += ".{}".format(kdu_name)
1142 element_type = 'KDU'
1143 element_under_configuration = kdu_name
tiernod8323042019-08-09 11:32:23 +00001144
1145 # Get artifact path
David Garcia56522772020-01-20 13:19:29 +01001146 self.fs.sync() # Sync from FSMongo
David Garcia485b2912019-12-04 14:01:50 +01001147 artifact_path = "{}/{}/charms/{}".format(
tiernod8323042019-08-09 11:32:23 +00001148 base_folder["folder"],
1149 base_folder["pkg-dir"],
1150 config_descriptor["juju"]["charm"]
1151 )
1152
quilesj7e13aeb2019-10-08 13:34:55 +02001153 is_proxy_charm = deep_get(config_descriptor, ('juju', 'charm')) is not None
1154 if deep_get(config_descriptor, ('juju', 'proxy')) is False:
tiernod8323042019-08-09 11:32:23 +00001155 is_proxy_charm = False
1156
1157 # n2vc_redesign STEP 3.1
quilesj7e13aeb2019-10-08 13:34:55 +02001158
1159 # find old ee_id if exists
tiernod8323042019-08-09 11:32:23 +00001160 ee_id = vca_deployed.get("ee_id")
tiernod8323042019-08-09 11:32:23 +00001161
quilesj7e13aeb2019-10-08 13:34:55 +02001162 # create or register execution environment in VCA
1163 if is_proxy_charm:
quilesj3655ae02019-12-12 16:08:35 +00001164
tiernoc231a872020-01-21 08:49:05 +00001165 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001166 nsr_id=nsr_id,
1167 vca_index=vca_index,
1168 status='CREATING',
1169 element_under_configuration=element_under_configuration,
1170 element_type=element_type
1171 )
1172
quilesj7e13aeb2019-10-08 13:34:55 +02001173 step = "create execution environment"
1174 self.logger.debug(logging_text + step)
tierno3bedc9b2019-11-27 15:46:57 +00001175 ee_id, credentials = await self.n2vc.create_execution_environment(namespace=namespace,
1176 reuse_ee_id=ee_id,
1177 db_dict=db_dict)
quilesj3655ae02019-12-12 16:08:35 +00001178
quilesj7e13aeb2019-10-08 13:34:55 +02001179 else:
tierno3bedc9b2019-11-27 15:46:57 +00001180 step = "Waiting to VM being up and getting IP address"
1181 self.logger.debug(logging_text + step)
1182 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1183 user=None, pub_key=None)
1184 credentials = {"hostname": rw_mgmt_ip}
quilesj7e13aeb2019-10-08 13:34:55 +02001185 # get username
tierno3bedc9b2019-11-27 15:46:57 +00001186 username = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
quilesj7e13aeb2019-10-08 13:34:55 +02001187 # TODO remove this when changes on IM regarding config-access:ssh-access:default-user were
1188 # merged. Meanwhile let's get username from initial-config-primitive
tierno3bedc9b2019-11-27 15:46:57 +00001189 if not username and config_descriptor.get("initial-config-primitive"):
1190 for config_primitive in config_descriptor["initial-config-primitive"]:
1191 for param in config_primitive.get("parameter", ()):
1192 if param["name"] == "ssh-username":
1193 username = param["value"]
1194 break
1195 if not username:
1196 raise LcmException("Cannot determine the username neither with 'initial-config-promitive' nor with "
1197 "'config-access.ssh-access.default-user'")
1198 credentials["username"] = username
quilesj7e13aeb2019-10-08 13:34:55 +02001199 # n2vc_redesign STEP 3.2
tierno3bedc9b2019-11-27 15:46:57 +00001200
tiernoc231a872020-01-21 08:49:05 +00001201 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001202 nsr_id=nsr_id,
1203 vca_index=vca_index,
1204 status='REGISTERING',
1205 element_under_configuration=element_under_configuration,
1206 element_type=element_type
1207 )
1208
tierno3bedc9b2019-11-27 15:46:57 +00001209 step = "register execution environment {}".format(credentials)
quilesj7e13aeb2019-10-08 13:34:55 +02001210 self.logger.debug(logging_text + step)
tierno3bedc9b2019-11-27 15:46:57 +00001211 ee_id = await self.n2vc.register_execution_environment(credentials=credentials, namespace=namespace,
1212 db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001213
1214 # for compatibility with MON/POL modules, the need model and application name at database
1215 # TODO ask to N2VC instead of assuming the format "model_name.application_name"
1216 ee_id_parts = ee_id.split('.')
1217 model_name = ee_id_parts[0]
1218 application_name = ee_id_parts[1]
tierno51183952020-04-03 15:48:18 +00001219 db_nsr_update = {db_update_entry + "model": model_name,
1220 db_update_entry + "application": application_name,
1221 db_update_entry + "ee_id": ee_id}
tiernod8323042019-08-09 11:32:23 +00001222
1223 # n2vc_redesign STEP 3.3
tierno3bedc9b2019-11-27 15:46:57 +00001224
tiernod8323042019-08-09 11:32:23 +00001225 step = "Install configuration Software"
quilesj3655ae02019-12-12 16:08:35 +00001226
tiernoc231a872020-01-21 08:49:05 +00001227 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001228 nsr_id=nsr_id,
1229 vca_index=vca_index,
1230 status='INSTALLING SW',
1231 element_under_configuration=element_under_configuration,
tierno51183952020-04-03 15:48:18 +00001232 element_type=element_type,
1233 other_update=db_nsr_update
quilesj3655ae02019-12-12 16:08:35 +00001234 )
1235
tierno3bedc9b2019-11-27 15:46:57 +00001236 # TODO check if already done
quilesj7e13aeb2019-10-08 13:34:55 +02001237 self.logger.debug(logging_text + step)
David Garcia18a63322020-04-01 16:14:59 +02001238 config = None
1239 if not is_proxy_charm:
1240 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
1241 if initial_config_primitive_list:
1242 for primitive in initial_config_primitive_list:
1243 if primitive["name"] == "config":
1244 config = self._map_primitive_params(
1245 primitive,
1246 {},
1247 deploy_params
1248 )
1249 break
1250 await self.n2vc.install_configuration_sw(
1251 ee_id=ee_id,
1252 artifact_path=artifact_path,
1253 db_dict=db_dict,
1254 config=config
1255 )
quilesj7e13aeb2019-10-08 13:34:55 +02001256
quilesj63f90042020-01-17 09:53:55 +00001257 # write in db flag of configuration_sw already installed
1258 self.update_db_2("nsrs", nsr_id, {db_update_entry + "config_sw_installed": True})
1259
1260 # add relations for this VCA (wait for other peers related with this VCA)
1261 await self._add_vca_relations(logging_text=logging_text, nsr_id=nsr_id, vca_index=vca_index)
1262
quilesj7e13aeb2019-10-08 13:34:55 +02001263 # if SSH access is required, then get execution environment SSH public
tierno3bedc9b2019-11-27 15:46:57 +00001264 if is_proxy_charm: # if native charm we have waited already to VM be UP
1265 pub_key = None
1266 user = None
1267 if deep_get(config_descriptor, ("config-access", "ssh-access", "required")):
1268 # Needed to inject a ssh key
1269 user = deep_get(config_descriptor, ("config-access", "ssh-access", "default-user"))
1270 step = "Install configuration Software, getting public ssh key"
1271 pub_key = await self.n2vc.get_ee_ssh_public__key(ee_id=ee_id, db_dict=db_dict)
quilesj7e13aeb2019-10-08 13:34:55 +02001272
tiernoacc90452019-12-10 11:06:54 +00001273 step = "Insert public key into VM user={} ssh_key={}".format(user, pub_key)
tierno3bedc9b2019-11-27 15:46:57 +00001274 else:
1275 step = "Waiting to VM being up and getting IP address"
1276 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001277
tierno3bedc9b2019-11-27 15:46:57 +00001278 # n2vc_redesign STEP 5.1
1279 # wait for RO (ip-address) Insert pub_key into VM
tierno5ee02052019-12-05 19:55:02 +00001280 if vnfr_id:
1281 rw_mgmt_ip = await self.wait_vm_up_insert_key_ro(logging_text, nsr_id, vnfr_id, vdu_id, vdu_index,
1282 user=user, pub_key=pub_key)
1283 else:
1284 rw_mgmt_ip = None # This is for a NS configuration
tierno3bedc9b2019-11-27 15:46:57 +00001285
1286 self.logger.debug(logging_text + ' VM_ip_address={}'.format(rw_mgmt_ip))
quilesj7e13aeb2019-10-08 13:34:55 +02001287
tiernoa5088192019-11-26 16:12:53 +00001288 # store rw_mgmt_ip in deploy params for later replacement
quilesj7e13aeb2019-10-08 13:34:55 +02001289 deploy_params["rw_mgmt_ip"] = rw_mgmt_ip
tiernod8323042019-08-09 11:32:23 +00001290
1291 # n2vc_redesign STEP 6 Execute initial config primitive
quilesj7e13aeb2019-10-08 13:34:55 +02001292 step = 'execute initial config primitive'
tiernoa5088192019-11-26 16:12:53 +00001293 initial_config_primitive_list = config_descriptor.get('initial-config-primitive')
quilesj7e13aeb2019-10-08 13:34:55 +02001294
1295 # sort initial config primitives by 'seq'
quilesj63f90042020-01-17 09:53:55 +00001296 if initial_config_primitive_list:
1297 try:
1298 initial_config_primitive_list.sort(key=lambda val: int(val['seq']))
1299 except Exception as e:
1300 self.logger.error(logging_text + step + ": " + str(e))
1301 else:
1302 self.logger.debug(logging_text + step + ": No initial-config-primitive")
quilesj7e13aeb2019-10-08 13:34:55 +02001303
tiernoda6fb102019-11-23 00:36:52 +00001304 # add config if not present for NS charm
1305 initial_config_primitive_list = self._get_initial_config_primitive_list(initial_config_primitive_list,
1306 vca_deployed)
quilesj3655ae02019-12-12 16:08:35 +00001307
1308 # wait for dependent primitives execution (NS -> VNF -> VDU)
tierno5ee02052019-12-05 19:55:02 +00001309 if initial_config_primitive_list:
1310 await self._wait_dependent_n2vc(nsr_id, vca_deployed_list, vca_index)
quilesj3655ae02019-12-12 16:08:35 +00001311
1312 # stage, in function of element type: vdu, kdu, vnf or ns
1313 my_vca = vca_deployed_list[vca_index]
1314 if my_vca.get("vdu_id") or my_vca.get("kdu_name"):
1315 # VDU or KDU
tiernoe876f672020-02-13 14:34:48 +00001316 stage[0] = 'Stage 3/5: running Day-1 primitives for VDU.'
quilesj3655ae02019-12-12 16:08:35 +00001317 elif my_vca.get("member-vnf-index"):
1318 # VNF
tiernoe876f672020-02-13 14:34:48 +00001319 stage[0] = 'Stage 4/5: running Day-1 primitives for VNF.'
quilesj3655ae02019-12-12 16:08:35 +00001320 else:
1321 # NS
tiernoe876f672020-02-13 14:34:48 +00001322 stage[0] = 'Stage 5/5: running Day-1 primitives for NS.'
quilesj3655ae02019-12-12 16:08:35 +00001323
tiernoc231a872020-01-21 08:49:05 +00001324 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001325 nsr_id=nsr_id,
1326 vca_index=vca_index,
1327 status='EXECUTING PRIMITIVE'
1328 )
1329
1330 self._write_op_status(
1331 op_id=nslcmop_id,
1332 stage=stage
1333 )
1334
tiernoe876f672020-02-13 14:34:48 +00001335 check_if_terminated_needed = True
tiernod8323042019-08-09 11:32:23 +00001336 for initial_config_primitive in initial_config_primitive_list:
tiernoda6fb102019-11-23 00:36:52 +00001337 # adding information on the vca_deployed if it is a NS execution environment
1338 if not vca_deployed["member-vnf-index"]:
David Garciad4816682019-12-09 14:57:43 +01001339 deploy_params["ns_config_info"] = json.dumps(self._get_ns_config_info(nsr_id))
tiernod8323042019-08-09 11:32:23 +00001340 # TODO check if already done
1341 primitive_params_ = self._map_primitive_params(initial_config_primitive, {}, deploy_params)
tierno3bedc9b2019-11-27 15:46:57 +00001342
tiernod8323042019-08-09 11:32:23 +00001343 step = "execute primitive '{}' params '{}'".format(initial_config_primitive["name"], primitive_params_)
1344 self.logger.debug(logging_text + step)
quilesj7e13aeb2019-10-08 13:34:55 +02001345 await self.n2vc.exec_primitive(
1346 ee_id=ee_id,
1347 primitive_name=initial_config_primitive["name"],
1348 params_dict=primitive_params_,
1349 db_dict=db_dict
1350 )
tiernoe876f672020-02-13 14:34:48 +00001351 # Once some primitive has been exec, check and write at db if it needs to exec terminated primitives
1352 if check_if_terminated_needed:
1353 if config_descriptor.get('terminate-config-primitive'):
1354 self.update_db_2("nsrs", nsr_id, {db_update_entry + "needed_terminate": True})
1355 check_if_terminated_needed = False
quilesj3655ae02019-12-12 16:08:35 +00001356
tiernod8323042019-08-09 11:32:23 +00001357 # TODO register in database that primitive is done
quilesj7e13aeb2019-10-08 13:34:55 +02001358
1359 step = "instantiated at VCA"
1360 self.logger.debug(logging_text + step)
1361
tiernoc231a872020-01-21 08:49:05 +00001362 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001363 nsr_id=nsr_id,
1364 vca_index=vca_index,
1365 status='READY'
1366 )
1367
tiernod8323042019-08-09 11:32:23 +00001368 except Exception as e: # TODO not use Exception but N2VC exception
quilesj3655ae02019-12-12 16:08:35 +00001369 # self.update_db_2("nsrs", nsr_id, {db_update_entry + "instantiation": "FAILED"})
tiernoe876f672020-02-13 14:34:48 +00001370 if not isinstance(e, (DbException, N2VCException, LcmException, asyncio.CancelledError)):
1371 self.logger.error("Exception while {} : {}".format(step, e), exc_info=True)
tiernoc231a872020-01-21 08:49:05 +00001372 self._write_configuration_status(
quilesj3655ae02019-12-12 16:08:35 +00001373 nsr_id=nsr_id,
1374 vca_index=vca_index,
1375 status='BROKEN'
1376 )
tiernoe876f672020-02-13 14:34:48 +00001377 raise LcmException("{} {}".format(step, e)) from e
tiernod8323042019-08-09 11:32:23 +00001378
quilesj4cda56b2019-12-05 10:02:20 +00001379 def _write_ns_status(self, nsr_id: str, ns_state: str, current_operation: str, current_operation_id: str,
tiernoa2143262020-03-27 16:20:40 +00001380 error_description: str = None, error_detail: str = None, other_update: dict = None):
tiernoe876f672020-02-13 14:34:48 +00001381 """
1382 Update db_nsr fields.
1383 :param nsr_id:
1384 :param ns_state:
1385 :param current_operation:
1386 :param current_operation_id:
1387 :param error_description:
tiernoa2143262020-03-27 16:20:40 +00001388 :param error_detail:
tiernoe876f672020-02-13 14:34:48 +00001389 :param other_update: Other required changes at database if provided, will be cleared
1390 :return:
1391 """
quilesj4cda56b2019-12-05 10:02:20 +00001392 try:
tiernoe876f672020-02-13 14:34:48 +00001393 db_dict = other_update or {}
1394 db_dict["_admin.nslcmop"] = current_operation_id # for backward compatibility
1395 db_dict["_admin.current-operation"] = current_operation_id
1396 db_dict["_admin.operation-type"] = current_operation if current_operation != "IDLE" else None
quilesj4cda56b2019-12-05 10:02:20 +00001397 db_dict["currentOperation"] = current_operation
1398 db_dict["currentOperationID"] = current_operation_id
1399 db_dict["errorDescription"] = error_description
tiernoa2143262020-03-27 16:20:40 +00001400 db_dict["errorDetail"] = error_detail
tiernoe876f672020-02-13 14:34:48 +00001401
1402 if ns_state:
1403 db_dict["nsState"] = ns_state
quilesj4cda56b2019-12-05 10:02:20 +00001404 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001405 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001406 self.logger.warn('Error writing NS status, ns={}: {}'.format(nsr_id, e))
1407
tiernoe876f672020-02-13 14:34:48 +00001408 def _write_op_status(self, op_id: str, stage: list = None, error_message: str = None, queuePosition: int = 0,
1409 operation_state: str = None, other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001410 try:
tiernoe876f672020-02-13 14:34:48 +00001411 db_dict = other_update or {}
quilesj3655ae02019-12-12 16:08:35 +00001412 db_dict['queuePosition'] = queuePosition
tiernoe876f672020-02-13 14:34:48 +00001413 if isinstance(stage, list):
1414 db_dict['stage'] = stage[0]
1415 db_dict['detailed-status'] = " ".join(stage)
1416 elif stage is not None:
1417 db_dict['stage'] = str(stage)
1418
1419 if error_message is not None:
quilesj3655ae02019-12-12 16:08:35 +00001420 db_dict['errorMessage'] = error_message
tiernoe876f672020-02-13 14:34:48 +00001421 if operation_state is not None:
1422 db_dict['operationState'] = operation_state
1423 db_dict["statusEnteredTime"] = time()
quilesj3655ae02019-12-12 16:08:35 +00001424 self.update_db_2("nslcmops", op_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001425 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001426 self.logger.warn('Error writing OPERATION status for op_id: {} -> {}'.format(op_id, e))
1427
tierno51183952020-04-03 15:48:18 +00001428 def _write_all_config_status(self, db_nsr: dict, status: str):
quilesj3655ae02019-12-12 16:08:35 +00001429 try:
tierno51183952020-04-03 15:48:18 +00001430 nsr_id = db_nsr["_id"]
quilesj3655ae02019-12-12 16:08:35 +00001431 # configurationStatus
1432 config_status = db_nsr.get('configurationStatus')
1433 if config_status:
tierno51183952020-04-03 15:48:18 +00001434 db_nsr_update = {"configurationStatus.{}.status".format(index): status for index, v in
1435 enumerate(config_status) if v}
quilesj3655ae02019-12-12 16:08:35 +00001436 # update status
tierno51183952020-04-03 15:48:18 +00001437 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001438
tiernoe876f672020-02-13 14:34:48 +00001439 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001440 self.logger.warn('Error writing all configuration status, ns={}: {}'.format(nsr_id, e))
1441
quilesj63f90042020-01-17 09:53:55 +00001442 def _write_configuration_status(self, nsr_id: str, vca_index: int, status: str = None,
tierno51183952020-04-03 15:48:18 +00001443 element_under_configuration: str = None, element_type: str = None,
1444 other_update: dict = None):
quilesj3655ae02019-12-12 16:08:35 +00001445
1446 # self.logger.debug('_write_configuration_status(): vca_index={}, status={}'
1447 # .format(vca_index, status))
1448
1449 try:
1450 db_path = 'configurationStatus.{}.'.format(vca_index)
tierno51183952020-04-03 15:48:18 +00001451 db_dict = other_update or {}
quilesj63f90042020-01-17 09:53:55 +00001452 if status:
1453 db_dict[db_path + 'status'] = status
quilesj3655ae02019-12-12 16:08:35 +00001454 if element_under_configuration:
1455 db_dict[db_path + 'elementUnderConfiguration'] = element_under_configuration
1456 if element_type:
1457 db_dict[db_path + 'elementType'] = element_type
1458 self.update_db_2("nsrs", nsr_id, db_dict)
tiernoe876f672020-02-13 14:34:48 +00001459 except DbException as e:
quilesj3655ae02019-12-12 16:08:35 +00001460 self.logger.warn('Error writing configuration status={}, ns={}, vca_index={}: {}'
1461 .format(status, nsr_id, vca_index, e))
quilesj4cda56b2019-12-05 10:02:20 +00001462
tierno38089af2020-04-16 07:56:58 +00001463 async def _do_placement(self, logging_text, db_nslcmop, db_vnfrs):
1464 """
1465 Check and computes the placement, (vim account where to deploy). If it is decided by an external tool, it
1466 sends the request via kafka and wait until the result is wrote at database (nslcmops _admin.plca).
1467 Database is used because the result can be obtained from a different LCM worker in case of HA.
1468 :param logging_text: contains the prefix for logging, with the ns and nslcmop identifiers
1469 :param db_nslcmop: database content of nslcmop
1470 :param db_vnfrs: database content of vnfrs, indexed by member-vnf-index.
1471 :return: None. Modifies database vnfrs and parameter db_vnfr with the computed 'vim-account-id'
1472 """
1473 nslcmop_id = db_nslcmop['_id']
magnussonle9198bb2020-01-21 13:00:51 +01001474 placement_engine = deep_get(db_nslcmop, ('operationParams', 'placement-engine'))
1475 if placement_engine == "PLA":
tierno38089af2020-04-16 07:56:58 +00001476 self.logger.debug(logging_text + "Invoke and wait for placement optimization")
1477 await self.msg.aiowrite("pla", "get_placement", {'nslcmopId': nslcmop_id}, loop=self.loop)
magnussonle9198bb2020-01-21 13:00:51 +01001478 db_poll_interval = 5
tierno38089af2020-04-16 07:56:58 +00001479 wait = db_poll_interval * 10
magnussonle9198bb2020-01-21 13:00:51 +01001480 pla_result = None
1481 while not pla_result and wait >= 0:
1482 await asyncio.sleep(db_poll_interval)
1483 wait -= db_poll_interval
tierno38089af2020-04-16 07:56:58 +00001484 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
magnussonle9198bb2020-01-21 13:00:51 +01001485 pla_result = deep_get(db_nslcmop, ('_admin', 'pla'))
1486
1487 if not pla_result:
tierno38089af2020-04-16 07:56:58 +00001488 raise LcmException("Placement timeout for nslcmopId={}".format(nslcmop_id))
magnussonle9198bb2020-01-21 13:00:51 +01001489
1490 for pla_vnf in pla_result['vnf']:
1491 vnfr = db_vnfrs.get(pla_vnf['member-vnf-index'])
1492 if not pla_vnf.get('vimAccountId') or not vnfr:
1493 continue
1494 self.db.set_one("vnfrs", {"_id": vnfr["_id"]}, {"vim-account-id": pla_vnf['vimAccountId']})
tierno38089af2020-04-16 07:56:58 +00001495 # Modifies db_vnfrs
1496 vnfr["vim-account-id"] = pla_vnf['vimAccountId']
magnussonle9198bb2020-01-21 13:00:51 +01001497 return
1498
1499 def update_nsrs_with_pla_result(self, params):
1500 try:
1501 nslcmop_id = deep_get(params, ('placement', 'nslcmopId'))
1502 self.update_db_2("nslcmops", nslcmop_id, {"_admin.pla": params.get('placement')})
1503 except Exception as e:
1504 self.logger.warn('Update failed for nslcmop_id={}:{}'.format(nslcmop_id, e))
1505
tierno59d22d22018-09-25 18:10:19 +02001506 async def instantiate(self, nsr_id, nslcmop_id):
quilesj7e13aeb2019-10-08 13:34:55 +02001507 """
1508
1509 :param nsr_id: ns instance to deploy
1510 :param nslcmop_id: operation to run
1511 :return:
1512 """
kuused124bfe2019-06-18 12:09:24 +02001513
1514 # Try to lock HA task here
1515 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
1516 if not task_is_locked_by_me:
quilesj3655ae02019-12-12 16:08:35 +00001517 self.logger.debug('instantiate() task is not locked by me, ns={}'.format(nsr_id))
kuused124bfe2019-06-18 12:09:24 +02001518 return
1519
tierno59d22d22018-09-25 18:10:19 +02001520 logging_text = "Task ns={} instantiate={} ".format(nsr_id, nslcmop_id)
1521 self.logger.debug(logging_text + "Enter")
quilesj7e13aeb2019-10-08 13:34:55 +02001522
tierno59d22d22018-09-25 18:10:19 +02001523 # get all needed from database
quilesj7e13aeb2019-10-08 13:34:55 +02001524
1525 # database nsrs record
tierno59d22d22018-09-25 18:10:19 +02001526 db_nsr = None
quilesj7e13aeb2019-10-08 13:34:55 +02001527
1528 # database nslcmops record
tierno59d22d22018-09-25 18:10:19 +02001529 db_nslcmop = None
quilesj7e13aeb2019-10-08 13:34:55 +02001530
1531 # update operation on nsrs
tiernoe876f672020-02-13 14:34:48 +00001532 db_nsr_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001533 # update operation on nslcmops
tierno59d22d22018-09-25 18:10:19 +02001534 db_nslcmop_update = {}
quilesj7e13aeb2019-10-08 13:34:55 +02001535
tierno59d22d22018-09-25 18:10:19 +02001536 nslcmop_operation_state = None
quilesj7e13aeb2019-10-08 13:34:55 +02001537 db_vnfrs = {} # vnf's info indexed by member-index
1538 # n2vc_info = {}
tiernoe876f672020-02-13 14:34:48 +00001539 tasks_dict_info = {} # from task to info text
tierno59d22d22018-09-25 18:10:19 +02001540 exc = None
tiernoe876f672020-02-13 14:34:48 +00001541 error_list = []
1542 stage = ['Stage 1/5: preparation of the environment.', "Waiting for previous operations to terminate.", ""]
1543 # ^ stage, step, VIM progress
tierno59d22d22018-09-25 18:10:19 +02001544 try:
kuused124bfe2019-06-18 12:09:24 +02001545 # wait for any previous tasks in process
1546 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
1547
quilesj7e13aeb2019-10-08 13:34:55 +02001548 # STEP 0: Reading database (nslcmops, nsrs, nsds, vnfrs, vnfds)
tiernoe876f672020-02-13 14:34:48 +00001549 stage[1] = "Reading from database,"
quilesj4cda56b2019-12-05 10:02:20 +00001550 # nsState="BUILDING", currentOperation="INSTANTIATING", currentOperationID=nslcmop_id
tiernoe876f672020-02-13 14:34:48 +00001551 db_nsr_update["detailed-status"] = "creating"
1552 db_nsr_update["operational-status"] = "init"
quilesj4cda56b2019-12-05 10:02:20 +00001553 self._write_ns_status(
1554 nsr_id=nsr_id,
1555 ns_state="BUILDING",
1556 current_operation="INSTANTIATING",
tiernoe876f672020-02-13 14:34:48 +00001557 current_operation_id=nslcmop_id,
1558 other_update=db_nsr_update
1559 )
1560 self._write_op_status(
1561 op_id=nslcmop_id,
1562 stage=stage,
1563 queuePosition=0
quilesj4cda56b2019-12-05 10:02:20 +00001564 )
1565
quilesj7e13aeb2019-10-08 13:34:55 +02001566 # read from db: operation
tiernoe876f672020-02-13 14:34:48 +00001567 stage[1] = "Getting nslcmop={} from db".format(nslcmop_id)
tierno59d22d22018-09-25 18:10:19 +02001568 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
tierno744303e2020-01-13 16:46:31 +00001569 ns_params = db_nslcmop.get("operationParams")
1570 if ns_params and ns_params.get("timeout_ns_deploy"):
1571 timeout_ns_deploy = ns_params["timeout_ns_deploy"]
1572 else:
1573 timeout_ns_deploy = self.timeout.get("ns_deploy", self.timeout_ns_deploy)
quilesj7e13aeb2019-10-08 13:34:55 +02001574
1575 # read from db: ns
tiernoe876f672020-02-13 14:34:48 +00001576 stage[1] = "Getting nsr={} from db".format(nsr_id)
tierno59d22d22018-09-25 18:10:19 +02001577 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
quilesj7e13aeb2019-10-08 13:34:55 +02001578 # nsd is replicated into ns (no db read)
tierno59d22d22018-09-25 18:10:19 +02001579 nsd = db_nsr["nsd"]
tiernod8323042019-08-09 11:32:23 +00001580 # nsr_name = db_nsr["name"] # TODO short-name??
tierno47e86b52018-10-10 14:05:55 +02001581
quilesj7e13aeb2019-10-08 13:34:55 +02001582 # read from db: vnf's of this ns
tiernoe876f672020-02-13 14:34:48 +00001583 stage[1] = "Getting vnfrs from db"
1584 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001585 db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
tierno27246d82018-09-27 15:59:09 +02001586
quilesj7e13aeb2019-10-08 13:34:55 +02001587 # read from db: vnfd's for every vnf
1588 db_vnfds_ref = {} # every vnfd data indexed by vnf name
1589 db_vnfds = {} # every vnfd data indexed by vnf id
1590 db_vnfds_index = {} # every vnfd data indexed by vnf member-index
1591
1592 # for each vnf in ns, read vnfd
tierno27246d82018-09-27 15:59:09 +02001593 for vnfr in db_vnfrs_list:
quilesj7e13aeb2019-10-08 13:34:55 +02001594 db_vnfrs[vnfr["member-vnf-index-ref"]] = vnfr # vnf's dict indexed by member-index: '1', '2', etc
1595 vnfd_id = vnfr["vnfd-id"] # vnfd uuid for this vnf
1596 vnfd_ref = vnfr["vnfd-ref"] # vnfd name for this vnf
1597 # if we haven't this vnfd, read it from db
tierno27246d82018-09-27 15:59:09 +02001598 if vnfd_id not in db_vnfds:
quilesj63f90042020-01-17 09:53:55 +00001599 # read from db
tiernoe876f672020-02-13 14:34:48 +00001600 stage[1] = "Getting vnfd={} id='{}' from db".format(vnfd_id, vnfd_ref)
1601 self.logger.debug(logging_text + stage[1])
tierno27246d82018-09-27 15:59:09 +02001602 vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
tierno27246d82018-09-27 15:59:09 +02001603
quilesj7e13aeb2019-10-08 13:34:55 +02001604 # store vnfd
1605 db_vnfds_ref[vnfd_ref] = vnfd # vnfd's indexed by name
1606 db_vnfds[vnfd_id] = vnfd # vnfd's indexed by id
1607 db_vnfds_index[vnfr["member-vnf-index-ref"]] = db_vnfds[vnfd_id] # vnfd's indexed by member-index
1608
1609 # Get or generates the _admin.deployed.VCA list
tiernoe4f7e6c2018-11-27 14:55:30 +00001610 vca_deployed_list = None
1611 if db_nsr["_admin"].get("deployed"):
1612 vca_deployed_list = db_nsr["_admin"]["deployed"].get("VCA")
1613 if vca_deployed_list is None:
1614 vca_deployed_list = []
quilesj3655ae02019-12-12 16:08:35 +00001615 configuration_status_list = []
tiernoe4f7e6c2018-11-27 14:55:30 +00001616 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
quilesj3655ae02019-12-12 16:08:35 +00001617 db_nsr_update["configurationStatus"] = configuration_status_list
quilesj7e13aeb2019-10-08 13:34:55 +02001618 # add _admin.deployed.VCA to db_nsr dictionary, value=vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001619 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001620 elif isinstance(vca_deployed_list, dict):
1621 # maintain backward compatibility. Change a dict to list at database
1622 vca_deployed_list = list(vca_deployed_list.values())
1623 db_nsr_update["_admin.deployed.VCA"] = vca_deployed_list
tierno98ad6ea2019-05-30 17:16:28 +00001624 populate_dict(db_nsr, ("_admin", "deployed", "VCA"), vca_deployed_list)
tiernoe4f7e6c2018-11-27 14:55:30 +00001625
tierno6cf25f52019-09-12 09:33:40 +00001626 if not isinstance(deep_get(db_nsr, ("_admin", "deployed", "RO", "vnfd")), list):
tiernoa009e552019-01-30 16:45:44 +00001627 populate_dict(db_nsr, ("_admin", "deployed", "RO", "vnfd"), [])
1628 db_nsr_update["_admin.deployed.RO.vnfd"] = []
tierno59d22d22018-09-25 18:10:19 +02001629
tiernobaa51102018-12-14 13:16:18 +00001630 # set state to INSTANTIATED. When instantiated NBI will not delete directly
1631 db_nsr_update["_admin.nsState"] = "INSTANTIATED"
1632 self.update_db_2("nsrs", nsr_id, db_nsr_update)
quilesj3655ae02019-12-12 16:08:35 +00001633
1634 # n2vc_redesign STEP 2 Deploy Network Scenario
tiernoe876f672020-02-13 14:34:48 +00001635 stage[0] = 'Stage 2/5: deployment of KDUs, VMs and execution environments.'
quilesj3655ae02019-12-12 16:08:35 +00001636 self._write_op_status(
1637 op_id=nslcmop_id,
tiernoe876f672020-02-13 14:34:48 +00001638 stage=stage
quilesj3655ae02019-12-12 16:08:35 +00001639 )
1640
tiernoe876f672020-02-13 14:34:48 +00001641 stage[1] = "Deploying KDUs,"
1642 # self.logger.debug(logging_text + "Before deploy_kdus")
calvinosanch9f9c6f22019-11-04 13:37:39 +01001643 # Call to deploy_kdus in case exists the "vdu:kdu" param
tiernoe876f672020-02-13 14:34:48 +00001644 await self.deploy_kdus(
1645 logging_text=logging_text,
1646 nsr_id=nsr_id,
1647 nslcmop_id=nslcmop_id,
1648 db_vnfrs=db_vnfrs,
1649 db_vnfds=db_vnfds,
1650 task_instantiation_info=tasks_dict_info,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001651 )
tiernoe876f672020-02-13 14:34:48 +00001652
1653 stage[1] = "Getting VCA public key."
tiernod8323042019-08-09 11:32:23 +00001654 # n2vc_redesign STEP 1 Get VCA public ssh-key
1655 # feature 1429. Add n2vc public key to needed VMs
tierno3bedc9b2019-11-27 15:46:57 +00001656 n2vc_key = self.n2vc.get_public_key()
tiernoa5088192019-11-26 16:12:53 +00001657 n2vc_key_list = [n2vc_key]
1658 if self.vca_config.get("public_key"):
1659 n2vc_key_list.append(self.vca_config["public_key"])
tierno98ad6ea2019-05-30 17:16:28 +00001660
tiernoe876f672020-02-13 14:34:48 +00001661 stage[1] = "Deploying NS at VIM."
tiernod8323042019-08-09 11:32:23 +00001662 task_ro = asyncio.ensure_future(
quilesj7e13aeb2019-10-08 13:34:55 +02001663 self.instantiate_RO(
1664 logging_text=logging_text,
1665 nsr_id=nsr_id,
1666 nsd=nsd,
1667 db_nsr=db_nsr,
1668 db_nslcmop=db_nslcmop,
1669 db_vnfrs=db_vnfrs,
1670 db_vnfds_ref=db_vnfds_ref,
tiernoe876f672020-02-13 14:34:48 +00001671 n2vc_key_list=n2vc_key_list,
1672 stage=stage
tierno98ad6ea2019-05-30 17:16:28 +00001673 )
tiernod8323042019-08-09 11:32:23 +00001674 )
1675 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_RO", task_ro)
tiernoa2143262020-03-27 16:20:40 +00001676 tasks_dict_info[task_ro] = "Deploying at VIM"
tierno98ad6ea2019-05-30 17:16:28 +00001677
tiernod8323042019-08-09 11:32:23 +00001678 # n2vc_redesign STEP 3 to 6 Deploy N2VC
tiernoe876f672020-02-13 14:34:48 +00001679 stage[1] = "Deploying Execution Environments."
1680 self.logger.debug(logging_text + stage[1])
tierno98ad6ea2019-05-30 17:16:28 +00001681
tiernod8323042019-08-09 11:32:23 +00001682 nsi_id = None # TODO put nsi_id when this nsr belongs to a NSI
quilesj7e13aeb2019-10-08 13:34:55 +02001683 # get_iterable() returns a value from a dict or empty tuple if key does not exist
tierno98ad6ea2019-05-30 17:16:28 +00001684 for c_vnf in get_iterable(nsd, "constituent-vnfd"):
1685 vnfd_id = c_vnf["vnfd-id-ref"]
tierno98ad6ea2019-05-30 17:16:28 +00001686 vnfd = db_vnfds_ref[vnfd_id]
tiernod8323042019-08-09 11:32:23 +00001687 member_vnf_index = str(c_vnf["member-vnf-index"])
1688 db_vnfr = db_vnfrs[member_vnf_index]
1689 base_folder = vnfd["_admin"]["storage"]
1690 vdu_id = None
1691 vdu_index = 0
tierno98ad6ea2019-05-30 17:16:28 +00001692 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001693 kdu_name = None
tierno59d22d22018-09-25 18:10:19 +02001694
tierno8a518872018-12-21 13:42:14 +00001695 # Get additional parameters
tiernod8323042019-08-09 11:32:23 +00001696 deploy_params = {}
1697 if db_vnfr.get("additionalParamsForVnf"):
tierno626e0152019-11-29 14:16:16 +00001698 deploy_params = self._format_additional_params(db_vnfr["additionalParamsForVnf"].copy())
tierno8a518872018-12-21 13:42:14 +00001699
tiernod8323042019-08-09 11:32:23 +00001700 descriptor_config = vnfd.get("vnf-configuration")
1701 if descriptor_config and descriptor_config.get("juju"):
quilesj7e13aeb2019-10-08 13:34:55 +02001702 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001703 logging_text=logging_text + "member_vnf_index={} ".format(member_vnf_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001704 db_nsr=db_nsr,
1705 db_vnfr=db_vnfr,
1706 nslcmop_id=nslcmop_id,
1707 nsr_id=nsr_id,
1708 nsi_id=nsi_id,
1709 vnfd_id=vnfd_id,
1710 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001711 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001712 member_vnf_index=member_vnf_index,
1713 vdu_index=vdu_index,
1714 vdu_name=vdu_name,
1715 deploy_params=deploy_params,
1716 descriptor_config=descriptor_config,
1717 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00001718 task_instantiation_info=tasks_dict_info,
1719 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001720 )
tierno59d22d22018-09-25 18:10:19 +02001721
1722 # Deploy charms for each VDU that supports one.
tiernod8323042019-08-09 11:32:23 +00001723 for vdud in get_iterable(vnfd, 'vdu'):
1724 vdu_id = vdud["id"]
1725 descriptor_config = vdud.get('vdu-configuration')
tierno626e0152019-11-29 14:16:16 +00001726 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
1727 if vdur.get("additionalParams"):
1728 deploy_params_vdu = self._format_additional_params(vdur["additionalParams"])
1729 else:
1730 deploy_params_vdu = deploy_params
tiernod8323042019-08-09 11:32:23 +00001731 if descriptor_config and descriptor_config.get("juju"):
1732 # look for vdu index in the db_vnfr["vdu"] section
1733 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
1734 # if vdur["vdu-id-ref"] == vdu_id:
1735 # break
1736 # else:
1737 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
1738 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
1739 # vdu_name = vdur.get("name")
1740 vdu_name = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001741 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00001742 for vdu_index in range(int(vdud.get("count", 1))):
1743 # TODO vnfr_params["rw_mgmt_ip"] = vdur["ip-address"]
quilesj7e13aeb2019-10-08 13:34:55 +02001744 self._deploy_n2vc(
tiernoa54150d2019-12-05 17:15:10 +00001745 logging_text=logging_text + "member_vnf_index={}, vdu_id={}, vdu_index={} ".format(
1746 member_vnf_index, vdu_id, vdu_index),
quilesj7e13aeb2019-10-08 13:34:55 +02001747 db_nsr=db_nsr,
1748 db_vnfr=db_vnfr,
1749 nslcmop_id=nslcmop_id,
1750 nsr_id=nsr_id,
1751 nsi_id=nsi_id,
1752 vnfd_id=vnfd_id,
1753 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001754 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001755 member_vnf_index=member_vnf_index,
1756 vdu_index=vdu_index,
1757 vdu_name=vdu_name,
tierno626e0152019-11-29 14:16:16 +00001758 deploy_params=deploy_params_vdu,
quilesj7e13aeb2019-10-08 13:34:55 +02001759 descriptor_config=descriptor_config,
1760 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00001761 task_instantiation_info=tasks_dict_info,
1762 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001763 )
calvinosanch9f9c6f22019-11-04 13:37:39 +01001764 for kdud in get_iterable(vnfd, 'kdu'):
1765 kdu_name = kdud["name"]
1766 descriptor_config = kdud.get('kdu-configuration')
1767 if descriptor_config and descriptor_config.get("juju"):
1768 vdu_id = None
1769 vdu_index = 0
1770 vdu_name = None
1771 # look for vdu index in the db_vnfr["vdu"] section
1772 # for vdur_index, vdur in enumerate(db_vnfr["vdur"]):
1773 # if vdur["vdu-id-ref"] == vdu_id:
1774 # break
1775 # else:
1776 # raise LcmException("Mismatch vdu_id={} not found in the vnfr['vdur'] list for "
1777 # "member_vnf_index={}".format(vdu_id, member_vnf_index))
1778 # vdu_name = vdur.get("name")
1779 # vdu_name = None
tierno59d22d22018-09-25 18:10:19 +02001780
calvinosanch9f9c6f22019-11-04 13:37:39 +01001781 self._deploy_n2vc(
1782 logging_text=logging_text,
1783 db_nsr=db_nsr,
1784 db_vnfr=db_vnfr,
1785 nslcmop_id=nslcmop_id,
1786 nsr_id=nsr_id,
1787 nsi_id=nsi_id,
1788 vnfd_id=vnfd_id,
1789 vdu_id=vdu_id,
1790 kdu_name=kdu_name,
1791 member_vnf_index=member_vnf_index,
1792 vdu_index=vdu_index,
1793 vdu_name=vdu_name,
1794 deploy_params=deploy_params,
1795 descriptor_config=descriptor_config,
1796 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00001797 task_instantiation_info=tasks_dict_info,
1798 stage=stage
calvinosanch9f9c6f22019-11-04 13:37:39 +01001799 )
tierno59d22d22018-09-25 18:10:19 +02001800
tierno1b633412019-02-25 16:48:23 +00001801 # Check if this NS has a charm configuration
tiernod8323042019-08-09 11:32:23 +00001802 descriptor_config = nsd.get("ns-configuration")
1803 if descriptor_config and descriptor_config.get("juju"):
1804 vnfd_id = None
1805 db_vnfr = None
1806 member_vnf_index = None
1807 vdu_id = None
calvinosanch9f9c6f22019-11-04 13:37:39 +01001808 kdu_name = None
tiernod8323042019-08-09 11:32:23 +00001809 vdu_index = 0
1810 vdu_name = None
tierno1b633412019-02-25 16:48:23 +00001811
tiernod8323042019-08-09 11:32:23 +00001812 # Get additional parameters
1813 deploy_params = {}
1814 if db_nsr.get("additionalParamsForNs"):
tierno626e0152019-11-29 14:16:16 +00001815 deploy_params = self._format_additional_params(db_nsr["additionalParamsForNs"].copy())
tiernod8323042019-08-09 11:32:23 +00001816 base_folder = nsd["_admin"]["storage"]
quilesj7e13aeb2019-10-08 13:34:55 +02001817 self._deploy_n2vc(
1818 logging_text=logging_text,
1819 db_nsr=db_nsr,
1820 db_vnfr=db_vnfr,
1821 nslcmop_id=nslcmop_id,
1822 nsr_id=nsr_id,
1823 nsi_id=nsi_id,
1824 vnfd_id=vnfd_id,
1825 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01001826 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02001827 member_vnf_index=member_vnf_index,
1828 vdu_index=vdu_index,
1829 vdu_name=vdu_name,
1830 deploy_params=deploy_params,
1831 descriptor_config=descriptor_config,
1832 base_folder=base_folder,
tierno8e2fae72020-04-01 15:21:15 +00001833 task_instantiation_info=tasks_dict_info,
1834 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02001835 )
tierno1b633412019-02-25 16:48:23 +00001836
tiernoe876f672020-02-13 14:34:48 +00001837 # rest of staff will be done at finally
tierno1b633412019-02-25 16:48:23 +00001838
tiernoe876f672020-02-13 14:34:48 +00001839 except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
1840 self.logger.error(logging_text + "Exit Exception while '{}': {}".format(stage[1], e))
tierno59d22d22018-09-25 18:10:19 +02001841 exc = e
1842 except asyncio.CancelledError:
tiernoe876f672020-02-13 14:34:48 +00001843 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
tierno59d22d22018-09-25 18:10:19 +02001844 exc = "Operation was cancelled"
1845 except Exception as e:
1846 exc = traceback.format_exc()
tiernoe876f672020-02-13 14:34:48 +00001847 self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
tierno59d22d22018-09-25 18:10:19 +02001848 finally:
1849 if exc:
tiernoe876f672020-02-13 14:34:48 +00001850 error_list.append(str(exc))
tiernobaa51102018-12-14 13:16:18 +00001851 try:
tiernoe876f672020-02-13 14:34:48 +00001852 # wait for pending tasks
1853 if tasks_dict_info:
1854 stage[1] = "Waiting for instantiate pending tasks."
1855 self.logger.debug(logging_text + stage[1])
1856 error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_deploy,
1857 stage, nslcmop_id, nsr_id=nsr_id)
1858 stage[1] = stage[2] = ""
1859 except asyncio.CancelledError:
1860 error_list.append("Cancelled")
1861 # TODO cancel all tasks
1862 except Exception as exc:
1863 error_list.append(str(exc))
quilesj4cda56b2019-12-05 10:02:20 +00001864
tiernoe876f672020-02-13 14:34:48 +00001865 # update operation-status
1866 db_nsr_update["operational-status"] = "running"
1867 # let's begin with VCA 'configured' status (later we can change it)
1868 db_nsr_update["config-status"] = "configured"
1869 for task, task_name in tasks_dict_info.items():
1870 if not task.done() or task.cancelled() or task.exception():
1871 if task_name.startswith(self.task_name_deploy_vca):
1872 # A N2VC task is pending
1873 db_nsr_update["config-status"] = "failed"
quilesj4cda56b2019-12-05 10:02:20 +00001874 else:
tiernoe876f672020-02-13 14:34:48 +00001875 # RO or KDU task is pending
1876 db_nsr_update["operational-status"] = "failed"
quilesj3655ae02019-12-12 16:08:35 +00001877
tiernoe876f672020-02-13 14:34:48 +00001878 # update status at database
1879 if error_list:
tiernoa2143262020-03-27 16:20:40 +00001880 error_detail = ". ".join(error_list)
tiernoe876f672020-02-13 14:34:48 +00001881 self.logger.error(logging_text + error_detail)
tiernoa2143262020-03-27 16:20:40 +00001882 error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
1883 error_description_nsr = 'Operation: INSTANTIATING.{}, Stage {}'.format(nslcmop_id, stage[0])
quilesj3655ae02019-12-12 16:08:35 +00001884
tiernoa2143262020-03-27 16:20:40 +00001885 db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
tiernoe876f672020-02-13 14:34:48 +00001886 db_nslcmop_update["detailed-status"] = error_detail
1887 nslcmop_operation_state = "FAILED"
1888 ns_state = "BROKEN"
1889 else:
tiernoa2143262020-03-27 16:20:40 +00001890 error_detail = None
tiernoe876f672020-02-13 14:34:48 +00001891 error_description_nsr = error_description_nslcmop = None
1892 ns_state = "READY"
1893 db_nsr_update["detailed-status"] = "Done"
1894 db_nslcmop_update["detailed-status"] = "Done"
1895 nslcmop_operation_state = "COMPLETED"
quilesj4cda56b2019-12-05 10:02:20 +00001896
tiernoe876f672020-02-13 14:34:48 +00001897 if db_nsr:
1898 self._write_ns_status(
1899 nsr_id=nsr_id,
1900 ns_state=ns_state,
1901 current_operation="IDLE",
1902 current_operation_id=None,
1903 error_description=error_description_nsr,
tiernoa2143262020-03-27 16:20:40 +00001904 error_detail=error_detail,
tiernoe876f672020-02-13 14:34:48 +00001905 other_update=db_nsr_update
1906 )
1907 if db_nslcmop:
1908 self._write_op_status(
1909 op_id=nslcmop_id,
1910 stage="",
1911 error_message=error_description_nslcmop,
1912 operation_state=nslcmop_operation_state,
1913 other_update=db_nslcmop_update,
1914 )
quilesj3655ae02019-12-12 16:08:35 +00001915
tierno59d22d22018-09-25 18:10:19 +02001916 if nslcmop_operation_state:
1917 try:
1918 await self.msg.aiowrite("ns", "instantiated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00001919 "operationState": nslcmop_operation_state},
1920 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02001921 except Exception as e:
1922 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
1923
1924 self.logger.debug(logging_text + "Exit")
1925 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_instantiate")
1926
quilesj63f90042020-01-17 09:53:55 +00001927 async def _add_vca_relations(self, logging_text, nsr_id, vca_index: int, timeout: int = 3600) -> bool:
1928
1929 # steps:
1930 # 1. find all relations for this VCA
1931 # 2. wait for other peers related
1932 # 3. add relations
1933
1934 try:
1935
1936 # STEP 1: find all relations for this VCA
1937
1938 # read nsr record
1939 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1940
1941 # this VCA data
1942 my_vca = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))[vca_index]
1943
1944 # read all ns-configuration relations
1945 ns_relations = list()
1946 db_ns_relations = deep_get(db_nsr, ('nsd', 'ns-configuration', 'relation'))
1947 if db_ns_relations:
1948 for r in db_ns_relations:
1949 # check if this VCA is in the relation
1950 if my_vca.get('member-vnf-index') in\
1951 (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
1952 ns_relations.append(r)
1953
1954 # read all vnf-configuration relations
1955 vnf_relations = list()
1956 db_vnfd_list = db_nsr.get('vnfd-id')
1957 if db_vnfd_list:
1958 for vnfd in db_vnfd_list:
1959 db_vnfd = self.db.get_one("vnfds", {"_id": vnfd})
1960 db_vnf_relations = deep_get(db_vnfd, ('vnf-configuration', 'relation'))
1961 if db_vnf_relations:
1962 for r in db_vnf_relations:
1963 # check if this VCA is in the relation
1964 if my_vca.get('vdu_id') in (r.get('entities')[0].get('id'), r.get('entities')[1].get('id')):
1965 vnf_relations.append(r)
1966
1967 # if no relations, terminate
1968 if not ns_relations and not vnf_relations:
1969 self.logger.debug(logging_text + ' No relations')
1970 return True
1971
1972 self.logger.debug(logging_text + ' adding relations\n {}\n {}'.format(ns_relations, vnf_relations))
1973
1974 # add all relations
1975 start = time()
1976 while True:
1977 # check timeout
1978 now = time()
1979 if now - start >= timeout:
1980 self.logger.error(logging_text + ' : timeout adding relations')
1981 return False
1982
1983 # reload nsr from database (we need to update record: _admin.deloyed.VCA)
1984 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
1985
1986 # for each defined NS relation, find the VCA's related
1987 for r in ns_relations:
1988 from_vca_ee_id = None
1989 to_vca_ee_id = None
1990 from_vca_endpoint = None
1991 to_vca_endpoint = None
1992 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
1993 for vca in vca_list:
1994 if vca.get('member-vnf-index') == r.get('entities')[0].get('id') \
1995 and vca.get('config_sw_installed'):
1996 from_vca_ee_id = vca.get('ee_id')
1997 from_vca_endpoint = r.get('entities')[0].get('endpoint')
1998 if vca.get('member-vnf-index') == r.get('entities')[1].get('id') \
1999 and vca.get('config_sw_installed'):
2000 to_vca_ee_id = vca.get('ee_id')
2001 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2002 if from_vca_ee_id and to_vca_ee_id:
2003 # add relation
2004 await self.n2vc.add_relation(
2005 ee_id_1=from_vca_ee_id,
2006 ee_id_2=to_vca_ee_id,
2007 endpoint_1=from_vca_endpoint,
2008 endpoint_2=to_vca_endpoint)
2009 # remove entry from relations list
2010 ns_relations.remove(r)
2011 else:
2012 # check failed peers
2013 try:
2014 vca_status_list = db_nsr.get('configurationStatus')
2015 if vca_status_list:
2016 for i in range(len(vca_list)):
2017 vca = vca_list[i]
2018 vca_status = vca_status_list[i]
2019 if vca.get('member-vnf-index') == r.get('entities')[0].get('id'):
2020 if vca_status.get('status') == 'BROKEN':
2021 # peer broken: remove relation from list
2022 ns_relations.remove(r)
2023 if vca.get('member-vnf-index') == r.get('entities')[1].get('id'):
2024 if vca_status.get('status') == 'BROKEN':
2025 # peer broken: remove relation from list
2026 ns_relations.remove(r)
2027 except Exception:
2028 # ignore
2029 pass
2030
2031 # for each defined VNF relation, find the VCA's related
2032 for r in vnf_relations:
2033 from_vca_ee_id = None
2034 to_vca_ee_id = None
2035 from_vca_endpoint = None
2036 to_vca_endpoint = None
2037 vca_list = deep_get(db_nsr, ('_admin', 'deployed', 'VCA'))
2038 for vca in vca_list:
2039 if vca.get('vdu_id') == r.get('entities')[0].get('id') and vca.get('config_sw_installed'):
2040 from_vca_ee_id = vca.get('ee_id')
2041 from_vca_endpoint = r.get('entities')[0].get('endpoint')
2042 if vca.get('vdu_id') == r.get('entities')[1].get('id') and vca.get('config_sw_installed'):
2043 to_vca_ee_id = vca.get('ee_id')
2044 to_vca_endpoint = r.get('entities')[1].get('endpoint')
2045 if from_vca_ee_id and to_vca_ee_id:
2046 # add relation
2047 await self.n2vc.add_relation(
2048 ee_id_1=from_vca_ee_id,
2049 ee_id_2=to_vca_ee_id,
2050 endpoint_1=from_vca_endpoint,
2051 endpoint_2=to_vca_endpoint)
2052 # remove entry from relations list
2053 vnf_relations.remove(r)
2054 else:
2055 # check failed peers
2056 try:
2057 vca_status_list = db_nsr.get('configurationStatus')
2058 if vca_status_list:
2059 for i in range(len(vca_list)):
2060 vca = vca_list[i]
2061 vca_status = vca_status_list[i]
2062 if vca.get('vdu_id') == r.get('entities')[0].get('id'):
2063 if vca_status.get('status') == 'BROKEN':
2064 # peer broken: remove relation from list
2065 ns_relations.remove(r)
2066 if vca.get('vdu_id') == r.get('entities')[1].get('id'):
2067 if vca_status.get('status') == 'BROKEN':
2068 # peer broken: remove relation from list
2069 ns_relations.remove(r)
2070 except Exception:
2071 # ignore
2072 pass
2073
2074 # wait for next try
2075 await asyncio.sleep(5.0)
2076
2077 if not ns_relations and not vnf_relations:
2078 self.logger.debug('Relations added')
2079 break
2080
2081 return True
2082
2083 except Exception as e:
2084 self.logger.warn(logging_text + ' ERROR adding relations: {}'.format(e))
2085 return False
2086
tiernoe876f672020-02-13 14:34:48 +00002087 async def deploy_kdus(self, logging_text, nsr_id, nslcmop_id, db_vnfrs, db_vnfds, task_instantiation_info):
calvinosanch9f9c6f22019-11-04 13:37:39 +01002088 # Launch kdus if present in the descriptor
tierno626e0152019-11-29 14:16:16 +00002089
2090 k8scluster_id_2_uuic = {"helm-chart": {}, "juju-bundle": {}}
2091
2092 def _get_cluster_id(cluster_id, cluster_type):
2093 nonlocal k8scluster_id_2_uuic
2094 if cluster_id in k8scluster_id_2_uuic[cluster_type]:
2095 return k8scluster_id_2_uuic[cluster_type][cluster_id]
2096
2097 db_k8scluster = self.db.get_one("k8sclusters", {"_id": cluster_id}, fail_on_empty=False)
2098 if not db_k8scluster:
2099 raise LcmException("K8s cluster {} cannot be found".format(cluster_id))
2100 k8s_id = deep_get(db_k8scluster, ("_admin", cluster_type, "id"))
2101 if not k8s_id:
2102 raise LcmException("K8s cluster '{}' has not been initilized for '{}'".format(cluster_id, cluster_type))
2103 k8scluster_id_2_uuic[cluster_type][cluster_id] = k8s_id
2104 return k8s_id
2105
2106 logging_text += "Deploy kdus: "
tiernoe876f672020-02-13 14:34:48 +00002107 step = ""
calvinosanch9f9c6f22019-11-04 13:37:39 +01002108 try:
tierno626e0152019-11-29 14:16:16 +00002109 db_nsr_update = {"_admin.deployed.K8s": []}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002110 self.update_db_2("nsrs", nsr_id, db_nsr_update)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002111
tierno626e0152019-11-29 14:16:16 +00002112 index = 0
tiernoe876f672020-02-13 14:34:48 +00002113 updated_cluster_list = []
2114
tierno626e0152019-11-29 14:16:16 +00002115 for vnfr_data in db_vnfrs.values():
2116 for kdur in get_iterable(vnfr_data, "kdur"):
2117 desc_params = self._format_additional_params(kdur.get("additionalParams"))
quilesjacde94f2020-01-23 10:07:08 +00002118 vnfd_id = vnfr_data.get('vnfd-id')
tierno626e0152019-11-29 14:16:16 +00002119 if kdur.get("helm-chart"):
2120 kdumodel = kdur["helm-chart"]
tiernoe876f672020-02-13 14:34:48 +00002121 k8sclustertype = "helm-chart"
tierno626e0152019-11-29 14:16:16 +00002122 elif kdur.get("juju-bundle"):
2123 kdumodel = kdur["juju-bundle"]
tiernoe876f672020-02-13 14:34:48 +00002124 k8sclustertype = "juju-bundle"
tierno626e0152019-11-29 14:16:16 +00002125 else:
tiernoe876f672020-02-13 14:34:48 +00002126 raise LcmException("kdu type for kdu='{}.{}' is neither helm-chart nor "
2127 "juju-bundle. Maybe an old NBI version is running".
2128 format(vnfr_data["member-vnf-index-ref"], kdur["kdu-name"]))
quilesjacde94f2020-01-23 10:07:08 +00002129 # check if kdumodel is a file and exists
2130 try:
tierno51183952020-04-03 15:48:18 +00002131 storage = deep_get(db_vnfds.get(vnfd_id), ('_admin', 'storage'))
2132 if storage and storage.get('pkg-dir'): # may be not present if vnfd has not artifacts
2133 # path format: /vnfdid/pkkdir/helm-charts|juju-bundles/kdumodel
2134 filename = '{}/{}/{}s/{}'.format(storage["folder"], storage["'pkg-dir"], k8sclustertype,
2135 kdumodel)
2136 if self.fs.file_exists(filename, mode='file') or self.fs.file_exists(filename, mode='dir'):
2137 kdumodel = self.fs.path + filename
2138 except (asyncio.TimeoutError, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00002139 raise
2140 except Exception: # it is not a file
quilesjacde94f2020-01-23 10:07:08 +00002141 pass
lloretgallegedc5f332020-02-20 11:50:50 +01002142
tiernoe876f672020-02-13 14:34:48 +00002143 k8s_cluster_id = kdur["k8s-cluster"]["id"]
2144 step = "Synchronize repos for k8s cluster '{}'".format(k8s_cluster_id)
2145 cluster_uuid = _get_cluster_id(k8s_cluster_id, k8sclustertype)
lloretgallegedc5f332020-02-20 11:50:50 +01002146
tiernoe876f672020-02-13 14:34:48 +00002147 if k8sclustertype == "helm-chart" and cluster_uuid not in updated_cluster_list:
2148 del_repo_list, added_repo_dict = await asyncio.ensure_future(
2149 self.k8sclusterhelm.synchronize_repos(cluster_uuid=cluster_uuid))
2150 if del_repo_list or added_repo_dict:
2151 unset = {'_admin.helm_charts_added.' + item: None for item in del_repo_list}
2152 updated = {'_admin.helm_charts_added.' +
2153 item: name for item, name in added_repo_dict.items()}
2154 self.logger.debug(logging_text + "repos synchronized on k8s cluster '{}' to_delete: {}, "
2155 "to_add: {}".format(k8s_cluster_id, del_repo_list,
2156 added_repo_dict))
2157 self.db.set_one("k8sclusters", {"_id": k8s_cluster_id}, updated, unset=unset)
2158 updated_cluster_list.append(cluster_uuid)
lloretgallegedc5f332020-02-20 11:50:50 +01002159
tiernoe876f672020-02-13 14:34:48 +00002160 step = "Instantiating KDU {}.{} in k8s cluster {}".format(vnfr_data["member-vnf-index-ref"],
2161 kdur["kdu-name"], k8s_cluster_id)
tierno626e0152019-11-29 14:16:16 +00002162
tierno067e04a2020-03-31 12:53:13 +00002163 k8s_instace_info = {"kdu-instance": None,
2164 "k8scluster-uuid": cluster_uuid,
tierno626e0152019-11-29 14:16:16 +00002165 "k8scluster-type": k8sclustertype,
tierno067e04a2020-03-31 12:53:13 +00002166 "member-vnf-index": vnfr_data["member-vnf-index-ref"],
2167 "kdu-name": kdur["kdu-name"],
2168 "kdu-model": kdumodel}
tierno626e0152019-11-29 14:16:16 +00002169 db_nsr_update["_admin.deployed.K8s.{}".format(index)] = k8s_instace_info
2170 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tierno626e0152019-11-29 14:16:16 +00002171
tiernoe876f672020-02-13 14:34:48 +00002172 db_dict = {"collection": "nsrs",
2173 "filter": {"_id": nsr_id},
2174 "path": "_admin.deployed.K8s.{}".format(index)}
lloretgallegedc5f332020-02-20 11:50:50 +01002175
tiernoa2143262020-03-27 16:20:40 +00002176 task = asyncio.ensure_future(
2177 self.k8scluster_map[k8sclustertype].install(cluster_uuid=cluster_uuid, kdu_model=kdumodel,
2178 atomic=True, params=desc_params,
2179 db_dict=db_dict, timeout=600,
2180 kdu_name=kdur["kdu-name"]))
Adam Israelbaacc302019-12-01 12:41:39 -05002181
tiernoe876f672020-02-13 14:34:48 +00002182 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_KDU-{}".format(index), task)
tiernoa2143262020-03-27 16:20:40 +00002183 task_instantiation_info[task] = "Deploying KDU {}".format(kdur["kdu-name"])
tiernoe876f672020-02-13 14:34:48 +00002184
tierno626e0152019-11-29 14:16:16 +00002185 index += 1
quilesjdd799ac2020-01-23 16:31:11 +00002186
tiernoe876f672020-02-13 14:34:48 +00002187 except (LcmException, asyncio.CancelledError):
2188 raise
calvinosanch9f9c6f22019-11-04 13:37:39 +01002189 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00002190 msg = "Exception {} while {}: {}".format(type(e).__name__, step, e)
2191 if isinstance(e, (N2VCException, DbException)):
2192 self.logger.error(logging_text + msg)
2193 else:
2194 self.logger.critical(logging_text + msg, exc_info=True)
quilesjdd799ac2020-01-23 16:31:11 +00002195 raise LcmException(msg)
calvinosanch9f9c6f22019-11-04 13:37:39 +01002196 finally:
calvinosanch9f9c6f22019-11-04 13:37:39 +01002197 if db_nsr_update:
2198 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoda6fb102019-11-23 00:36:52 +00002199
quilesj7e13aeb2019-10-08 13:34:55 +02002200 def _deploy_n2vc(self, logging_text, db_nsr, db_vnfr, nslcmop_id, nsr_id, nsi_id, vnfd_id, vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002201 kdu_name, member_vnf_index, vdu_index, vdu_name, deploy_params, descriptor_config,
tiernoe876f672020-02-13 14:34:48 +00002202 base_folder, task_instantiation_info, stage):
quilesj7e13aeb2019-10-08 13:34:55 +02002203 # launch instantiate_N2VC in a asyncio task and register task object
2204 # Look where information of this charm is at database <nsrs>._admin.deployed.VCA
2205 # if not found, create one entry and update database
tiernobaa51102018-12-14 13:16:18 +00002206
quilesj7e13aeb2019-10-08 13:34:55 +02002207 # fill db_nsr._admin.deployed.VCA.<index>
2208 vca_index = -1
2209 for vca_index, vca_deployed in enumerate(db_nsr["_admin"]["deployed"]["VCA"]):
2210 if not vca_deployed:
2211 continue
2212 if vca_deployed.get("member-vnf-index") == member_vnf_index and \
2213 vca_deployed.get("vdu_id") == vdu_id and \
calvinosanch9f9c6f22019-11-04 13:37:39 +01002214 vca_deployed.get("kdu_name") == kdu_name and \
quilesj7e13aeb2019-10-08 13:34:55 +02002215 vca_deployed.get("vdu_count_index", 0) == vdu_index:
2216 break
2217 else:
2218 # not found, create one.
2219 vca_deployed = {
2220 "member-vnf-index": member_vnf_index,
2221 "vdu_id": vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002222 "kdu_name": kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002223 "vdu_count_index": vdu_index,
2224 "operational-status": "init", # TODO revise
2225 "detailed-status": "", # TODO revise
2226 "step": "initial-deploy", # TODO revise
2227 "vnfd_id": vnfd_id,
2228 "vdu_name": vdu_name,
2229 }
2230 vca_index += 1
quilesj3655ae02019-12-12 16:08:35 +00002231
2232 # create VCA and configurationStatus in db
2233 db_dict = {
2234 "_admin.deployed.VCA.{}".format(vca_index): vca_deployed,
2235 "configurationStatus.{}".format(vca_index): dict()
2236 }
2237 self.update_db_2("nsrs", nsr_id, db_dict)
2238
quilesj7e13aeb2019-10-08 13:34:55 +02002239 db_nsr["_admin"]["deployed"]["VCA"].append(vca_deployed)
2240
2241 # Launch task
2242 task_n2vc = asyncio.ensure_future(
2243 self.instantiate_N2VC(
2244 logging_text=logging_text,
2245 vca_index=vca_index,
2246 nsi_id=nsi_id,
2247 db_nsr=db_nsr,
2248 db_vnfr=db_vnfr,
2249 vdu_id=vdu_id,
calvinosanch9f9c6f22019-11-04 13:37:39 +01002250 kdu_name=kdu_name,
quilesj7e13aeb2019-10-08 13:34:55 +02002251 vdu_index=vdu_index,
2252 deploy_params=deploy_params,
2253 config_descriptor=descriptor_config,
2254 base_folder=base_folder,
tiernoe876f672020-02-13 14:34:48 +00002255 nslcmop_id=nslcmop_id,
2256 stage=stage
quilesj7e13aeb2019-10-08 13:34:55 +02002257 )
2258 )
2259 self.lcm_tasks.register("ns", nsr_id, nslcmop_id, "instantiate_N2VC-{}".format(vca_index), task_n2vc)
tiernoe876f672020-02-13 14:34:48 +00002260 task_instantiation_info[task_n2vc] = self.task_name_deploy_vca + " {}.{}".format(
2261 member_vnf_index or "", vdu_id or "")
tiernobaa51102018-12-14 13:16:18 +00002262
kuuse0ca67472019-05-13 15:59:27 +02002263 # Check if this VNFD has a configured terminate action
2264 def _has_terminate_config_primitive(self, vnfd):
2265 vnf_config = vnfd.get("vnf-configuration")
2266 if vnf_config and vnf_config.get("terminate-config-primitive"):
2267 return True
2268 else:
2269 return False
2270
tiernoc9556972019-07-05 15:25:25 +00002271 @staticmethod
2272 def _get_terminate_config_primitive_seq_list(vnfd):
2273 """ Get a numerically sorted list of the sequences for this VNFD's terminate action """
kuuse0ca67472019-05-13 15:59:27 +02002274 # No need to check for existing primitive twice, already done before
2275 vnf_config = vnfd.get("vnf-configuration")
2276 seq_list = vnf_config.get("terminate-config-primitive")
2277 # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
2278 seq_list_sorted = sorted(seq_list, key=lambda x: int(x['seq']))
2279 return seq_list_sorted
2280
2281 @staticmethod
2282 def _create_nslcmop(nsr_id, operation, params):
2283 """
2284 Creates a ns-lcm-opp content to be stored at database.
2285 :param nsr_id: internal id of the instance
2286 :param operation: instantiate, terminate, scale, action, ...
2287 :param params: user parameters for the operation
2288 :return: dictionary following SOL005 format
2289 """
2290 # Raise exception if invalid arguments
2291 if not (nsr_id and operation and params):
2292 raise LcmException(
2293 "Parameters 'nsr_id', 'operation' and 'params' needed to create primitive not provided")
2294 now = time()
2295 _id = str(uuid4())
2296 nslcmop = {
2297 "id": _id,
2298 "_id": _id,
2299 # COMPLETED,PARTIALLY_COMPLETED,FAILED_TEMP,FAILED,ROLLING_BACK,ROLLED_BACK
2300 "operationState": "PROCESSING",
2301 "statusEnteredTime": now,
2302 "nsInstanceId": nsr_id,
2303 "lcmOperationType": operation,
2304 "startTime": now,
2305 "isAutomaticInvocation": False,
2306 "operationParams": params,
2307 "isCancelPending": False,
2308 "links": {
2309 "self": "/osm/nslcm/v1/ns_lcm_op_occs/" + _id,
2310 "nsInstance": "/osm/nslcm/v1/ns_instances/" + nsr_id,
2311 }
2312 }
2313 return nslcmop
2314
calvinosanch9f9c6f22019-11-04 13:37:39 +01002315 def _format_additional_params(self, params):
tierno626e0152019-11-29 14:16:16 +00002316 params = params or {}
calvinosanch9f9c6f22019-11-04 13:37:39 +01002317 for key, value in params.items():
2318 if str(value).startswith("!!yaml "):
2319 params[key] = yaml.safe_load(value[7:])
calvinosanch9f9c6f22019-11-04 13:37:39 +01002320 return params
2321
kuuse8b998e42019-07-30 15:22:16 +02002322 def _get_terminate_primitive_params(self, seq, vnf_index):
2323 primitive = seq.get('name')
2324 primitive_params = {}
2325 params = {
2326 "member_vnf_index": vnf_index,
2327 "primitive": primitive,
2328 "primitive_params": primitive_params,
2329 }
2330 desc_params = {}
2331 return self._map_primitive_params(seq, params, desc_params)
2332
kuuseac3a8882019-10-03 10:48:06 +02002333 # sub-operations
2334
tierno51183952020-04-03 15:48:18 +00002335 def _retry_or_skip_suboperation(self, db_nslcmop, op_index):
2336 op = deep_get(db_nslcmop, ('_admin', 'operations'), [])[op_index]
2337 if op.get('operationState') == 'COMPLETED':
kuuseac3a8882019-10-03 10:48:06 +02002338 # b. Skip sub-operation
2339 # _ns_execute_primitive() or RO.create_action() will NOT be executed
2340 return self.SUBOPERATION_STATUS_SKIP
2341 else:
2342 # c. Reintent executing sub-operation
2343 # The sub-operation exists, and operationState != 'COMPLETED'
2344 # Update operationState = 'PROCESSING' to indicate a reintent.
2345 operationState = 'PROCESSING'
2346 detailed_status = 'In progress'
2347 self._update_suboperation_status(
2348 db_nslcmop, op_index, operationState, detailed_status)
2349 # Return the sub-operation index
2350 # _ns_execute_primitive() or RO.create_action() will be called from scale()
2351 # with arguments extracted from the sub-operation
2352 return op_index
2353
2354 # Find a sub-operation where all keys in a matching dictionary must match
2355 # Returns the index of the matching sub-operation, or SUBOPERATION_STATUS_NOT_FOUND if no match
2356 def _find_suboperation(self, db_nslcmop, match):
2357 if (db_nslcmop and match):
2358 op_list = db_nslcmop.get('_admin', {}).get('operations', [])
2359 for i, op in enumerate(op_list):
2360 if all(op.get(k) == match[k] for k in match):
2361 return i
2362 return self.SUBOPERATION_STATUS_NOT_FOUND
2363
2364 # Update status for a sub-operation given its index
2365 def _update_suboperation_status(self, db_nslcmop, op_index, operationState, detailed_status):
2366 # Update DB for HA tasks
2367 q_filter = {'_id': db_nslcmop['_id']}
2368 update_dict = {'_admin.operations.{}.operationState'.format(op_index): operationState,
2369 '_admin.operations.{}.detailed-status'.format(op_index): detailed_status}
2370 self.db.set_one("nslcmops",
2371 q_filter=q_filter,
2372 update_dict=update_dict,
2373 fail_on_empty=False)
2374
2375 # Add sub-operation, return the index of the added sub-operation
2376 # Optionally, set operationState, detailed-status, and operationType
2377 # Status and type are currently set for 'scale' sub-operations:
2378 # 'operationState' : 'PROCESSING' | 'COMPLETED' | 'FAILED'
2379 # 'detailed-status' : status message
2380 # 'operationType': may be any type, in the case of scaling: 'PRE-SCALE' | 'POST-SCALE'
2381 # Status and operation type are currently only used for 'scale', but NOT for 'terminate' sub-operations.
quilesj7e13aeb2019-10-08 13:34:55 +02002382 def _add_suboperation(self, db_nslcmop, vnf_index, vdu_id, vdu_count_index, vdu_name, primitive,
2383 mapped_primitive_params, operationState=None, detailed_status=None, operationType=None,
kuuseac3a8882019-10-03 10:48:06 +02002384 RO_nsr_id=None, RO_scaling_info=None):
tiernoe876f672020-02-13 14:34:48 +00002385 if not db_nslcmop:
kuuseac3a8882019-10-03 10:48:06 +02002386 return self.SUBOPERATION_STATUS_NOT_FOUND
2387 # Get the "_admin.operations" list, if it exists
2388 db_nslcmop_admin = db_nslcmop.get('_admin', {})
2389 op_list = db_nslcmop_admin.get('operations')
2390 # Create or append to the "_admin.operations" list
kuuse8b998e42019-07-30 15:22:16 +02002391 new_op = {'member_vnf_index': vnf_index,
2392 'vdu_id': vdu_id,
2393 'vdu_count_index': vdu_count_index,
2394 'primitive': primitive,
2395 'primitive_params': mapped_primitive_params}
kuuseac3a8882019-10-03 10:48:06 +02002396 if operationState:
2397 new_op['operationState'] = operationState
2398 if detailed_status:
2399 new_op['detailed-status'] = detailed_status
2400 if operationType:
2401 new_op['lcmOperationType'] = operationType
2402 if RO_nsr_id:
2403 new_op['RO_nsr_id'] = RO_nsr_id
2404 if RO_scaling_info:
2405 new_op['RO_scaling_info'] = RO_scaling_info
2406 if not op_list:
2407 # No existing operations, create key 'operations' with current operation as first list element
2408 db_nslcmop_admin.update({'operations': [new_op]})
2409 op_list = db_nslcmop_admin.get('operations')
2410 else:
2411 # Existing operations, append operation to list
2412 op_list.append(new_op)
kuuse8b998e42019-07-30 15:22:16 +02002413
kuuseac3a8882019-10-03 10:48:06 +02002414 db_nslcmop_update = {'_admin.operations': op_list}
2415 self.update_db_2("nslcmops", db_nslcmop['_id'], db_nslcmop_update)
2416 op_index = len(op_list) - 1
2417 return op_index
2418
2419 # Helper methods for scale() sub-operations
2420
2421 # pre-scale/post-scale:
2422 # Check for 3 different cases:
2423 # a. New: First time execution, return SUBOPERATION_STATUS_NEW
2424 # b. Skip: Existing sub-operation exists, operationState == 'COMPLETED', return SUBOPERATION_STATUS_SKIP
2425 # c. Reintent: Existing sub-operation exists, operationState != 'COMPLETED', return op_index to re-execute
quilesj7e13aeb2019-10-08 13:34:55 +02002426 def _check_or_add_scale_suboperation(self, db_nslcmop, vnf_index, vnf_config_primitive, primitive_params,
2427 operationType, RO_nsr_id=None, RO_scaling_info=None):
kuuseac3a8882019-10-03 10:48:06 +02002428 # Find this sub-operation
2429 if (RO_nsr_id and RO_scaling_info):
2430 operationType = 'SCALE-RO'
2431 match = {
2432 'member_vnf_index': vnf_index,
2433 'RO_nsr_id': RO_nsr_id,
2434 'RO_scaling_info': RO_scaling_info,
2435 }
2436 else:
2437 match = {
2438 'member_vnf_index': vnf_index,
2439 'primitive': vnf_config_primitive,
2440 'primitive_params': primitive_params,
2441 'lcmOperationType': operationType
2442 }
2443 op_index = self._find_suboperation(db_nslcmop, match)
tierno51183952020-04-03 15:48:18 +00002444 if op_index == self.SUBOPERATION_STATUS_NOT_FOUND:
kuuseac3a8882019-10-03 10:48:06 +02002445 # a. New sub-operation
2446 # The sub-operation does not exist, add it.
2447 # _ns_execute_primitive() will be called from scale() as usual, with non-modified arguments
2448 # The following parameters are set to None for all kind of scaling:
2449 vdu_id = None
2450 vdu_count_index = None
2451 vdu_name = None
tierno51183952020-04-03 15:48:18 +00002452 if RO_nsr_id and RO_scaling_info:
kuuseac3a8882019-10-03 10:48:06 +02002453 vnf_config_primitive = None
2454 primitive_params = None
2455 else:
2456 RO_nsr_id = None
2457 RO_scaling_info = None
2458 # Initial status for sub-operation
2459 operationState = 'PROCESSING'
2460 detailed_status = 'In progress'
2461 # Add sub-operation for pre/post-scaling (zero or more operations)
2462 self._add_suboperation(db_nslcmop,
2463 vnf_index,
2464 vdu_id,
2465 vdu_count_index,
2466 vdu_name,
2467 vnf_config_primitive,
2468 primitive_params,
2469 operationState,
2470 detailed_status,
2471 operationType,
2472 RO_nsr_id,
2473 RO_scaling_info)
2474 return self.SUBOPERATION_STATUS_NEW
2475 else:
2476 # Return either SUBOPERATION_STATUS_SKIP (operationState == 'COMPLETED'),
2477 # or op_index (operationState != 'COMPLETED')
tierno51183952020-04-03 15:48:18 +00002478 return self._retry_or_skip_suboperation(db_nslcmop, op_index)
kuuseac3a8882019-10-03 10:48:06 +02002479
preethika.pdf7d8e02019-12-10 13:10:48 +00002480 # Function to return execution_environment id
2481
2482 def _get_ee_id(self, vnf_index, vdu_id, vca_deployed_list):
tiernoe876f672020-02-13 14:34:48 +00002483 # TODO vdu_index_count
preethika.pdf7d8e02019-12-10 13:10:48 +00002484 for vca in vca_deployed_list:
2485 if vca["member-vnf-index"] == vnf_index and vca["vdu_id"] == vdu_id:
2486 return vca["ee_id"]
2487
    async def destroy_N2VC(self, logging_text, db_nslcmop, vca_deployed, config_descriptor, vca_index, destroy_ee=True):
        """
        Execute the terminate config-primitives of a VCA and optionally destroy its execution environment.

        :param logging_text: prefix for log messages
        :param db_nslcmop: nslcmop database record; sub-operations are appended to it for HA
        :param vca_deployed: Dictionary of deployment info at db_nsr._admin.deployed.VCA.<INDEX>
        :param config_descriptor: Configuration descriptor of the NSD, VNFD, VNFD.vdu or VNFD.kdu
        :param vca_index: index in the database _admin.deployed.VCA
        :param destroy_ee: False to not destroy the execution environment here, because
            all of them will be destroyed at once later
        :return: None or exception
        """
        # execute terminate_primitives, only when the descriptor declares them and this
        # VCA is flagged as needing termination
        terminate_primitives = config_descriptor.get("terminate-config-primitive")
        vdu_id = vca_deployed.get("vdu_id")
        vdu_count_index = vca_deployed.get("vdu_count_index")
        vdu_name = vca_deployed.get("vdu_name")
        vnf_index = vca_deployed.get("member-vnf-index")
        if terminate_primitives and vca_deployed.get("needed_terminate"):
            # Get all 'seq' tags in seq_list, order sequences numerically, ascending.
            terminate_primitives = sorted(terminate_primitives, key=lambda x: int(x['seq']))
            for seq in terminate_primitives:
                # For each sequence in list, get primitive and call _ns_execute_primitive()
                step = "Calling terminate action for vnf_member_index={} primitive={}".format(
                    vnf_index, seq.get("name"))
                self.logger.debug(logging_text + step)
                # Create the primitive for each sequence, i.e. "primitive": "touch"
                primitive = seq.get('name')
                mapped_primitive_params = self._get_terminate_primitive_params(seq, vnf_index)
                # The following 3 parameters are currently set to None for 'terminate':
                # vdu_id, vdu_count_index, vdu_name

                # Add sub-operation (recorded for HA, so a restarted LCM can resume)
                self._add_suboperation(db_nslcmop,
                                       vnf_index,
                                       vdu_id,
                                       vdu_count_index,
                                       vdu_name,
                                       primitive,
                                       mapped_primitive_params)
                # Sub-operations: Call _ns_execute_primitive() instead of action()
                try:
                    result, result_detail = await self._ns_execute_primitive(vca_deployed["ee_id"], primitive,
                                                                             mapped_primitive_params)
                except LcmException:
                    # this happens when VCA is not deployed. In this case it is not needed to terminate
                    continue
                result_ok = ['COMPLETED', 'PARTIALLY_COMPLETED']
                if result not in result_ok:
                    raise LcmException("terminate_primitive {} for vnf_member_index={} fails with "
                                       "error {}".format(seq.get("name"), vnf_index, result_detail))
            # set that this VCA do not need terminated
            db_update_entry = "_admin.deployed.VCA.{}.needed_terminate".format(vca_index)
            self.update_db_2("nsrs", db_nslcmop["nsInstanceId"], {db_update_entry: False})

        if destroy_ee:
            await self.n2vc.delete_execution_environment(vca_deployed["ee_id"])
kuuse0ca67472019-05-13 15:59:27 +02002544
tierno51183952020-04-03 15:48:18 +00002545 async def _delete_all_N2VC(self, db_nsr: dict):
2546 self._write_all_config_status(db_nsr=db_nsr, status='TERMINATING')
2547 namespace = "." + db_nsr["_id"]
tiernof59ad6c2020-04-08 12:50:52 +00002548 try:
2549 await self.n2vc.delete_namespace(namespace=namespace, total_timeout=self.timeout_charm_delete)
2550 except N2VCNotFound: # already deleted. Skip
2551 pass
tierno51183952020-04-03 15:48:18 +00002552 self._write_all_config_status(db_nsr=db_nsr, status='DELETED')
quilesj3655ae02019-12-12 16:08:35 +00002553
tiernoe876f672020-02-13 14:34:48 +00002554 async def _terminate_RO(self, logging_text, nsr_deployed, nsr_id, nslcmop_id, stage):
2555 """
2556 Terminates a deployment from RO
2557 :param logging_text:
2558 :param nsr_deployed: db_nsr._admin.deployed
2559 :param nsr_id:
2560 :param nslcmop_id:
2561 :param stage: list of string with the content to write on db_nslcmop.detailed-status.
2562 this method will update only the index 2, but it will write on database the concatenated content of the list
2563 :return:
2564 """
2565 db_nsr_update = {}
2566 failed_detail = []
2567 ro_nsr_id = ro_delete_action = None
2568 if nsr_deployed and nsr_deployed.get("RO"):
2569 ro_nsr_id = nsr_deployed["RO"].get("nsr_id")
2570 ro_delete_action = nsr_deployed["RO"].get("nsr_delete_action_id")
2571 try:
2572 if ro_nsr_id:
2573 stage[2] = "Deleting ns from VIM."
2574 db_nsr_update["detailed-status"] = " ".join(stage)
2575 self._write_op_status(nslcmop_id, stage)
2576 self.logger.debug(logging_text + stage[2])
2577 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2578 self._write_op_status(nslcmop_id, stage)
2579 desc = await self.RO.delete("ns", ro_nsr_id)
2580 ro_delete_action = desc["action_id"]
2581 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = ro_delete_action
2582 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2583 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2584 if ro_delete_action:
2585 # wait until NS is deleted from VIM
2586 stage[2] = "Waiting ns deleted from VIM."
2587 detailed_status_old = None
2588 self.logger.debug(logging_text + stage[2] + " RO_id={} ro_delete_action={}".format(ro_nsr_id,
2589 ro_delete_action))
2590 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2591 self._write_op_status(nslcmop_id, stage)
kuused124bfe2019-06-18 12:09:24 +02002592
tiernoe876f672020-02-13 14:34:48 +00002593 delete_timeout = 20 * 60 # 20 minutes
2594 while delete_timeout > 0:
2595 desc = await self.RO.show(
2596 "ns",
2597 item_id_name=ro_nsr_id,
2598 extra_item="action",
2599 extra_item_id=ro_delete_action)
2600
2601 # deploymentStatus
2602 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
2603
2604 ns_status, ns_status_info = self.RO.check_action_status(desc)
2605 if ns_status == "ERROR":
2606 raise ROclient.ROClientException(ns_status_info)
2607 elif ns_status == "BUILD":
2608 stage[2] = "Deleting from VIM {}".format(ns_status_info)
2609 elif ns_status == "ACTIVE":
2610 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2611 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2612 break
2613 else:
2614 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
2615 if stage[2] != detailed_status_old:
2616 detailed_status_old = stage[2]
2617 db_nsr_update["detailed-status"] = " ".join(stage)
2618 self._write_op_status(nslcmop_id, stage)
2619 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2620 await asyncio.sleep(5, loop=self.loop)
2621 delete_timeout -= 5
2622 else: # delete_timeout <= 0:
2623 raise ROclient.ROClientException("Timeout waiting ns deleted from VIM")
2624
2625 except Exception as e:
2626 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2627 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2628 db_nsr_update["_admin.deployed.RO.nsr_id"] = None
2629 db_nsr_update["_admin.deployed.RO.nsr_status"] = "DELETED"
2630 db_nsr_update["_admin.deployed.RO.nsr_delete_action_id"] = None
2631 self.logger.debug(logging_text + "RO_ns_id={} already deleted".format(ro_nsr_id))
2632 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
tiernoa2143262020-03-27 16:20:40 +00002633 failed_detail.append("delete conflict: {}".format(e))
2634 self.logger.debug(logging_text + "RO_ns_id={} delete conflict: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002635 else:
tiernoa2143262020-03-27 16:20:40 +00002636 failed_detail.append("delete error: {}".format(e))
2637 self.logger.error(logging_text + "RO_ns_id={} delete error: {}".format(ro_nsr_id, e))
tiernoe876f672020-02-13 14:34:48 +00002638
2639 # Delete nsd
2640 if not failed_detail and deep_get(nsr_deployed, ("RO", "nsd_id")):
2641 ro_nsd_id = nsr_deployed["RO"]["nsd_id"]
2642 try:
2643 stage[2] = "Deleting nsd from RO."
2644 db_nsr_update["detailed-status"] = " ".join(stage)
2645 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2646 self._write_op_status(nslcmop_id, stage)
2647 await self.RO.delete("nsd", ro_nsd_id)
2648 self.logger.debug(logging_text + "ro_nsd_id={} deleted".format(ro_nsd_id))
2649 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2650 except Exception as e:
2651 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2652 db_nsr_update["_admin.deployed.RO.nsd_id"] = None
2653 self.logger.debug(logging_text + "ro_nsd_id={} already deleted".format(ro_nsd_id))
2654 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2655 failed_detail.append("ro_nsd_id={} delete conflict: {}".format(ro_nsd_id, e))
2656 self.logger.debug(logging_text + failed_detail[-1])
2657 else:
2658 failed_detail.append("ro_nsd_id={} delete error: {}".format(ro_nsd_id, e))
2659 self.logger.error(logging_text + failed_detail[-1])
2660
2661 if not failed_detail and deep_get(nsr_deployed, ("RO", "vnfd")):
2662 for index, vnf_deployed in enumerate(nsr_deployed["RO"]["vnfd"]):
2663 if not vnf_deployed or not vnf_deployed["id"]:
2664 continue
2665 try:
2666 ro_vnfd_id = vnf_deployed["id"]
2667 stage[2] = "Deleting member_vnf_index={} ro_vnfd_id={} from RO.".format(
2668 vnf_deployed["member-vnf-index"], ro_vnfd_id)
2669 db_nsr_update["detailed-status"] = " ".join(stage)
2670 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2671 self._write_op_status(nslcmop_id, stage)
2672 await self.RO.delete("vnfd", ro_vnfd_id)
2673 self.logger.debug(logging_text + "ro_vnfd_id={} deleted".format(ro_vnfd_id))
2674 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2675 except Exception as e:
2676 if isinstance(e, ROclient.ROClientException) and e.http_code == 404: # not found
2677 db_nsr_update["_admin.deployed.RO.vnfd.{}.id".format(index)] = None
2678 self.logger.debug(logging_text + "ro_vnfd_id={} already deleted ".format(ro_vnfd_id))
2679 elif isinstance(e, ROclient.ROClientException) and e.http_code == 409: # conflict
2680 failed_detail.append("ro_vnfd_id={} delete conflict: {}".format(ro_vnfd_id, e))
2681 self.logger.debug(logging_text + failed_detail[-1])
2682 else:
2683 failed_detail.append("ro_vnfd_id={} delete error: {}".format(ro_vnfd_id, e))
2684 self.logger.error(logging_text + failed_detail[-1])
2685
tiernoa2143262020-03-27 16:20:40 +00002686 if failed_detail:
2687 stage[2] = "Error deleting from VIM"
2688 else:
2689 stage[2] = "Deleted from VIM"
tiernoe876f672020-02-13 14:34:48 +00002690 db_nsr_update["detailed-status"] = " ".join(stage)
2691 self.update_db_2("nsrs", nsr_id, db_nsr_update)
2692 self._write_op_status(nslcmop_id, stage)
2693
2694 if failed_detail:
tiernoa2143262020-03-27 16:20:40 +00002695 raise LcmException("; ".join(failed_detail))
tiernoe876f672020-02-13 14:34:48 +00002696
    async def terminate(self, nsr_id, nslcmop_id):
        """
        Terminate a Network Service instance in three stages: (1) prepare and read records from
        DB, (2) run the per-VCA terminate primitives, (3) delete all execution environments,
        KDU instances and the RO/VIM deployment in parallel tasks. The final status is always
        written to "nsrs"/"nslcmops" and notified through kafka in the finally block.
        :param nsr_id: ns record _id at "nsrs" collection
        :param nslcmop_id: operation record _id at "nslcmops" collection
        :return: None; the outcome is persisted at database and published at kafka topic "ns"
        """
        # Try to lock HA task here
        task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
        if not task_is_locked_by_me:
            return

        logging_text = "Task ns={} terminate={} ".format(nsr_id, nslcmop_id)
        self.logger.debug(logging_text + "Enter")
        timeout_ns_terminate = self.timeout_ns_terminate
        db_nsr = None
        db_nslcmop = None
        exc = None
        error_list = []   # annotates all failed error messages
        db_nslcmop_update = {}
        autoremove = False  # autoremove after terminated
        tasks_dict_info = {}  # task -> human-readable description, consumed by _wait_for_tasks
        db_nsr_update = {}
        stage = ["Stage 1/3: Preparing task.", "Waiting for previous operations to terminate.", ""]
        # ^ contains [stage, step, VIM-status]
        try:
            # wait for any previous tasks in process
            await self.lcm_tasks.waitfor_related_HA("ns", 'nslcmops', nslcmop_id)

            stage[1] = "Getting nslcmop={} from db.".format(nslcmop_id)
            db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
            operation_params = db_nslcmop.get("operationParams") or {}
            # user-supplied timeout overrides the configured default
            if operation_params.get("timeout_ns_terminate"):
                timeout_ns_terminate = operation_params["timeout_ns_terminate"]
            stage[1] = "Getting nsr={} from db.".format(nsr_id)
            db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})

            db_nsr_update["operational-status"] = "terminating"
            db_nsr_update["config-status"] = "terminating"
            self._write_ns_status(
                nsr_id=nsr_id,
                ns_state="TERMINATING",
                current_operation="TERMINATING",
                current_operation_id=nslcmop_id,
                other_update=db_nsr_update
            )
            self._write_op_status(
                op_id=nslcmop_id,
                queuePosition=0,
                stage=stage
            )
            # work on a copy so that DB updates during termination do not mutate this snapshot
            nsr_deployed = deepcopy(db_nsr["_admin"].get("deployed")) or {}
            if db_nsr["_admin"]["nsState"] == "NOT_INSTANTIATED":
                # nothing was ever deployed; the finally block still writes the final status
                return

            stage[1] = "Getting vnf descriptors from db."
            db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
            db_vnfds_from_id = {}  # cache of vnfd documents keyed by vnfd _id
            db_vnfds_from_member_index = {}  # same documents keyed by member-vnf-index
            # Loop over VNFRs
            for vnfr in db_vnfrs_list:
                vnfd_id = vnfr["vnfd-id"]
                if vnfd_id not in db_vnfds_from_id:
                    vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
                    db_vnfds_from_id[vnfd_id] = vnfd
                db_vnfds_from_member_index[vnfr["member-vnf-index-ref"]] = db_vnfds_from_id[vnfd_id]

            # Destroy individual execution environments when there are terminating primitives.
            # Rest of EE will be deleted at once
            if not operation_params.get("skip_terminate_primitives"):
                stage[0] = "Stage 2/3 execute terminating primitives."
                stage[1] = "Looking execution environment that needs terminate."
                self.logger.debug(logging_text + stage[1])
                for vca_index, vca in enumerate(get_iterable(nsr_deployed, "VCA")):
                    config_descriptor = None
                    if not vca or not vca.get("ee_id") or not vca.get("needed_terminate"):
                        continue
                    # locate the configuration descriptor this VCA belongs to:
                    # ns-level, vdu-level, kdu-level or vnf-level
                    if not vca.get("member-vnf-index"):
                        # ns
                        config_descriptor = db_nsr.get("ns-configuration")
                    elif vca.get("vdu_id"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        vdud = next((vdu for vdu in db_vnfd.get("vdu", ()) if vdu["id"] == vca.get("vdu_id")), None)
                        if vdud:
                            config_descriptor = vdud.get("vdu-configuration")
                    elif vca.get("kdu_name"):
                        db_vnfd = db_vnfds_from_member_index[vca["member-vnf-index"]]
                        kdud = next((kdu for kdu in db_vnfd.get("kdu", ()) if kdu["name"] == vca.get("kdu_name")), None)
                        if kdud:
                            config_descriptor = kdud.get("kdu-configuration")
                    else:
                        config_descriptor = db_vnfds_from_member_index[vca["member-vnf-index"]].get("vnf-configuration")
                    task = asyncio.ensure_future(self.destroy_N2VC(logging_text, db_nslcmop, vca, config_descriptor,
                                                                   vca_index, False))
                    tasks_dict_info[task] = "Terminating VCA {}".format(vca.get("ee_id"))

            # wait for pending tasks of terminate primitives
            if tasks_dict_info:
                self.logger.debug(logging_text + 'Waiting for terminate primitive pending tasks...')
                error_list = await self._wait_for_tasks(logging_text, tasks_dict_info,
                                                        min(self.timeout_charm_delete, timeout_ns_terminate),
                                                        stage, nslcmop_id)
                if error_list:
                    return  # raise LcmException("; ".join(error_list))
                tasks_dict_info.clear()

            # remove All execution environments at once
            stage[0] = "Stage 3/3 delete all."

            if nsr_deployed.get("VCA"):
                stage[1] = "Deleting all execution environments."
                self.logger.debug(logging_text + stage[1])
                task_delete_ee = asyncio.ensure_future(asyncio.wait_for(self._delete_all_N2VC(db_nsr=db_nsr),
                                                                        timeout=self.timeout_charm_delete))
                # task_delete_ee = asyncio.ensure_future(self.n2vc.delete_namespace(namespace="." + nsr_id))
                tasks_dict_info[task_delete_ee] = "Terminating all VCA"

            # Delete from k8scluster
            stage[1] = "Deleting KDUs."
            self.logger.debug(logging_text + stage[1])
            # print(nsr_deployed)
            for kdu in get_iterable(nsr_deployed, "K8s"):
                if not kdu or not kdu.get("kdu-instance"):
                    continue
                kdu_instance = kdu.get("kdu-instance")
                # dispatch the uninstall to the proper connector (helm or juju)
                if kdu.get("k8scluster-type") in self.k8scluster_map:
                    task_delete_kdu_instance = asyncio.ensure_future(
                        self.k8scluster_map[kdu["k8scluster-type"]].uninstall(
                            cluster_uuid=kdu.get("k8scluster-uuid"),
                            kdu_instance=kdu_instance))
                else:
                    self.logger.error(logging_text + "Unknown k8s deployment type {}".
                                      format(kdu.get("k8scluster-type")))
                    continue
                tasks_dict_info[task_delete_kdu_instance] = "Terminating KDU '{}'".format(kdu.get("kdu-name"))

            # remove from RO
            stage[1] = "Deleting ns from VIM."
            task_delete_ro = asyncio.ensure_future(
                self._terminate_RO(logging_text, nsr_deployed, nsr_id, nslcmop_id, stage))
            tasks_dict_info[task_delete_ro] = "Removing deployment from VIM"

            # rest of the work (waiting for tasks, status update, kafka notification) is done at finally

        except (ROclient.ROClientException, DbException, LcmException, N2VCException) as e:
            self.logger.error(logging_text + "Exit Exception {}".format(e))
            exc = e
        except asyncio.CancelledError:
            self.logger.error(logging_text + "Cancelled Exception while '{}'".format(stage[1]))
            exc = "Operation was cancelled"
        except Exception as e:
            exc = traceback.format_exc()
            self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(stage[1], e), exc_info=True)
        finally:
            if exc:
                error_list.append(str(exc))
            try:
                # wait for pending tasks
                if tasks_dict_info:
                    stage[1] = "Waiting for terminate pending tasks."
                    self.logger.debug(logging_text + stage[1])
                    error_list += await self._wait_for_tasks(logging_text, tasks_dict_info, timeout_ns_terminate,
                                                             stage, nslcmop_id)
                stage[1] = stage[2] = ""
            except asyncio.CancelledError:
                error_list.append("Cancelled")
                # TODO cancel all tasks
            except Exception as exc:
                error_list.append(str(exc))
            # update status at database
            if error_list:
                error_detail = "; ".join(error_list)
                # self.logger.error(logging_text + error_detail)
                error_description_nslcmop = 'Stage: {}. Detail: {}'.format(stage[0], error_detail)
                error_description_nsr = 'Operation: TERMINATING.{}, Stage {}.'.format(nslcmop_id, stage[0])

                db_nsr_update["operational-status"] = "failed"
                db_nsr_update["detailed-status"] = error_description_nsr + " Detail: " + error_detail
                db_nslcmop_update["detailed-status"] = error_detail
                nslcmop_operation_state = "FAILED"
                ns_state = "BROKEN"
            else:
                error_detail = None
                error_description_nsr = error_description_nslcmop = None
                ns_state = "NOT_INSTANTIATED"
                db_nsr_update["operational-status"] = "terminated"
                db_nsr_update["detailed-status"] = "Done"
                db_nsr_update["_admin.nsState"] = "NOT_INSTANTIATED"
                db_nslcmop_update["detailed-status"] = "Done"
                nslcmop_operation_state = "COMPLETED"

            if db_nsr:
                self._write_ns_status(
                    nsr_id=nsr_id,
                    ns_state=ns_state,
                    current_operation="IDLE",
                    current_operation_id=None,
                    error_description=error_description_nsr,
                    error_detail=error_detail,
                    other_update=db_nsr_update
                )
            if db_nslcmop:
                self._write_op_status(
                    op_id=nslcmop_id,
                    stage="",
                    error_message=error_description_nslcmop,
                    operation_state=nslcmop_operation_state,
                    other_update=db_nslcmop_update,
                )
                autoremove = operation_params.get("autoremove", False)
            if nslcmop_operation_state:
                try:
                    await self.msg.aiowrite("ns", "terminated", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
                                                                 "operationState": nslcmop_operation_state,
                                                                 "autoremove": autoremove},
                                            loop=self.loop)
                except Exception as e:
                    self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))

            self.logger.debug(logging_text + "Exit")
            self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_terminate")
2912
tiernoe876f672020-02-13 14:34:48 +00002913 async def _wait_for_tasks(self, logging_text, created_tasks_info, timeout, stage, nslcmop_id, nsr_id=None):
2914 time_start = time()
tiernoa2143262020-03-27 16:20:40 +00002915 error_detail_list = []
tiernoe876f672020-02-13 14:34:48 +00002916 error_list = []
2917 pending_tasks = list(created_tasks_info.keys())
2918 num_tasks = len(pending_tasks)
2919 num_done = 0
2920 stage[1] = "{}/{}.".format(num_done, num_tasks)
2921 self._write_op_status(nslcmop_id, stage)
tiernoe876f672020-02-13 14:34:48 +00002922 while pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00002923 new_error = None
tiernoe876f672020-02-13 14:34:48 +00002924 _timeout = timeout + time_start - time()
2925 done, pending_tasks = await asyncio.wait(pending_tasks, timeout=_timeout,
2926 return_when=asyncio.FIRST_COMPLETED)
2927 num_done += len(done)
2928 if not done: # Timeout
2929 for task in pending_tasks:
tiernoa2143262020-03-27 16:20:40 +00002930 new_error = created_tasks_info[task] + ": Timeout"
2931 error_detail_list.append(new_error)
2932 error_list.append(new_error)
tiernoe876f672020-02-13 14:34:48 +00002933 break
2934 for task in done:
2935 if task.cancelled():
tierno067e04a2020-03-31 12:53:13 +00002936 exc = "Cancelled"
tiernoe876f672020-02-13 14:34:48 +00002937 else:
2938 exc = task.exception()
tierno067e04a2020-03-31 12:53:13 +00002939 if exc:
2940 if isinstance(exc, asyncio.TimeoutError):
2941 exc = "Timeout"
2942 new_error = created_tasks_info[task] + ": {}".format(exc)
2943 error_list.append(created_tasks_info[task])
2944 error_detail_list.append(new_error)
2945 if isinstance(exc, (str, DbException, N2VCException, ROclient.ROClientException, LcmException)):
2946 self.logger.error(logging_text + new_error)
tiernoe876f672020-02-13 14:34:48 +00002947 else:
tierno067e04a2020-03-31 12:53:13 +00002948 exc_traceback = "".join(traceback.format_exception(None, exc, exc.__traceback__))
2949 self.logger.error(logging_text + created_tasks_info[task] + exc_traceback)
2950 else:
2951 self.logger.debug(logging_text + created_tasks_info[task] + ": Done")
tiernoe876f672020-02-13 14:34:48 +00002952 stage[1] = "{}/{}.".format(num_done, num_tasks)
2953 if new_error:
tiernoa2143262020-03-27 16:20:40 +00002954 stage[1] += " Errors: " + ". ".join(error_detail_list) + "."
tiernoe876f672020-02-13 14:34:48 +00002955 if nsr_id: # update also nsr
tiernoa2143262020-03-27 16:20:40 +00002956 self.update_db_2("nsrs", nsr_id, {"errorDescription": "Error at: " + ", ".join(error_list),
2957 "errorDetail": ". ".join(error_detail_list)})
tiernoe876f672020-02-13 14:34:48 +00002958 self._write_op_status(nslcmop_id, stage)
tiernoa2143262020-03-27 16:20:40 +00002959 return error_detail_list
tiernoe876f672020-02-13 14:34:48 +00002960
tiernoda964822019-01-14 15:53:47 +00002961 @staticmethod
2962 def _map_primitive_params(primitive_desc, params, instantiation_params):
2963 """
2964 Generates the params to be provided to charm before executing primitive. If user does not provide a parameter,
2965 The default-value is used. If it is between < > it look for a value at instantiation_params
2966 :param primitive_desc: portion of VNFD/NSD that describes primitive
2967 :param params: Params provided by user
2968 :param instantiation_params: Instantiation params provided by user
2969 :return: a dictionary with the calculated params
2970 """
2971 calculated_params = {}
2972 for parameter in primitive_desc.get("parameter", ()):
2973 param_name = parameter["name"]
2974 if param_name in params:
2975 calculated_params[param_name] = params[param_name]
tierno98ad6ea2019-05-30 17:16:28 +00002976 elif "default-value" in parameter or "value" in parameter:
2977 if "value" in parameter:
2978 calculated_params[param_name] = parameter["value"]
2979 else:
2980 calculated_params[param_name] = parameter["default-value"]
2981 if isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("<") \
2982 and calculated_params[param_name].endswith(">"):
2983 if calculated_params[param_name][1:-1] in instantiation_params:
2984 calculated_params[param_name] = instantiation_params[calculated_params[param_name][1:-1]]
tiernoda964822019-01-14 15:53:47 +00002985 else:
2986 raise LcmException("Parameter {} needed to execute primitive {} not provided".
tiernod8323042019-08-09 11:32:23 +00002987 format(calculated_params[param_name], primitive_desc["name"]))
tiernoda964822019-01-14 15:53:47 +00002988 else:
2989 raise LcmException("Parameter {} needed to execute primitive {} not provided".
2990 format(param_name, primitive_desc["name"]))
tierno59d22d22018-09-25 18:10:19 +02002991
tiernoda964822019-01-14 15:53:47 +00002992 if isinstance(calculated_params[param_name], (dict, list, tuple)):
2993 calculated_params[param_name] = yaml.safe_dump(calculated_params[param_name], default_flow_style=True,
2994 width=256)
2995 elif isinstance(calculated_params[param_name], str) and calculated_params[param_name].startswith("!!yaml "):
2996 calculated_params[param_name] = calculated_params[param_name][7:]
tiernoc3f2a822019-11-05 13:45:04 +00002997
2998 # add always ns_config_info if primitive name is config
2999 if primitive_desc["name"] == "config":
3000 if "ns_config_info" in instantiation_params:
3001 calculated_params["ns_config_info"] = instantiation_params["ns_config_info"]
tiernoda964822019-01-14 15:53:47 +00003002 return calculated_params
3003
tierno067e04a2020-03-31 12:53:13 +00003004 def _look_for_deployed_vca(self, deployed_vca, member_vnf_index, vdu_id, vdu_count_index, kdu_name=None):
tiernoe876f672020-02-13 14:34:48 +00003005 # find vca_deployed record for this action. Raise LcmException if not found or there is not any id.
3006 for vca in deployed_vca:
3007 if not vca:
3008 continue
3009 if member_vnf_index != vca["member-vnf-index"] or vdu_id != vca["vdu_id"]:
3010 continue
tiernoe876f672020-02-13 14:34:48 +00003011 if vdu_count_index is not None and vdu_count_index != vca["vdu_count_index"]:
3012 continue
3013 if kdu_name and kdu_name != vca["kdu_name"]:
3014 continue
3015 break
3016 else:
3017 # vca_deployed not found
tierno067e04a2020-03-31 12:53:13 +00003018 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} is not "
3019 "deployed".format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003020
tiernoe876f672020-02-13 14:34:48 +00003021 # get ee_id
3022 ee_id = vca.get("ee_id")
3023 if not ee_id:
tierno067e04a2020-03-31 12:53:13 +00003024 raise LcmException("charm for member_vnf_index={} vdu_id={} kdu_name={} vdu_count_index={} has not "
tiernoe876f672020-02-13 14:34:48 +00003025 "execution environment"
tierno067e04a2020-03-31 12:53:13 +00003026 .format(member_vnf_index, vdu_id, kdu_name, vdu_count_index))
tiernoe876f672020-02-13 14:34:48 +00003027 return ee_id
3028
3029 async def _ns_execute_primitive(self, ee_id, primitive, primitive_params, retries=0,
tierno067e04a2020-03-31 12:53:13 +00003030 retries_interval=30, timeout=None) -> (str, str):
tiernoda964822019-01-14 15:53:47 +00003031 try:
tierno98ad6ea2019-05-30 17:16:28 +00003032 if primitive == "config":
3033 primitive_params = {"params": primitive_params}
tierno2fc7ce52019-06-11 22:50:01 +00003034
quilesj7e13aeb2019-10-08 13:34:55 +02003035 while retries >= 0:
3036 try:
tierno067e04a2020-03-31 12:53:13 +00003037 output = await asyncio.wait_for(
3038 self.n2vc.exec_primitive(
3039 ee_id=ee_id,
3040 primitive_name=primitive,
3041 params_dict=primitive_params,
3042 progress_timeout=self.timeout_progress_primitive,
3043 total_timeout=self.timeout_primitive),
3044 timeout=timeout or self.timeout_primitive)
quilesj7e13aeb2019-10-08 13:34:55 +02003045 # execution was OK
3046 break
tierno067e04a2020-03-31 12:53:13 +00003047 except asyncio.CancelledError:
3048 raise
3049 except Exception as e: # asyncio.TimeoutError
3050 if isinstance(e, asyncio.TimeoutError):
3051 e = "Timeout"
quilesj7e13aeb2019-10-08 13:34:55 +02003052 retries -= 1
3053 if retries >= 0:
tierno73d8bd02019-11-18 17:33:27 +00003054 self.logger.debug('Error executing action {} on {} -> {}'.format(primitive, ee_id, e))
quilesj7e13aeb2019-10-08 13:34:55 +02003055 # wait and retry
3056 await asyncio.sleep(retries_interval, loop=self.loop)
tierno73d8bd02019-11-18 17:33:27 +00003057 else:
tierno067e04a2020-03-31 12:53:13 +00003058 return 'FAILED', str(e)
quilesj7e13aeb2019-10-08 13:34:55 +02003059
tiernoe876f672020-02-13 14:34:48 +00003060 return 'COMPLETED', output
quilesj7e13aeb2019-10-08 13:34:55 +02003061
tierno067e04a2020-03-31 12:53:13 +00003062 except (LcmException, asyncio.CancelledError):
tiernoe876f672020-02-13 14:34:48 +00003063 raise
quilesj7e13aeb2019-10-08 13:34:55 +02003064 except Exception as e:
tiernoe876f672020-02-13 14:34:48 +00003065 return 'FAIL', 'Error executing action {}: {}'.format(primitive, e)
tierno59d22d22018-09-25 18:10:19 +02003066
3067 async def action(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003068
3069 # Try to lock HA task here
3070 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3071 if not task_is_locked_by_me:
3072 return
3073
tierno59d22d22018-09-25 18:10:19 +02003074 logging_text = "Task ns={} action={} ".format(nsr_id, nslcmop_id)
3075 self.logger.debug(logging_text + "Enter")
3076 # get all needed from database
3077 db_nsr = None
3078 db_nslcmop = None
tiernoe876f672020-02-13 14:34:48 +00003079 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003080 db_nslcmop_update = {}
3081 nslcmop_operation_state = None
tierno067e04a2020-03-31 12:53:13 +00003082 error_description_nslcmop = None
tierno59d22d22018-09-25 18:10:19 +02003083 exc = None
3084 try:
kuused124bfe2019-06-18 12:09:24 +02003085 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003086 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003087 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
3088
quilesj4cda56b2019-12-05 10:02:20 +00003089 self._write_ns_status(
3090 nsr_id=nsr_id,
3091 ns_state=None,
3092 current_operation="RUNNING ACTION",
3093 current_operation_id=nslcmop_id
3094 )
3095
tierno59d22d22018-09-25 18:10:19 +02003096 step = "Getting information from database"
3097 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3098 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
tiernoda964822019-01-14 15:53:47 +00003099
tiernoe4f7e6c2018-11-27 14:55:30 +00003100 nsr_deployed = db_nsr["_admin"].get("deployed")
tierno1b633412019-02-25 16:48:23 +00003101 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tierno59d22d22018-09-25 18:10:19 +02003102 vdu_id = db_nslcmop["operationParams"].get("vdu_id")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003103 kdu_name = db_nslcmop["operationParams"].get("kdu_name")
tiernoe4f7e6c2018-11-27 14:55:30 +00003104 vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
tierno067e04a2020-03-31 12:53:13 +00003105 primitive = db_nslcmop["operationParams"]["primitive"]
3106 primitive_params = db_nslcmop["operationParams"]["primitive_params"]
3107 timeout_ns_action = db_nslcmop["operationParams"].get("timeout_ns_action", self.timeout_primitive)
tierno59d22d22018-09-25 18:10:19 +02003108
tierno1b633412019-02-25 16:48:23 +00003109 if vnf_index:
3110 step = "Getting vnfr from database"
3111 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3112 step = "Getting vnfd from database"
3113 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
3114 else:
tierno067e04a2020-03-31 12:53:13 +00003115 step = "Getting nsd from database"
3116 db_nsd = self.db.get_one("nsds", {"_id": db_nsr["nsd-id"]})
tiernoda964822019-01-14 15:53:47 +00003117
tierno82974b22018-11-27 21:55:36 +00003118 # for backward compatibility
3119 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3120 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3121 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3122 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3123
tiernoda964822019-01-14 15:53:47 +00003124 # look for primitive
3125 config_primitive_desc = None
3126 if vdu_id:
3127 for vdu in get_iterable(db_vnfd, "vdu"):
3128 if vdu_id == vdu["id"]:
tierno067e04a2020-03-31 12:53:13 +00003129 for config_primitive in deep_get(vdu, ("vdu-configuration", "config-primitive"), ()):
tiernoda964822019-01-14 15:53:47 +00003130 if config_primitive["name"] == primitive:
3131 config_primitive_desc = config_primitive
3132 break
tierno067e04a2020-03-31 12:53:13 +00003133 break
calvinosanch9f9c6f22019-11-04 13:37:39 +01003134 elif kdu_name:
tierno067e04a2020-03-31 12:53:13 +00003135 for kdu in get_iterable(db_vnfd, "kdu"):
3136 if kdu_name == kdu["name"]:
3137 for config_primitive in deep_get(kdu, ("kdu-configuration", "config-primitive"), ()):
3138 if config_primitive["name"] == primitive:
3139 config_primitive_desc = config_primitive
3140 break
3141 break
tierno1b633412019-02-25 16:48:23 +00003142 elif vnf_index:
tierno067e04a2020-03-31 12:53:13 +00003143 for config_primitive in deep_get(db_vnfd, ("vnf-configuration", "config-primitive"), ()):
tierno1b633412019-02-25 16:48:23 +00003144 if config_primitive["name"] == primitive:
3145 config_primitive_desc = config_primitive
3146 break
3147 else:
tierno067e04a2020-03-31 12:53:13 +00003148 for config_primitive in deep_get(db_nsd, ("ns-configuration", "config-primitive"), ()):
tierno1b633412019-02-25 16:48:23 +00003149 if config_primitive["name"] == primitive:
3150 config_primitive_desc = config_primitive
3151 break
tiernoda964822019-01-14 15:53:47 +00003152
tierno067e04a2020-03-31 12:53:13 +00003153 if not config_primitive_desc and not (kdu_name and primitive in ("upgrade", "rollback", "status")):
tierno1b633412019-02-25 16:48:23 +00003154 raise LcmException("Primitive {} not found at [ns|vnf|vdu]-configuration:config-primitive ".
3155 format(primitive))
3156
tierno1b633412019-02-25 16:48:23 +00003157 if vnf_index:
tierno626e0152019-11-29 14:16:16 +00003158 if vdu_id:
3159 vdur = next((x for x in db_vnfr["vdur"] if x["vdu-id-ref"] == vdu_id), None)
tierno067e04a2020-03-31 12:53:13 +00003160 desc_params = self._format_additional_params(vdur.get("additionalParams"))
3161 elif kdu_name:
3162 kdur = next((x for x in db_vnfr["kdur"] if x["kdu-name"] == kdu_name), None)
3163 desc_params = self._format_additional_params(kdur.get("additionalParams"))
3164 else:
3165 desc_params = self._format_additional_params(db_vnfr.get("additionalParamsForVnf"))
tierno1b633412019-02-25 16:48:23 +00003166 else:
tierno067e04a2020-03-31 12:53:13 +00003167 desc_params = self._format_additional_params(db_nsr.get("additionalParamsForNs"))
tiernoda964822019-01-14 15:53:47 +00003168
3169 # TODO check if ns is in a proper status
tierno067e04a2020-03-31 12:53:13 +00003170 if kdu_name and primitive in ("upgrade", "rollback", "status"):
3171 # kdur and desc_params already set from before
3172 if primitive_params:
3173 desc_params.update(primitive_params)
3174 # TODO Check if we will need something at vnf level
3175 for index, kdu in enumerate(get_iterable(nsr_deployed, "K8s")):
3176 if kdu_name == kdu["kdu-name"] and kdu["member-vnf-index"] == vnf_index:
3177 break
3178 else:
3179 raise LcmException("KDU '{}' for vnf '{}' not deployed".format(kdu_name, vnf_index))
quilesj7e13aeb2019-10-08 13:34:55 +02003180
tierno067e04a2020-03-31 12:53:13 +00003181 if kdu.get("k8scluster-type") not in self.k8scluster_map:
3182 msg = "unknown k8scluster-type '{}'".format(kdu.get("k8scluster-type"))
3183 raise LcmException(msg)
3184
3185 db_dict = {"collection": "nsrs",
3186 "filter": {"_id": nsr_id},
3187 "path": "_admin.deployed.K8s.{}".format(index)}
3188 self.logger.debug(logging_text + "Exec k8s {} on {}.{}".format(primitive, vnf_index, kdu_name))
3189 step = "Executing kdu {}".format(primitive)
3190 if primitive == "upgrade":
3191 if desc_params.get("kdu_model"):
3192 kdu_model = desc_params.get("kdu_model")
3193 del desc_params["kdu_model"]
3194 else:
3195 kdu_model = kdu.get("kdu-model")
3196 parts = kdu_model.split(sep=":")
3197 if len(parts) == 2:
3198 kdu_model = parts[0]
3199
3200 detailed_status = await asyncio.wait_for(
3201 self.k8scluster_map[kdu["k8scluster-type"]].upgrade(
3202 cluster_uuid=kdu.get("k8scluster-uuid"),
3203 kdu_instance=kdu.get("kdu-instance"),
3204 atomic=True, kdu_model=kdu_model,
3205 params=desc_params, db_dict=db_dict,
3206 timeout=timeout_ns_action),
3207 timeout=timeout_ns_action + 10)
3208 self.logger.debug(logging_text + " Upgrade of kdu {} done".format(detailed_status))
3209 elif primitive == "rollback":
3210 detailed_status = await asyncio.wait_for(
3211 self.k8scluster_map[kdu["k8scluster-type"]].rollback(
3212 cluster_uuid=kdu.get("k8scluster-uuid"),
3213 kdu_instance=kdu.get("kdu-instance"),
3214 db_dict=db_dict),
3215 timeout=timeout_ns_action)
3216 elif primitive == "status":
3217 detailed_status = await asyncio.wait_for(
3218 self.k8scluster_map[kdu["k8scluster-type"]].status_kdu(
3219 cluster_uuid=kdu.get("k8scluster-uuid"),
3220 kdu_instance=kdu.get("kdu-instance")),
3221 timeout=timeout_ns_action)
3222
3223 if detailed_status:
3224 nslcmop_operation_state = 'COMPLETED'
3225 else:
3226 detailed_status = ''
3227 nslcmop_operation_state = 'FAILED'
3228
3229 else:
3230 nslcmop_operation_state, detailed_status = await self._ns_execute_primitive(
3231 self._look_for_deployed_vca(nsr_deployed["VCA"],
3232 member_vnf_index=vnf_index,
3233 vdu_id=vdu_id,
3234 vdu_count_index=vdu_count_index),
3235 primitive=primitive,
3236 primitive_params=self._map_primitive_params(config_primitive_desc, primitive_params, desc_params),
3237 timeout=timeout_ns_action)
3238
3239 db_nslcmop_update["detailed-status"] = detailed_status
3240 error_description_nslcmop = detailed_status if nslcmop_operation_state == "FAILED" else ""
3241 self.logger.debug(logging_text + " task Done with result {} {}".format(nslcmop_operation_state,
3242 detailed_status))
tierno59d22d22018-09-25 18:10:19 +02003243 return # database update is called inside finally
3244
tiernof59ad6c2020-04-08 12:50:52 +00003245 except (DbException, LcmException, N2VCException, K8sException) as e:
tierno59d22d22018-09-25 18:10:19 +02003246 self.logger.error(logging_text + "Exit Exception {}".format(e))
3247 exc = e
3248 except asyncio.CancelledError:
3249 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3250 exc = "Operation was cancelled"
tierno067e04a2020-03-31 12:53:13 +00003251 except asyncio.TimeoutError:
3252 self.logger.error(logging_text + "Timeout while '{}'".format(step))
3253 exc = "Timeout"
tierno59d22d22018-09-25 18:10:19 +02003254 except Exception as e:
3255 exc = traceback.format_exc()
3256 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3257 finally:
tierno067e04a2020-03-31 12:53:13 +00003258 if exc:
3259 db_nslcmop_update["detailed-status"] = detailed_status = error_description_nslcmop = \
kuuse0ca67472019-05-13 15:59:27 +02003260 "FAILED {}: {}".format(step, exc)
tierno067e04a2020-03-31 12:53:13 +00003261 nslcmop_operation_state = "FAILED"
3262 if db_nsr:
3263 self._write_ns_status(
3264 nsr_id=nsr_id,
3265 ns_state=db_nsr["nsState"], # TODO check if degraded. For the moment use previous status
3266 current_operation="IDLE",
3267 current_operation_id=None,
3268 # error_description=error_description_nsr,
3269 # error_detail=error_detail,
3270 other_update=db_nsr_update
3271 )
3272
3273 if db_nslcmop:
3274 self._write_op_status(
3275 op_id=nslcmop_id,
3276 stage="",
3277 error_message=error_description_nslcmop,
3278 operation_state=nslcmop_operation_state,
3279 other_update=db_nslcmop_update,
3280 )
3281
tierno59d22d22018-09-25 18:10:19 +02003282 if nslcmop_operation_state:
3283 try:
3284 await self.msg.aiowrite("ns", "actioned", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003285 "operationState": nslcmop_operation_state},
3286 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003287 except Exception as e:
3288 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3289 self.logger.debug(logging_text + "Exit")
3290 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_action")
tierno067e04a2020-03-31 12:53:13 +00003291 return nslcmop_operation_state, detailed_status
tierno59d22d22018-09-25 18:10:19 +02003292
3293 async def scale(self, nsr_id, nslcmop_id):
kuused124bfe2019-06-18 12:09:24 +02003294
3295 # Try to lock HA task here
3296 task_is_locked_by_me = self.lcm_tasks.lock_HA('ns', 'nslcmops', nslcmop_id)
3297 if not task_is_locked_by_me:
3298 return
3299
tierno59d22d22018-09-25 18:10:19 +02003300 logging_text = "Task ns={} scale={} ".format(nsr_id, nslcmop_id)
3301 self.logger.debug(logging_text + "Enter")
3302 # get all needed from database
3303 db_nsr = None
3304 db_nslcmop = None
3305 db_nslcmop_update = {}
3306 nslcmop_operation_state = None
tiernoe876f672020-02-13 14:34:48 +00003307 db_nsr_update = {}
tierno59d22d22018-09-25 18:10:19 +02003308 exc = None
tierno9ab95942018-10-10 16:44:22 +02003309 # in case of error, indicates what part of scale was failed to put nsr at error status
3310 scale_process = None
tiernod6de1992018-10-11 13:05:52 +02003311 old_operational_status = ""
3312 old_config_status = ""
tiernof578e552018-11-08 19:07:20 +01003313 vnfr_scaled = False
tierno59d22d22018-09-25 18:10:19 +02003314 try:
kuused124bfe2019-06-18 12:09:24 +02003315 # wait for any previous tasks in process
tierno3cf81a32019-11-11 17:07:00 +00003316 step = "Waiting for previous operations to terminate"
kuused124bfe2019-06-18 12:09:24 +02003317 await self.lcm_tasks.waitfor_related_HA('ns', 'nslcmops', nslcmop_id)
tierno47e86b52018-10-10 14:05:55 +02003318
quilesj4cda56b2019-12-05 10:02:20 +00003319 self._write_ns_status(
3320 nsr_id=nsr_id,
3321 ns_state=None,
3322 current_operation="SCALING",
3323 current_operation_id=nslcmop_id
3324 )
3325
ikalyvas02d9e7b2019-05-27 18:16:01 +03003326 step = "Getting nslcmop from database"
ikalyvas02d9e7b2019-05-27 18:16:01 +03003327 self.logger.debug(step + " after having waited for previous tasks to be completed")
3328 db_nslcmop = self.db.get_one("nslcmops", {"_id": nslcmop_id})
3329 step = "Getting nsr from database"
3330 db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
3331
3332 old_operational_status = db_nsr["operational-status"]
3333 old_config_status = db_nsr["config-status"]
tierno59d22d22018-09-25 18:10:19 +02003334 step = "Parsing scaling parameters"
tierno9babfda2019-06-07 12:36:50 +00003335 # self.logger.debug(step)
tierno59d22d22018-09-25 18:10:19 +02003336 db_nsr_update["operational-status"] = "scaling"
3337 self.update_db_2("nsrs", nsr_id, db_nsr_update)
tiernoe4f7e6c2018-11-27 14:55:30 +00003338 nsr_deployed = db_nsr["_admin"].get("deployed")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003339
3340 #######
3341 nsr_deployed = db_nsr["_admin"].get("deployed")
3342 vnf_index = db_nslcmop["operationParams"].get("member_vnf_index")
tiernoda6fb102019-11-23 00:36:52 +00003343 # vdu_id = db_nslcmop["operationParams"].get("vdu_id")
3344 # vdu_count_index = db_nslcmop["operationParams"].get("vdu_count_index")
3345 # vdu_name = db_nslcmop["operationParams"].get("vdu_name")
calvinosanch9f9c6f22019-11-04 13:37:39 +01003346 #######
3347
tiernoe4f7e6c2018-11-27 14:55:30 +00003348 RO_nsr_id = nsr_deployed["RO"]["nsr_id"]
tierno59d22d22018-09-25 18:10:19 +02003349 vnf_index = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["member-vnf-index"]
3350 scaling_group = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"]["scaling-group-descriptor"]
3351 scaling_type = db_nslcmop["operationParams"]["scaleVnfData"]["scaleVnfType"]
3352 # scaling_policy = db_nslcmop["operationParams"]["scaleVnfData"]["scaleByStepData"].get("scaling-policy")
3353
tierno82974b22018-11-27 21:55:36 +00003354 # for backward compatibility
3355 if nsr_deployed and isinstance(nsr_deployed.get("VCA"), dict):
3356 nsr_deployed["VCA"] = list(nsr_deployed["VCA"].values())
3357 db_nsr_update["_admin.deployed.VCA"] = nsr_deployed["VCA"]
3358 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3359
tierno59d22d22018-09-25 18:10:19 +02003360 step = "Getting vnfr from database"
3361 db_vnfr = self.db.get_one("vnfrs", {"member-vnf-index-ref": vnf_index, "nsr-id-ref": nsr_id})
3362 step = "Getting vnfd from database"
3363 db_vnfd = self.db.get_one("vnfds", {"_id": db_vnfr["vnfd-id"]})
ikalyvas02d9e7b2019-05-27 18:16:01 +03003364
tierno59d22d22018-09-25 18:10:19 +02003365 step = "Getting scaling-group-descriptor"
3366 for scaling_descriptor in db_vnfd["scaling-group-descriptor"]:
3367 if scaling_descriptor["name"] == scaling_group:
3368 break
3369 else:
3370 raise LcmException("input parameter 'scaleByStepData':'scaling-group-descriptor':'{}' is not present "
3371 "at vnfd:scaling-group-descriptor".format(scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003372
tierno59d22d22018-09-25 18:10:19 +02003373 # cooldown_time = 0
3374 # for scaling_policy_descriptor in scaling_descriptor.get("scaling-policy", ()):
3375 # cooldown_time = scaling_policy_descriptor.get("cooldown-time", 0)
3376 # if scaling_policy and scaling_policy == scaling_policy_descriptor.get("name"):
3377 # break
3378
3379 # TODO check if ns is in a proper status
tierno15b1cf12019-08-29 13:21:40 +00003380 step = "Sending scale order to VIM"
tierno59d22d22018-09-25 18:10:19 +02003381 nb_scale_op = 0
3382 if not db_nsr["_admin"].get("scaling-group"):
3383 self.update_db_2("nsrs", nsr_id, {"_admin.scaling-group": [{"name": scaling_group, "nb-scale-op": 0}]})
3384 admin_scale_index = 0
3385 else:
3386 for admin_scale_index, admin_scale_info in enumerate(db_nsr["_admin"]["scaling-group"]):
3387 if admin_scale_info["name"] == scaling_group:
3388 nb_scale_op = admin_scale_info.get("nb-scale-op", 0)
3389 break
tierno9ab95942018-10-10 16:44:22 +02003390 else: # not found, set index one plus last element and add new entry with the name
3391 admin_scale_index += 1
3392 db_nsr_update["_admin.scaling-group.{}.name".format(admin_scale_index)] = scaling_group
tierno59d22d22018-09-25 18:10:19 +02003393 RO_scaling_info = []
3394 vdu_scaling_info = {"scaling_group_name": scaling_group, "vdu": []}
3395 if scaling_type == "SCALE_OUT":
3396 # count if max-instance-count is reached
kuuse818d70c2019-08-07 14:43:44 +02003397 max_instance_count = scaling_descriptor.get("max-instance-count", 10)
3398 # self.logger.debug("MAX_INSTANCE_COUNT is {}".format(max_instance_count))
3399 if nb_scale_op >= max_instance_count:
3400 raise LcmException("reached the limit of {} (max-instance-count) "
3401 "scaling-out operations for the "
3402 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
kuuse8b998e42019-07-30 15:22:16 +02003403
ikalyvas02d9e7b2019-05-27 18:16:01 +03003404 nb_scale_op += 1
tierno59d22d22018-09-25 18:10:19 +02003405 vdu_scaling_info["scaling_direction"] = "OUT"
3406 vdu_scaling_info["vdu-create"] = {}
3407 for vdu_scale_info in scaling_descriptor["vdu"]:
3408 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3409 "type": "create", "count": vdu_scale_info.get("count", 1)})
3410 vdu_scaling_info["vdu-create"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003411
tierno59d22d22018-09-25 18:10:19 +02003412 elif scaling_type == "SCALE_IN":
3413 # count if min-instance-count is reached
tierno27246d82018-09-27 15:59:09 +02003414 min_instance_count = 0
tierno59d22d22018-09-25 18:10:19 +02003415 if "min-instance-count" in scaling_descriptor and scaling_descriptor["min-instance-count"] is not None:
3416 min_instance_count = int(scaling_descriptor["min-instance-count"])
tierno9babfda2019-06-07 12:36:50 +00003417 if nb_scale_op <= min_instance_count:
3418 raise LcmException("reached the limit of {} (min-instance-count) scaling-in operations for the "
3419 "scaling-group-descriptor '{}'".format(nb_scale_op, scaling_group))
ikalyvas02d9e7b2019-05-27 18:16:01 +03003420 nb_scale_op -= 1
tierno59d22d22018-09-25 18:10:19 +02003421 vdu_scaling_info["scaling_direction"] = "IN"
3422 vdu_scaling_info["vdu-delete"] = {}
3423 for vdu_scale_info in scaling_descriptor["vdu"]:
3424 RO_scaling_info.append({"osm_vdu_id": vdu_scale_info["vdu-id-ref"], "member-vnf-index": vnf_index,
3425 "type": "delete", "count": vdu_scale_info.get("count", 1)})
3426 vdu_scaling_info["vdu-delete"][vdu_scale_info["vdu-id-ref"]] = vdu_scale_info.get("count", 1)
3427
3428 # update VDU_SCALING_INFO with the VDUs to delete ip_addresses
tierno27246d82018-09-27 15:59:09 +02003429 vdu_create = vdu_scaling_info.get("vdu-create")
3430 vdu_delete = copy(vdu_scaling_info.get("vdu-delete"))
tierno59d22d22018-09-25 18:10:19 +02003431 if vdu_scaling_info["scaling_direction"] == "IN":
3432 for vdur in reversed(db_vnfr["vdur"]):
tierno27246d82018-09-27 15:59:09 +02003433 if vdu_delete.get(vdur["vdu-id-ref"]):
3434 vdu_delete[vdur["vdu-id-ref"]] -= 1
tierno59d22d22018-09-25 18:10:19 +02003435 vdu_scaling_info["vdu"].append({
3436 "name": vdur["name"],
3437 "vdu_id": vdur["vdu-id-ref"],
3438 "interface": []
3439 })
3440 for interface in vdur["interfaces"]:
3441 vdu_scaling_info["vdu"][-1]["interface"].append({
3442 "name": interface["name"],
3443 "ip_address": interface["ip-address"],
3444 "mac_address": interface.get("mac-address"),
3445 })
tierno27246d82018-09-27 15:59:09 +02003446 vdu_delete = vdu_scaling_info.pop("vdu-delete")
tierno59d22d22018-09-25 18:10:19 +02003447
kuuseac3a8882019-10-03 10:48:06 +02003448 # PRE-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003449 step = "Executing pre-scale vnf-config-primitive"
3450 if scaling_descriptor.get("scaling-config-action"):
3451 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003452 if (scaling_config_action.get("trigger") == "pre-scale-in" and scaling_type == "SCALE_IN") \
3453 or (scaling_config_action.get("trigger") == "pre-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003454 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3455 step = db_nslcmop_update["detailed-status"] = \
3456 "executing pre-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003457
tierno59d22d22018-09-25 18:10:19 +02003458 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003459 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3460 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003461 break
3462 else:
3463 raise LcmException(
3464 "Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:scaling-config-action"
tiernoda964822019-01-14 15:53:47 +00003465 "[vnf-config-primitive-name-ref='{}'] does not match any vnf-configuration:config-"
tierno59d22d22018-09-25 18:10:19 +02003466 "primitive".format(scaling_group, config_primitive))
tiernoda964822019-01-14 15:53:47 +00003467
tierno16fedf52019-05-24 08:38:26 +00003468 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003469 if db_vnfr.get("additionalParamsForVnf"):
3470 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
quilesj7e13aeb2019-10-08 13:34:55 +02003471
tierno9ab95942018-10-10 16:44:22 +02003472 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003473 db_nsr_update["config-status"] = "configuring pre-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003474 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
3475
3476 # Pre-scale reintent check: Check if this sub-operation has been executed before
3477 op_index = self._check_or_add_scale_suboperation(
3478 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'PRE-SCALE')
3479 if (op_index == self.SUBOPERATION_STATUS_SKIP):
3480 # Skip sub-operation
3481 result = 'COMPLETED'
3482 result_detail = 'Done'
3483 self.logger.debug(logging_text +
3484 "vnf_config_primitive={} Skipped sub-operation, result {} {}".format(
3485 vnf_config_primitive, result, result_detail))
3486 else:
3487 if (op_index == self.SUBOPERATION_STATUS_NEW):
3488 # New sub-operation: Get index of this sub-operation
3489 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3490 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3491 format(vnf_config_primitive))
3492 else:
3493 # Reintent: Get registered params for this existing sub-operation
3494 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3495 vnf_index = op.get('member_vnf_index')
3496 vnf_config_primitive = op.get('primitive')
3497 primitive_params = op.get('primitive_params')
3498 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation reintent".
3499 format(vnf_config_primitive))
3500 # Execute the primitive, either with new (first-time) or registered (reintent) args
3501 result, result_detail = await self._ns_execute_primitive(
tiernoe876f672020-02-13 14:34:48 +00003502 self._look_for_deployed_vca(nsr_deployed["VCA"],
3503 member_vnf_index=vnf_index,
3504 vdu_id=None,
tiernoe876f672020-02-13 14:34:48 +00003505 vdu_count_index=None),
3506 vnf_config_primitive, primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02003507 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3508 vnf_config_primitive, result, result_detail))
3509 # Update operationState = COMPLETED | FAILED
3510 self._update_suboperation_status(
3511 db_nslcmop, op_index, result, result_detail)
3512
tierno59d22d22018-09-25 18:10:19 +02003513 if result == "FAILED":
3514 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02003515 db_nsr_update["config-status"] = old_config_status
3516 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02003517 # PRE-SCALE END
tierno59d22d22018-09-25 18:10:19 +02003518
kuuseac3a8882019-10-03 10:48:06 +02003519 # SCALE RO - BEGIN
3520 # Should this block be skipped if 'RO_nsr_id' == None ?
3521 # if (RO_nsr_id and RO_scaling_info):
tierno59d22d22018-09-25 18:10:19 +02003522 if RO_scaling_info:
tierno9ab95942018-10-10 16:44:22 +02003523 scale_process = "RO"
kuuseac3a8882019-10-03 10:48:06 +02003524 # Scale RO reintent check: Check if this sub-operation has been executed before
3525 op_index = self._check_or_add_scale_suboperation(
3526 db_nslcmop, vnf_index, None, None, 'SCALE-RO', RO_nsr_id, RO_scaling_info)
3527 if (op_index == self.SUBOPERATION_STATUS_SKIP):
3528 # Skip sub-operation
3529 result = 'COMPLETED'
3530 result_detail = 'Done'
3531 self.logger.debug(logging_text + "Skipped sub-operation RO, result {} {}".format(
3532 result, result_detail))
3533 else:
3534 if (op_index == self.SUBOPERATION_STATUS_NEW):
3535 # New sub-operation: Get index of this sub-operation
3536 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3537 self.logger.debug(logging_text + "New sub-operation RO")
tierno59d22d22018-09-25 18:10:19 +02003538 else:
kuuseac3a8882019-10-03 10:48:06 +02003539 # Reintent: Get registered params for this existing sub-operation
3540 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3541 RO_nsr_id = op.get('RO_nsr_id')
3542 RO_scaling_info = op.get('RO_scaling_info')
3543 self.logger.debug(logging_text + "Sub-operation RO reintent".format(
3544 vnf_config_primitive))
3545
3546 RO_desc = await self.RO.create_action("ns", RO_nsr_id, {"vdu-scaling": RO_scaling_info})
3547 db_nsr_update["_admin.scaling-group.{}.nb-scale-op".format(admin_scale_index)] = nb_scale_op
3548 db_nsr_update["_admin.scaling-group.{}.time".format(admin_scale_index)] = time()
3549 # wait until ready
3550 RO_nslcmop_id = RO_desc["instance_action_id"]
3551 db_nslcmop_update["_admin.deploy.RO"] = RO_nslcmop_id
3552
3553 RO_task_done = False
3554 step = detailed_status = "Waiting RO_task_id={} to complete the scale action.".format(RO_nslcmop_id)
3555 detailed_status_old = None
3556 self.logger.debug(logging_text + step)
3557
3558 deployment_timeout = 1 * 3600 # One hour
3559 while deployment_timeout > 0:
3560 if not RO_task_done:
3561 desc = await self.RO.show("ns", item_id_name=RO_nsr_id, extra_item="action",
3562 extra_item_id=RO_nslcmop_id)
quilesj3655ae02019-12-12 16:08:35 +00003563
3564 # deploymentStatus
3565 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3566
kuuseac3a8882019-10-03 10:48:06 +02003567 ns_status, ns_status_info = self.RO.check_action_status(desc)
3568 if ns_status == "ERROR":
3569 raise ROclient.ROClientException(ns_status_info)
3570 elif ns_status == "BUILD":
3571 detailed_status = step + "; {}".format(ns_status_info)
3572 elif ns_status == "ACTIVE":
3573 RO_task_done = True
3574 step = detailed_status = "Waiting ns ready at RO. RO_id={}".format(RO_nsr_id)
3575 self.logger.debug(logging_text + step)
3576 else:
3577 assert False, "ROclient.check_action_status returns unknown {}".format(ns_status)
tierno59d22d22018-09-25 18:10:19 +02003578 else:
quilesj7e13aeb2019-10-08 13:34:55 +02003579
kuuseac3a8882019-10-03 10:48:06 +02003580 if ns_status == "ERROR":
3581 raise ROclient.ROClientException(ns_status_info)
3582 elif ns_status == "BUILD":
3583 detailed_status = step + "; {}".format(ns_status_info)
3584 elif ns_status == "ACTIVE":
3585 step = detailed_status = \
3586 "Waiting for management IP address reported by the VIM. Updating VNFRs"
3587 if not vnfr_scaled:
3588 self.scale_vnfr(db_vnfr, vdu_create=vdu_create, vdu_delete=vdu_delete)
3589 vnfr_scaled = True
3590 try:
3591 desc = await self.RO.show("ns", RO_nsr_id)
quilesj3655ae02019-12-12 16:08:35 +00003592
3593 # deploymentStatus
3594 self._on_update_ro_db(nsrs_id=nsr_id, ro_descriptor=desc)
3595
kuuseac3a8882019-10-03 10:48:06 +02003596 # nsr_deployed["nsr_ip"] = RO.get_ns_vnf_info(desc)
3597 self.ns_update_vnfr({db_vnfr["member-vnf-index-ref"]: db_vnfr}, desc)
3598 break
3599 except LcmExceptionNoMgmtIP:
3600 pass
3601 else:
3602 assert False, "ROclient.check_ns_status returns unknown {}".format(ns_status)
3603 if detailed_status != detailed_status_old:
3604 self._update_suboperation_status(
3605 db_nslcmop, op_index, 'COMPLETED', detailed_status)
3606 detailed_status_old = db_nslcmop_update["detailed-status"] = detailed_status
3607 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
tierno59d22d22018-09-25 18:10:19 +02003608
kuuseac3a8882019-10-03 10:48:06 +02003609 await asyncio.sleep(5, loop=self.loop)
3610 deployment_timeout -= 5
3611 if deployment_timeout <= 0:
3612 self._update_suboperation_status(
3613 db_nslcmop, nslcmop_id, op_index, 'FAILED', "Timeout when waiting for ns to get ready")
3614 raise ROclient.ROClientException("Timeout waiting ns to be ready")
tierno59d22d22018-09-25 18:10:19 +02003615
kuuseac3a8882019-10-03 10:48:06 +02003616 # update VDU_SCALING_INFO with the obtained ip_addresses
3617 if vdu_scaling_info["scaling_direction"] == "OUT":
3618 for vdur in reversed(db_vnfr["vdur"]):
3619 if vdu_scaling_info["vdu-create"].get(vdur["vdu-id-ref"]):
3620 vdu_scaling_info["vdu-create"][vdur["vdu-id-ref"]] -= 1
3621 vdu_scaling_info["vdu"].append({
3622 "name": vdur["name"],
3623 "vdu_id": vdur["vdu-id-ref"],
3624 "interface": []
tierno59d22d22018-09-25 18:10:19 +02003625 })
kuuseac3a8882019-10-03 10:48:06 +02003626 for interface in vdur["interfaces"]:
3627 vdu_scaling_info["vdu"][-1]["interface"].append({
3628 "name": interface["name"],
3629 "ip_address": interface["ip-address"],
3630 "mac_address": interface.get("mac-address"),
3631 })
3632 del vdu_scaling_info["vdu-create"]
3633
3634 self._update_suboperation_status(db_nslcmop, op_index, 'COMPLETED', 'Done')
3635 # SCALE RO - END
tierno59d22d22018-09-25 18:10:19 +02003636
tierno9ab95942018-10-10 16:44:22 +02003637 scale_process = None
tierno59d22d22018-09-25 18:10:19 +02003638 if db_nsr_update:
3639 self.update_db_2("nsrs", nsr_id, db_nsr_update)
3640
kuuseac3a8882019-10-03 10:48:06 +02003641 # POST-SCALE BEGIN
tierno59d22d22018-09-25 18:10:19 +02003642 # execute primitive service POST-SCALING
3643 step = "Executing post-scale vnf-config-primitive"
3644 if scaling_descriptor.get("scaling-config-action"):
3645 for scaling_config_action in scaling_descriptor["scaling-config-action"]:
kuuseac3a8882019-10-03 10:48:06 +02003646 if (scaling_config_action.get("trigger") == "post-scale-in" and scaling_type == "SCALE_IN") \
3647 or (scaling_config_action.get("trigger") == "post-scale-out" and scaling_type == "SCALE_OUT"):
tierno59d22d22018-09-25 18:10:19 +02003648 vnf_config_primitive = scaling_config_action["vnf-config-primitive-name-ref"]
3649 step = db_nslcmop_update["detailed-status"] = \
3650 "executing post-scale scaling-config-action '{}'".format(vnf_config_primitive)
tiernoda964822019-01-14 15:53:47 +00003651
tierno589befb2019-05-29 07:06:23 +00003652 vnfr_params = {"VDU_SCALE_INFO": vdu_scaling_info}
tiernoda964822019-01-14 15:53:47 +00003653 if db_vnfr.get("additionalParamsForVnf"):
3654 vnfr_params.update(db_vnfr["additionalParamsForVnf"])
3655
tierno59d22d22018-09-25 18:10:19 +02003656 # look for primitive
tierno59d22d22018-09-25 18:10:19 +02003657 for config_primitive in db_vnfd.get("vnf-configuration", {}).get("config-primitive", ()):
3658 if config_primitive["name"] == vnf_config_primitive:
tierno59d22d22018-09-25 18:10:19 +02003659 break
3660 else:
3661 raise LcmException("Invalid vnfd descriptor at scaling-group-descriptor[name='{}']:"
3662 "scaling-config-action[vnf-config-primitive-name-ref='{}'] does not "
tierno47e86b52018-10-10 14:05:55 +02003663 "match any vnf-configuration:config-primitive".format(scaling_group,
3664 config_primitive))
tierno9ab95942018-10-10 16:44:22 +02003665 scale_process = "VCA"
tiernod6de1992018-10-11 13:05:52 +02003666 db_nsr_update["config-status"] = "configuring post-scaling"
kuuseac3a8882019-10-03 10:48:06 +02003667 primitive_params = self._map_primitive_params(config_primitive, {}, vnfr_params)
tiernod6de1992018-10-11 13:05:52 +02003668
kuuseac3a8882019-10-03 10:48:06 +02003669 # Post-scale reintent check: Check if this sub-operation has been executed before
3670 op_index = self._check_or_add_scale_suboperation(
3671 db_nslcmop, nslcmop_id, vnf_index, vnf_config_primitive, primitive_params, 'POST-SCALE')
quilesj4cda56b2019-12-05 10:02:20 +00003672 if op_index == self.SUBOPERATION_STATUS_SKIP:
kuuseac3a8882019-10-03 10:48:06 +02003673 # Skip sub-operation
3674 result = 'COMPLETED'
3675 result_detail = 'Done'
3676 self.logger.debug(logging_text +
3677 "vnf_config_primitive={} Skipped sub-operation, result {} {}".
3678 format(vnf_config_primitive, result, result_detail))
3679 else:
quilesj4cda56b2019-12-05 10:02:20 +00003680 if op_index == self.SUBOPERATION_STATUS_NEW:
kuuseac3a8882019-10-03 10:48:06 +02003681 # New sub-operation: Get index of this sub-operation
3682 op_index = len(db_nslcmop.get('_admin', {}).get('operations')) - 1
3683 self.logger.debug(logging_text + "vnf_config_primitive={} New sub-operation".
3684 format(vnf_config_primitive))
3685 else:
3686 # Reintent: Get registered params for this existing sub-operation
3687 op = db_nslcmop.get('_admin', {}).get('operations', [])[op_index]
3688 vnf_index = op.get('member_vnf_index')
3689 vnf_config_primitive = op.get('primitive')
3690 primitive_params = op.get('primitive_params')
3691 self.logger.debug(logging_text + "vnf_config_primitive={} Sub-operation reintent".
3692 format(vnf_config_primitive))
3693 # Execute the primitive, either with new (first-time) or registered (reintent) args
3694 result, result_detail = await self._ns_execute_primitive(
tiernoe876f672020-02-13 14:34:48 +00003695 self._look_for_deployed_vca(nsr_deployed["VCA"],
3696 member_vnf_index=vnf_index,
3697 vdu_id=None,
tiernoe876f672020-02-13 14:34:48 +00003698 vdu_count_index=None),
3699 vnf_config_primitive, primitive_params)
kuuseac3a8882019-10-03 10:48:06 +02003700 self.logger.debug(logging_text + "vnf_config_primitive={} Done with result {} {}".format(
3701 vnf_config_primitive, result, result_detail))
3702 # Update operationState = COMPLETED | FAILED
3703 self._update_suboperation_status(
3704 db_nslcmop, op_index, result, result_detail)
3705
tierno59d22d22018-09-25 18:10:19 +02003706 if result == "FAILED":
3707 raise LcmException(result_detail)
tiernod6de1992018-10-11 13:05:52 +02003708 db_nsr_update["config-status"] = old_config_status
3709 scale_process = None
kuuseac3a8882019-10-03 10:48:06 +02003710 # POST-SCALE END
tierno59d22d22018-09-25 18:10:19 +02003711
3712 db_nslcmop_update["operationState"] = nslcmop_operation_state = "COMPLETED"
3713 db_nslcmop_update["statusEnteredTime"] = time()
3714 db_nslcmop_update["detailed-status"] = "done"
tiernod6de1992018-10-11 13:05:52 +02003715 db_nsr_update["detailed-status"] = "" # "scaled {} {}".format(scaling_group, scaling_type)
ikalyvas02d9e7b2019-05-27 18:16:01 +03003716 db_nsr_update["operational-status"] = "running" if old_operational_status == "failed" \
3717 else old_operational_status
tiernod6de1992018-10-11 13:05:52 +02003718 db_nsr_update["config-status"] = old_config_status
tierno59d22d22018-09-25 18:10:19 +02003719 return
3720 except (ROclient.ROClientException, DbException, LcmException) as e:
3721 self.logger.error(logging_text + "Exit Exception {}".format(e))
3722 exc = e
3723 except asyncio.CancelledError:
3724 self.logger.error(logging_text + "Cancelled Exception while '{}'".format(step))
3725 exc = "Operation was cancelled"
3726 except Exception as e:
3727 exc = traceback.format_exc()
3728 self.logger.critical(logging_text + "Exit Exception {} {}".format(type(e).__name__, e), exc_info=True)
3729 finally:
quilesj3655ae02019-12-12 16:08:35 +00003730 self._write_ns_status(
3731 nsr_id=nsr_id,
3732 ns_state=None,
3733 current_operation="IDLE",
3734 current_operation_id=None
3735 )
tierno59d22d22018-09-25 18:10:19 +02003736 if exc:
3737 if db_nslcmop:
3738 db_nslcmop_update["detailed-status"] = "FAILED {}: {}".format(step, exc)
3739 db_nslcmop_update["operationState"] = nslcmop_operation_state = "FAILED"
3740 db_nslcmop_update["statusEnteredTime"] = time()
3741 if db_nsr:
tiernod6de1992018-10-11 13:05:52 +02003742 db_nsr_update["operational-status"] = old_operational_status
3743 db_nsr_update["config-status"] = old_config_status
3744 db_nsr_update["detailed-status"] = ""
3745 if scale_process:
3746 if "VCA" in scale_process:
3747 db_nsr_update["config-status"] = "failed"
3748 if "RO" in scale_process:
3749 db_nsr_update["operational-status"] = "failed"
3750 db_nsr_update["detailed-status"] = "FAILED scaling nslcmop={} {}: {}".format(nslcmop_id, step,
3751 exc)
tiernobaa51102018-12-14 13:16:18 +00003752 try:
3753 if db_nslcmop and db_nslcmop_update:
3754 self.update_db_2("nslcmops", nslcmop_id, db_nslcmop_update)
3755 if db_nsr:
quilesj4cda56b2019-12-05 10:02:20 +00003756 self._write_ns_status(
3757 nsr_id=nsr_id,
3758 ns_state=None,
3759 current_operation="IDLE",
tiernoe876f672020-02-13 14:34:48 +00003760 current_operation_id=None,
3761 other_update=db_nsr_update
quilesj4cda56b2019-12-05 10:02:20 +00003762 )
3763
tiernobaa51102018-12-14 13:16:18 +00003764 except DbException as e:
3765 self.logger.error(logging_text + "Cannot update database: {}".format(e))
tierno59d22d22018-09-25 18:10:19 +02003766 if nslcmop_operation_state:
3767 try:
3768 await self.msg.aiowrite("ns", "scaled", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id,
tierno8a518872018-12-21 13:42:14 +00003769 "operationState": nslcmop_operation_state},
3770 loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003771 # if cooldown_time:
tiernod8323042019-08-09 11:32:23 +00003772 # await asyncio.sleep(cooldown_time, loop=self.loop)
tierno59d22d22018-09-25 18:10:19 +02003773 # await self.msg.aiowrite("ns","scaled-cooldown-time", {"nsr_id": nsr_id, "nslcmop_id": nslcmop_id})
3774 except Exception as e:
3775 self.logger.error(logging_text + "kafka_write notification Exception {}".format(e))
3776 self.logger.debug(logging_text + "Exit")
3777 self.lcm_tasks.remove("ns", nsr_id, nslcmop_id, "ns_scale")